From 7e4963592f83f2cb3b79c5c3e72766f8aab0953f Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 15 Aug 2024 17:25:43 +0530 Subject: [PATCH 01/41] Support multiple producers in redis streams --- arbnode/dataposter/data_poster.go | 2 +- arbnode/dataposter/redis/redisstorage.go | 4 +- arbnode/redislock/redis.go | 2 +- arbnode/seq_coordinator.go | 2 +- .../rediscoordinator/redis_coordinator.go | 2 +- das/redis_storage_service.go | 2 +- go.mod | 8 +- go.sum | 17 +- pubsub/common.go | 7 +- pubsub/consumer.go | 155 ++++----- pubsub/producer.go | 307 ++++++------------ pubsub/pubsub_test.go | 210 ++++++++---- system_tests/common_test.go | 2 +- system_tests/seq_coordinator_test.go | 2 +- util/redisutil/redis_coordinator.go | 2 +- util/redisutil/redisutil.go | 2 +- validator/client/redis/producer.go | 5 +- validator/validation_entry.go | 16 + validator/valnode/redis/consumer.go | 2 +- 19 files changed, 388 insertions(+), 361 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 15446fe855..f90dc9bf3f 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -33,7 +33,6 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/signer/core/apitypes" - "github.com/go-redis/redis/v8" "github.com/holiman/uint256" "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" "github.com/offchainlabs/nitro/arbnode/dataposter/noop" @@ -46,6 +45,7 @@ import ( "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/stopwaiter" + "github.com/redis/go-redis/v9" "github.com/spf13/pflag" redisstorage "github.com/offchainlabs/nitro/arbnode/dataposter/redis" diff --git a/arbnode/dataposter/redis/redisstorage.go b/arbnode/dataposter/redis/redisstorage.go index 8b6dcf65ac..b54abf618b 100644 --- a/arbnode/dataposter/redis/redisstorage.go +++ b/arbnode/dataposter/redis/redisstorage.go @@ -9,9 +9,9 @@ import ( "errors" "fmt" - "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/util/signature" + "github.com/redis/go-redis/v9" ) // Storage implements redis sorted set backed storage. 
It does not support @@ -196,7 +196,7 @@ func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.Queu if err != nil { return err } - if err := pipe.ZAdd(ctx, s.key, &redis.Z{ + if err := pipe.ZAdd(ctx, s.key, redis.Z{ Score: float64(index), Member: string(signedItem), }).Err(); err != nil { diff --git a/arbnode/redislock/redis.go b/arbnode/redislock/redis.go index 7e26010cae..de9508323a 100644 --- a/arbnode/redislock/redis.go +++ b/arbnode/redislock/redis.go @@ -12,8 +12,8 @@ import ( "time" "github.com/ethereum/go-ethereum/log" - "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/util/stopwaiter" + "github.com/redis/go-redis/v9" flag "github.com/spf13/pflag" ) diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index 98c19ce361..80c22ab510 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -14,7 +14,7 @@ import ( "sync/atomic" "time" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/log" diff --git a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go index e963c0e96c..b897b23252 100644 --- a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -5,8 +5,8 @@ import ( "errors" "strings" - "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/util/redisutil" + "github.com/redis/go-redis/v9" ) // RedisCoordinator builds upon RedisCoordinator of redisutil with additional functionality diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index 210d5cb2d4..e57240992c 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -12,11 +12,11 @@ import ( "golang.org/x/crypto/sha3" - "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/redisutil" + "github.com/redis/go-redis/v9" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" diff --git a/go.mod b/go.mod index 6649973725..5453205c5d 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,6 @@ require ( github.com/ethereum/go-ethereum v1.10.26 github.com/fatih/structtag v1.2.0 github.com/gdamore/tcell/v2 v2.7.1 - github.com/go-redis/redis/v8 v8.11.5 github.com/gobwas/httphead v0.1.0 github.com/gobwas/ws v1.2.1 github.com/gobwas/ws-examples v0.0.0-20190625122829-a9e8908d9484 @@ -38,6 +37,7 @@ require ( github.com/mitchellh/mapstructure v1.4.1 github.com/pkg/errors v0.9.1 github.com/r3labs/diff/v3 v3.0.1 + github.com/redis/go-redis/v9 v9.6.1 github.com/rivo/tview v0.0.0-20240307173318-e804876934a1 github.com/spf13/pflag v1.0.5 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 @@ -51,7 +51,11 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) -require github.com/google/go-querystring v1.1.0 // indirect +require ( + github.com/google/go-querystring v1.1.0 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect + github.com/onsi/gomega v1.18.1 // indirect +) require ( github.com/DataDog/zstd v1.4.5 // indirect diff --git a/go.sum b/go.sum index 8529b2497d..bf0b385631 100644 --- a/go.sum +++ b/go.sum @@ -136,6 +136,10 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 
github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= @@ -273,11 +277,10 @@ github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AE github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= @@ -377,6 +380,7 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= @@ -433,6 +437,7 @@ github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -577,10 +582,13 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= @@ -628,6 +636,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg= github.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rhnvrm/simples3 v0.6.1 h1:H0DJwybR6ryQE+Odi9eqkHuzjYAeJgtGcGtuBwOhsH8= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rivo/tview v0.0.0-20240307173318-e804876934a1 h1:bWLHTRekAy497pE7+nXSuzXwwFHI0XauRzz6roUvY+s= @@ -823,6 +833,7 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -904,6 +915,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1001,6 +1013,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/pubsub/common.go b/pubsub/common.go index d7f041af15..4b5778b9ba 100644 --- a/pubsub/common.go +++ b/pubsub/common.go @@ -2,12 +2,17 @@ package pubsub import ( "context" + "fmt" "strings" "github.com/ethereum/go-ethereum/log" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" ) +const UNIQUEID_MSGID_MAP_KEY string = ".msgId" // Used to map a unique request identifier to the msgId of the stream message containing that request + +func MessageKeyFor(streamName, id string) string { return fmt.Sprintf("%s.%s", streamName, id) } + // CreateStream tries to create stream with given name, if it already exists // does not return an error. func CreateStream(ctx context.Context, streamName string, client redis.UniversalClient) error { diff --git a/pubsub/consumer.go b/pubsub/consumer.go index df3695606d..3adb571343 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -8,44 +8,44 @@ import ( "time" "github.com/ethereum/go-ethereum/log" - "github.com/go-redis/redis/v8" "github.com/google/uuid" "github.com/offchainlabs/nitro/util/stopwaiter" + "github.com/redis/go-redis/v9" "github.com/spf13/pflag" ) type ConsumerConfig struct { // Timeout of result entry in Redis. ResponseEntryTimeout time.Duration `koanf:"response-entry-timeout"` - // Duration after which consumer is considered to be dead if heartbeat - // is not updated.
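// Illustrative aside, not part of the patch: under the heartbeat scheme removed here,
// each consumer had to keep refreshing a per-consumer key; with IdletimeToAutoclaim the
// Pending Entries List itself tracks liveness, so recovering a dead consumer's work
// reduces to a single autoclaim call (values taken from DefaultConsumerConfig):
//
//	msgs, _, err := client.XAutoClaim(ctx, &redis.XAutoClaimArgs{
//		Stream:   streamName,
//		Group:    streamName, // 1-1 stream/group mapping, as in NewConsumer
//		Consumer: survivorID,
//		MinIdle:  30 * time.Minute, // IdletimeToAutoclaim
//		Start:    "0",
//		Count:    1,
//	}).Result()
//
// A message only becomes eligible here once no XCLAIM has refreshed its idle time for
// IdletimeToAutoclaim.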
- KeepAliveTimeout time.Duration `koanf:"keepalive-timeout"` + // Minimum idle time after which messages will be autoclaimed + IdletimeToAutoclaim time.Duration `koanf:"idletime-to-autoclaim"` } var DefaultConsumerConfig = ConsumerConfig{ ResponseEntryTimeout: time.Hour, - KeepAliveTimeout: 5 * time.Minute, + IdletimeToAutoclaim: 30 * time.Minute, } var TestConsumerConfig = ConsumerConfig{ ResponseEntryTimeout: time.Minute, - KeepAliveTimeout: 30 * time.Millisecond, + IdletimeToAutoclaim: time.Second, } func ConsumerConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".response-entry-timeout", DefaultConsumerConfig.ResponseEntryTimeout, "timeout for response entry") - f.Duration(prefix+".keepalive-timeout", DefaultConsumerConfig.KeepAliveTimeout, "timeout after which consumer is considered inactive if heartbeat wasn't performed") + f.Duration(prefix+".idletime-to-autoclaim", DefaultConsumerConfig.IdletimeToAutoclaim, "after a message spends this amount of time in the PEL (Pending Entries List, i.e. claimed by another consumer but not acknowledged), it becomes eligible to be autoclaimed by other consumers") } // Consumer implements a consumer for redis stream and provides a keepalive to // indicate it is alive. type Consumer[Request any, Response any] struct { stopwaiter.StopWaiter - id string - client redis.UniversalClient - redisStream string - redisGroup string - cfg *ConsumerConfig + id string + client redis.UniversalClient + redisStream string + redisGroup string + cfg *ConsumerConfig + ackNotifiers map[string]chan struct{} } type Message[Request any] struct { @@ -58,32 +58,22 @@ func NewConsumer[Request any, Response any](client redis.UniversalClient, stream return nil, fmt.Errorf("redis stream name cannot be empty") } return &Consumer[Request, Response]{ - id: uuid.NewString(), - client: client, - redisStream: streamName, - redisGroup: streamName, // There is 1-1 mapping of redis stream and consumer group. - cfg: cfg, + id: uuid.NewString(), + client: client, + redisStream: streamName, + redisGroup: streamName, // There is 1-1 mapping of redis stream and consumer group. + cfg: cfg, + ackNotifiers: make(map[string]chan struct{}), }, nil } // Start starts the consumer's stopwaiter. func (c *Consumer[Request, Response]) Start(ctx context.Context) { c.StopWaiter.Start(ctx, c) - c.StopWaiter.CallIteratively( - func(ctx context.Context) time.Duration { - c.heartBeat(ctx) - return c.cfg.KeepAliveTimeout / 10 - }, - ) } func (c *Consumer[Request, Response]) StopAndWait() { c.StopWaiter.StopAndWait() - c.deleteHeartBeat(c.GetParentContext()) -} - -func heartBeatKey(id string) string { - return fmt.Sprintf("consumer:%s:heartbeat", id) } func (c *Consumer[Request, Response]) RedisClient() redis.UniversalClient { @@ -94,55 +84,44 @@ func (c *Consumer[Request, Response]) StreamName() string { return c.redisStream } -func (c *Consumer[Request, Response]) heartBeatKey() string { - return heartBeatKey(c.id) -} - -// deleteHeartBeat deletes the heartbeat to indicate it is being shut down. -func (c *Consumer[Request, Response]) deleteHeartBeat(ctx context.Context) { - if err := c.client.Del(ctx, c.heartBeatKey()).Err(); err != nil { - l := log.Info - if ctx.Err() != nil { - l = log.Error - } - l("Deleting heardbeat", "consumer", c.id, "error", err) - } -} - -// heartBeat updates the heartBeat key indicating aliveness.
-func (c *Consumer[Request, Response]) heartBeat(ctx context.Context) { - if err := c.client.Set(ctx, c.heartBeatKey(), time.Now().UnixMilli(), 2*c.cfg.KeepAliveTimeout).Err(); err != nil { - l := log.Info - if ctx.Err() != nil { - l = log.Error - } - l("Updating heardbeat", "consumer", c.id, "error", err) - } -} - // Consumer first checks if there exists a pending message that is claimed by // unresponsive consumer, if not then reads from the stream. func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Request], error) { - res, err := c.client.XReadGroup(ctx, &redis.XReadGroupArgs{ + // First try to XAUTOCLAIM, this prioritizes processing PEL messages + // that have been waiting for more than IdletimeToAutoclaim duration + messages, _, err := c.client.XAutoClaim(ctx, &redis.XAutoClaimArgs{ Group: c.redisGroup, Consumer: c.id, - // Receive only messages that were never delivered to any other consumer, - // that is, only new messages. - Streams: []string{c.redisStream, ">"}, - Count: 1, - Block: time.Millisecond, // 0 seems to block the read instead of immediately returning + MinIdle: c.cfg.IdletimeToAutoclaim, // Minimum idle time for a message to be eligible for autoclaim + Stream: c.redisStream, + Start: "0", + Count: 1, // Limit the number of messages to claim }).Result() - if errors.Is(err, redis.Nil) { - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("reading message for consumer: %q: %w", c.id, err) - } - if len(res) != 1 || len(res[0].Messages) != 1 { - return nil, fmt.Errorf("redis returned entries: %+v, for querying single message", res) + if len(messages) != 1 || err != nil { + // Fallback to reading new messages + res, err := c.client.XReadGroup(ctx, &redis.XReadGroupArgs{ + Group: c.redisGroup, + Consumer: c.id, + // Receive only messages that were never delivered to any other consumer, + // that is, only new messages.
+ Streams: []string{c.redisStream, ">"}, + Count: 1, + Block: time.Millisecond, // 0 seems to block the read instead of immediately returning + }).Result() + if errors.Is(err, redis.Nil) { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("reading message for consumer: %q: %w", c.id, err) + } + if len(res) != 1 || len(res[0].Messages) != 1 { + return nil, fmt.Errorf("redis returned entries: %+v, for querying single message", res) + } + messages = res[0].Messages } + var ( - value = res[0].Messages[0].Values[messageKey] + value = messages[0].Values[messageKey] data, ok = (value).(string) ) if !ok { @@ -152,24 +131,52 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req if err := json.Unmarshal([]byte(data), &req); err != nil { return nil, fmt.Errorf("unmarshaling value: %v, error: %w", value, err) } - log.Debug("Redis stream consuming", "consumer_id", c.id, "message_id", res[0].Messages[0].ID) + ackNotifier := make(chan struct{}) + c.StopWaiter.LaunchThread(func(ctx context.Context) { + for { + if err := c.client.XClaim(ctx, &redis.XClaimArgs{ + Stream: c.redisStream, + Group: c.redisGroup, + Consumer: c.id, + MinIdle: 0, + Messages: []string{messages[0].ID}, + }).Err(); err != nil { + log.Error("error claiming message, other consumers may pick up this request", "msgID", messages[0].ID, "err", err) + } + select { + case <-ackNotifier: + return + case <-ctx.Done(): + log.Info("Context done while claiming message to indicate heartbeat", "error", ctx.Err().Error()) + return + case <-time.After(c.cfg.IdletimeToAutoclaim / 3): + } + } + }) + c.ackNotifiers[messages[0].ID] = ackNotifier log.Debug("Redis stream consuming", "consumer_id", c.id, "message_id", messages[0].ID) return &Message[Request]{ - ID: res[0].Messages[0].ID, + ID: messages[0].ID, Value: req, }, nil } -func (c *Consumer[Request, Response]) SetResult(ctx context.Context, messageID string, result Response) error { +func (c *Consumer[Request, Response]) SetResult(ctx context.Context, id string, messageID string, result Response) error { + if id == "" { + log.Info("Request doesn't have a unique identifier (SelfHash field is not set), defaulting to using redis stream messageId", "msgId", messageID) + id = messageID + } resp, err := json.Marshal(result) if err != nil { return fmt.Errorf("marshaling result: %w", err) } - acquired, err := c.client.SetNX(ctx, messageID, resp, c.cfg.ResponseEntryTimeout).Result() + acquired, err := c.client.SetNX(ctx, MessageKeyFor(c.StreamName(), id), resp, c.cfg.ResponseEntryTimeout).Result() if err != nil || !acquired { - return fmt.Errorf("setting result for message: %v, error: %w", messageID, err) + return fmt.Errorf("setting result for message with message-id in stream: %v, unique request identifier: %v, error: %w", messageID, id, err) } if _, err := c.client.XAck(ctx, c.redisStream, c.redisGroup, messageID).Result(); err != nil { return fmt.Errorf("acking message: %v, error: %w", messageID, err) } + close(c.ackNotifiers[messageID]) return nil } diff --git a/pubsub/producer.go b/pubsub/producer.go index 2b1cdb5e3f..df6e7d5a28 100644 --- a/pubsub/producer.go +++ b/pubsub/producer.go @@ -13,17 +13,16 @@ import ( "encoding/json" "errors" "fmt" - "math" "strconv" "strings" "sync" "time" "github.com/ethereum/go-ethereum/log" - "github.com/go-redis/redis/v8" "github.com/google/uuid" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/stopwaiter" + "github.com/redis/go-redis/v9" "github.com/spf13/pflag" ) @@ -32,6
+31,11 @@ const ( defaultGroup = "default_consumer_group" ) +type MsgIdAndPromise[Response any] struct { + msgID string + promise *containers.Promise[Response] +} + type Producer[Request any, Response any] struct { stopwaiter.StopWaiter id string @@ -41,52 +45,33 @@ type Producer[Request any, Response any] struct { cfg *ProducerConfig promisesLock sync.RWMutex - promises map[string]*containers.Promise[Response] + promises map[string]*MsgIdAndPromise[Response] - // Used for running checks for pending messages with inactive consumers - // and checking responses from consumers iteratively for the first time when - // Produce is called. + // Used to start checking responses from consumers iteratively, + // the first time Produce is called. once sync.Once } type ProducerConfig struct { - // When enabled, messages that are sent to consumers that later die before - // processing them, will be re-inserted into the stream to be proceesed by - // another consumer - EnableReproduce bool `koanf:"enable-reproduce"` - // Interval duration in which producer checks for pending messages delivered - // to the consumers that are currently inactive. - CheckPendingInterval time.Duration `koanf:"check-pending-interval"` - // Duration after which consumer is considered to be dead if heartbeat - // is not updated. - KeepAliveTimeout time.Duration `koanf:"keepalive-timeout"` // Interval duration for checking the result set by consumers. CheckResultInterval time.Duration `koanf:"check-result-interval"` - CheckPendingItems int64 `koanf:"check-pending-items"` + // Timeout for entries written to redis by the producer + ResponseEntryTimeout time.Duration `koanf:"response-entry-timeout"` } var DefaultProducerConfig = ProducerConfig{ - EnableReproduce: true, - CheckPendingInterval: time.Second, - KeepAliveTimeout: 5 * time.Minute, CheckResultInterval: 5 * time.Second, - CheckPendingItems: 256, + ResponseEntryTimeout: time.Hour, } var TestProducerConfig = ProducerConfig{ - EnableReproduce: false, - CheckPendingInterval: 10 * time.Millisecond, - KeepAliveTimeout: 100 * time.Millisecond, CheckResultInterval: 5 * time.Millisecond, - CheckPendingItems: 256, + ResponseEntryTimeout: time.Minute, } func ProducerAddConfigAddOptions(prefix string, f *pflag.FlagSet) { - f.Bool(prefix+".enable-reproduce", DefaultProducerConfig.EnableReproduce, "when enabled, messages with dead consumer will be re-inserted into the stream") - f.Duration(prefix+".check-pending-interval", DefaultProducerConfig.CheckPendingInterval, "interval in which producer checks pending messages whether consumer processing them is inactive") f.Duration(prefix+".check-result-interval", DefaultProducerConfig.CheckResultInterval, "interval in which producer checks whether consumers have set results for pending messages") - f.Duration(prefix+".keepalive-timeout", DefaultProducerConfig.KeepAliveTimeout, "timeout after which consumer is considered inactive if heartbeat wasn't performed") - f.Int64(prefix+".check-pending-items", DefaultProducerConfig.CheckPendingItems, "items to screen during check-pending") + f.Duration(prefix+".response-entry-timeout", DefaultProducerConfig.ResponseEntryTimeout, "timeout after which entries written by the producer to redis are cleared.
Currently used for the key mapping unique request id to redis stream message id") } func NewProducer[Request any, Response any](client redis.UniversalClient, streamName string, cfg *ProducerConfig) (*Producer[Request, Response], error) { @@ -102,150 +87,88 @@ func NewProducer[Request any, Response any](client redis.UniversalClient, stream redisStream: streamName, redisGroup: streamName, // There is 1-1 mapping of redis stream and consumer group. cfg: cfg, - promises: make(map[string]*containers.Promise[Response]), + promises: make(map[string]*MsgIdAndPromise[Response]), }, nil } -func (p *Producer[Request, Response]) errorPromisesFor(msgIds []string) { - p.promisesLock.Lock() - defer p.promisesLock.Unlock() - for _, msg := range msgIds { - if promise, found := p.promises[msg]; found { - promise.ProduceError(fmt.Errorf("internal error, consumer died while serving the request")) - delete(p.promises, msg) - } - } -} - -// checkAndReproduce reproduce pending messages that were sent to consumers -// that are currently inactive. -func (p *Producer[Request, Response]) checkAndReproduce(ctx context.Context) time.Duration { - staleIds, err := p.checkPending(ctx) - if err != nil { - log.Error("Checking pending messages", "error", err) - return p.cfg.CheckPendingInterval - } - if len(staleIds) == 0 { - return p.cfg.CheckPendingInterval - } - if p.cfg.EnableReproduce { - err = p.reproduceIds(ctx, staleIds) - if err != nil { - log.Warn("filed reproducing messages", "err", err) - } - } else { - p.errorPromisesFor(staleIds) - } - return p.cfg.CheckPendingInterval -} - -func (p *Producer[Request, Response]) reproduceIds(ctx context.Context, staleIds []string) error { - log.Info("Attempting to claim", "messages", staleIds) - claimedMsgs, err := p.client.XClaim(ctx, &redis.XClaimArgs{ - Stream: p.redisStream, - Group: p.redisGroup, - Consumer: p.id, - MinIdle: p.cfg.KeepAliveTimeout, - Messages: staleIds, - }).Result() - if err != nil { - return fmt.Errorf("claiming ownership on messages: %v, error: %w", staleIds, err) - } - for _, msg := range claimedMsgs { - data, ok := (msg.Values[messageKey]).(string) - if !ok { - log.Error("redis producer reproduce: message not string", "id", msg.ID, "value", msg.Values[messageKey]) - continue - } - var req Request - if err := json.Unmarshal([]byte(data), &req); err != nil { - log.Error("redis producer reproduce: message not a request", "id", msg.ID, "err", err, "value", msg.Values[messageKey]) - continue - } - if _, err := p.client.XAck(ctx, p.redisStream, p.redisGroup, msg.ID).Result(); err != nil { - log.Error("redis producer reproduce: could not ACK", "id", msg.ID, "err", err) - continue - } - // Only re-insert messages that were removed the the pending list first. 
- if _, err := p.reproduce(ctx, req, msg.ID); err != nil { - log.Error("redis producer reproduce: error", "err", err) - } - } - return nil -} - -func setMinIdInt(min *[2]uint64, id string) error { - idParts := strings.Split(id, "-") +func setMaxMsgIdInt(maxMsgIdInt *[2]uint64, msgId string) error { + idParts := strings.Split(msgId, "-") if len(idParts) != 2 { - return fmt.Errorf("invalid i.d: %v", id) + return fmt.Errorf("invalid msgId: %v", msgId) } idTimeStamp, err := strconv.ParseUint(idParts[0], 10, 64) if err != nil { - return fmt.Errorf("invalid i.d: %v err: %w", id, err) + return fmt.Errorf("invalid msgId: %v err: %w", msgId, err) } - if idTimeStamp > min[0] { + if idTimeStamp < maxMsgIdInt[0] { return nil } idSerial, err := strconv.ParseUint(idParts[1], 10, 64) if err != nil { - return fmt.Errorf("invalid i.d serial: %v err: %w", id, err) + return fmt.Errorf("invalid msgId serial: %v err: %w", msgId, err) } - if idTimeStamp < min[0] { - min[0] = idTimeStamp - min[1] = idSerial + if idTimeStamp > maxMsgIdInt[0] { + maxMsgIdInt[0] = idTimeStamp + maxMsgIdInt[1] = idSerial return nil } - // idTimeStamp == min[0] - if idSerial < min[1] { - min[1] = idSerial + // idTimeStamp == maxMsgIdInt[0] + if idSerial > maxMsgIdInt[1] { + maxMsgIdInt[1] = idSerial } return nil } // checkResponses checks iteratively whether response for the promise is ready. func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.Duration { - minIdInt := [2]uint64{math.MaxUint64, math.MaxUint64} + maxMsgIdInt := [2]uint64{0, 0} p.promisesLock.Lock() defer p.promisesLock.Unlock() responded := 0 errored := 0 - for id, promise := range p.promises { + for id, msgIDAndPromise := range p.promises { if ctx.Err() != nil { return 0 } - res, err := p.client.Get(ctx, id).Result() + msgKey := MessageKeyFor(p.redisStream, id) + res, err := p.client.Get(ctx, msgKey).Result() if err != nil { - errSetId := setMinIdInt(&minIdInt, id) - if errSetId != nil { - log.Error("error setting minId", "err", err) - return p.cfg.CheckResultInterval - } if !errors.Is(err, redis.Nil) { log.Error("Error reading value in redis", "key", id, "error", err) } continue } + // We keep track of the maxMsgId of a successfully solved request, because messages + // with id lower than this are either ack-ed or in PEL, so it's safe to call XTRIMMINID on maxMsgId + errSetId := setMaxMsgIdInt(&maxMsgIdInt, msgIDAndPromise.msgID) + if errSetId != nil { + log.Error("error setting maxMsgId", "err", errSetId) + return p.cfg.CheckResultInterval + } var resp Response if err := json.Unmarshal([]byte(res), &resp); err != nil { - promise.ProduceError(fmt.Errorf("error unmarshalling: %w", err)) + msgIDAndPromise.promise.ProduceError(fmt.Errorf("error unmarshalling: %w", err)) log.Error("Error unmarshaling", "value", res, "error", err) errored++ } else { - promise.Produce(resp) + msgIDAndPromise.promise.Produce(resp) responded++ } + // Try deleting UNIQUEID_MSGID_MAP_KEY corresponding to this id from redis + if err := p.client.Del(ctx, msgKey+UNIQUEID_MSGID_MAP_KEY).Err(); err != nil { + log.Error("Error deleting key from redis that flags that a request is being processed", "err", err) + } delete(p.promises, id) } var trimmed int64 var trimErr error - minId := "+" - if minIdInt[0] < math.MaxUint64 { - minId = fmt.Sprintf("%d-%d", minIdInt[0], minIdInt[1]) - trimmed, trimErr = p.client.XTrimMinID(ctx, p.redisStream, minId).Result() - } else { - trimmed, trimErr = p.client.XTrimMaxLen(ctx, p.redisStream, 0).Result() + maxMsgId := "+" + // If a response for at least
one promise was found, find the maximum of the found msg ids and XTRIMMINID from that msg id + 1 + if maxMsgIdInt[0] > 0 { + maxMsgId = fmt.Sprintf("%d-%d", maxMsgIdInt[0], maxMsgIdInt[1]+1) + trimmed, trimErr = p.client.XTrimMinID(ctx, p.redisStream, maxMsgId).Result() } - log.Trace("trimming", "id", minId, "trimmed", trimmed, "responded", responded, "errored", errored, "trim-err", trimErr) + log.Trace("trimming", "xTrimMinID", maxMsgId, "trimmed", trimmed, "responded", responded, "errored", errored, "trim-err", trimErr) return p.cfg.CheckResultInterval } @@ -259,101 +182,77 @@ func (p *Producer[Request, Response]) promisesLen() int { return len(p.promises) } -// reproduce is used when Producer claims ownership on the pending -// message that was sent to inactive consumer and reinserts it into the stream, -// so that seamlessly return the answer in the same promise. -func (p *Producer[Request, Response]) reproduce(ctx context.Context, value Request, oldKey string) (*containers.Promise[Response], error) { +func (p *Producer[Request, Response]) produce(ctx context.Context, id string, value Request) (*containers.Promise[Response], error) { + if id != "" { + msgKey := MessageKeyFor(p.redisStream, id) + + // If the request has already been solved by a consumer + if res, err := p.client.Get(ctx, msgKey).Result(); err == nil { + var resp Response + if err := json.Unmarshal([]byte(res), &resp); err != nil { + log.Error("Error unmarshaling", "value", res, "error", err) + return nil, fmt.Errorf("error unmarshalling: %w", err) + } else { + pr := containers.NewPromise[Response](nil) + pr.Produce(resp) + return &pr, nil + } + } else if !errors.Is(err, redis.Nil) { + log.Error("error while checking for response to a request in redis", "err", err) + } + + // Check for duplicate unsolved request messages in stream + if res, err := p.client.Get(ctx, msgKey+UNIQUEID_MSGID_MAP_KEY).Result(); err == nil { + log.Info("Request already submitted by another producer", "msgId", res, "requestUniqueId", id) + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + pr := containers.NewPromise[Response](nil) + p.promises[id] = &MsgIdAndPromise[Response]{ + msgID: res, + promise: &pr, + } + return &pr, nil + } + } + val, err := json.Marshal(value) if err != nil { return nil, fmt.Errorf("marshaling value: %w", err) } - // catching the promiseLock before we sendXadd makes sure promise ids will - // be always ascending + // Acquiring promisesLock before the XAdd ensures promise ids are always ascending p.promisesLock.Lock() defer p.promisesLock.Unlock() - id, err := p.client.XAdd(ctx, &redis.XAddArgs{ + msgId, err := p.client.XAdd(ctx, &redis.XAddArgs{ Stream: p.redisStream, Values: map[string]any{messageKey: val}, }).Result() if err != nil { return nil, fmt.Errorf("adding values to redis: %w", err) } - promise := p.promises[oldKey] - if oldKey != "" && promise == nil { - // This will happen if the old consumer became inactive but then ack_d - // the message afterwards.
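// Worked example, illustrative and not part of the patch: if checkResponses found
// responses for promises whose stream msgIds are "5-1", "6-0" and "6-2", then
// maxMsgIdInt ends up as [6 2] and maxMsgId becomes "6-3" (largest id, serial + 1), so
//
//	trimmed, trimErr = p.client.XTrimMinID(ctx, p.redisStream, "6-3").Result()
//
// evicts every stream entry with id below "6-3" -- exactly the entries that, by the
// reasoning in checkResponses above, are already ack-ed or tracked in the PEL.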
- // don't error - log.Warn("tried reproducing a message but it wasn't found - probably got response", "oldKey", oldKey) + + if id == "" { + // If unique id doesn't exist, use the newly created msgId as unique id and follow the same steps as before + log.Info("Request doesn't have a unique identifier (SelfHash field is not set), defaulting to using redis stream messageId", "msgId", msgId) + id = msgId } - if oldKey == "" || promise == nil { - pr := containers.NewPromise[Response](nil) - promise = &pr + + // Try adding a key that flags that this request is being processed + if err := p.client.Set(ctx, MessageKeyFor(p.redisStream, id)+UNIQUEID_MSGID_MAP_KEY, msgId, p.cfg.ResponseEntryTimeout).Err(); err != nil { + log.Error("Error adding key to redis that flags that a request is being processed, stream may encounter duplicate requests", "err", err) + } + + pr := containers.NewPromise[Response](nil) + p.promises[id] = &MsgIdAndPromise[Response]{ + msgID: msgId, + promise: &pr, } - delete(p.promises, oldKey) - p.promises[id] = promise - return promise, nil + return &pr, nil } -func (p *Producer[Request, Response]) Produce(ctx context.Context, value Request) (*containers.Promise[Response], error) { +func (p *Producer[Request, Response]) Produce(ctx context.Context, id string, value Request) (*containers.Promise[Response], error) { log.Debug("Redis stream producing", "value", value) p.once.Do(func() { - p.StopWaiter.CallIteratively(p.checkAndReproduce) p.StopWaiter.CallIteratively(p.checkResponses) }) - return p.reproduce(ctx, value, "") -} - -// Check if a consumer is with specified ID is alive. -func (p *Producer[Request, Response]) isConsumerAlive(ctx context.Context, consumerID string) bool { - if _, err := p.client.Get(ctx, heartBeatKey(consumerID)).Int64(); err != nil { - return false - } - return true -} - -func (p *Producer[Request, Response]) havePromiseFor(messageID string) bool { - p.promisesLock.Lock() - defer p.promisesLock.Unlock() - _, found := p.promises[messageID] - return found -} - -// returns ids of pending messages that's worker doesn't appear alive -func (p *Producer[Request, Response]) checkPending(ctx context.Context) ([]string, error) { - pendingMessages, err := p.client.XPendingExt(ctx, &redis.XPendingExtArgs{ - Stream: p.redisStream, - Group: p.redisGroup, - Start: "-", - End: "+", - Count: p.cfg.CheckPendingItems, - }).Result() - - if err != nil && !errors.Is(err, redis.Nil) { - return nil, fmt.Errorf("querying pending messages: %w", err) - } - if len(pendingMessages) == 0 { - return nil, nil - } - if len(pendingMessages) >= int(p.cfg.CheckPendingItems) { - log.Warn("redis producer: many pending items found", "stream", p.redisStream, "check-pending-items", p.cfg.CheckPendingItems) - } - // IDs of the pending messages with inactive consumers. - var ids []string - active := make(map[string]bool) - for _, msg := range pendingMessages { - // Ignore messages not produced by this producer.
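// Recap of the dedup flow implemented by produce above (illustrative, with a
// hypothetical unique id "abc" on stream "s"):
//
//	GET s.abc        -> hit: already solved, return a promise resolved with the response
//	GET s.abc.msgId  -> hit: already enqueued by some producer, register a promise
//	                    under "abc" pointing at that msgId and just wait
//	otherwise        -> XADD the request, SET s.abc.msgId = <new msgId> (with
//	                    ResponseEntryTimeout) and register a fresh promise
//
// Two producers submitting the same request therefore share one stream entry and
// converge on the same response key.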
- if !p.havePromiseFor(msg.ID) { - continue - } - alive, found := active[msg.Consumer] - if !found { - alive = p.isConsumerAlive(ctx, msg.Consumer) - active[msg.Consumer] = alive - } - if alive { - continue - } - ids = append(ids, msg.ID) - } - return ids, nil + return p.produce(ctx, id, value) } diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 9f774b6372..69839737e3 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -2,6 +2,9 @@ package pubsub import ( "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" "errors" "fmt" "os" @@ -10,11 +13,11 @@ import ( "time" "github.com/ethereum/go-ethereum/log" - "github.com/go-redis/redis/v8" "github.com/google/go-cmp/cmp" "github.com/google/uuid" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/redisutil" + "github.com/redis/go-redis/v9" ) var ( @@ -23,7 +26,18 @@ var ( ) type testRequest struct { - Request string + Request string + SelfHash string // A unique identifier which can be used to compare any two testRequests +} + +// SetSelfHash should only be called once. In the context of redis streams, by the producer +func (t *testRequest) SetSelfHash() { + jsonData, err := json.Marshal(t) + if err != nil { + return + } + hash := sha256.Sum256(jsonData) + t.SelfHash = hex.EncodeToString(hash[:]) } type testResponse struct { @@ -45,36 +59,21 @@ func destroyRedisGroup(ctx context.Context, t *testing.T, streamName string, cli } } -type configOpt interface { - apply(consCfg *ConsumerConfig, prodCfg *ProducerConfig) -} - -type withReproduce struct { - reproduce bool -} - -func (e *withReproduce) apply(_ *ConsumerConfig, prodCfg *ProducerConfig) { - prodCfg.EnableReproduce = e.reproduce -} - func producerCfg() *ProducerConfig { return &ProducerConfig{ - EnableReproduce: TestProducerConfig.EnableReproduce, - CheckPendingInterval: TestProducerConfig.CheckPendingInterval, - KeepAliveTimeout: TestProducerConfig.KeepAliveTimeout, CheckResultInterval: TestProducerConfig.CheckResultInterval, - CheckPendingItems: TestProducerConfig.CheckPendingItems, + ResponseEntryTimeout: TestProducerConfig.ResponseEntryTimeout, } } func consumerCfg() *ConsumerConfig { return &ConsumerConfig{ ResponseEntryTimeout: TestConsumerConfig.ResponseEntryTimeout, - KeepAliveTimeout: TestConsumerConfig.KeepAliveTimeout, + IdletimeToAutoclaim: TestConsumerConfig.IdletimeToAutoclaim, } } -func newProducerConsumers(ctx context.Context, t *testing.T, opts ...configOpt) (redis.UniversalClient, string, *Producer[testRequest, testResponse], []*Consumer[testRequest, testResponse]) { +func newProducerConsumers(ctx context.Context, t *testing.T) (redis.UniversalClient, string, *Producer[testRequest, testResponse], []*Consumer[testRequest, testResponse]) { t.Helper() redisClient, err := redisutil.RedisClientFromURL(redisutil.CreateTestRedis(ctx, t)) if err != nil { @@ -82,9 +81,7 @@ func newProducerConsumers(ctx context.Context, t *testing.T, opts ...configOpt) } prodCfg, consCfg := producerCfg(), consumerCfg() streamName := fmt.Sprintf("stream:%s", uuid.NewString()) - for _, o := range opts { - o.apply(consCfg, prodCfg) - } + producer, err := NewProducer[testRequest, testResponse](redisClient, streamName, prodCfg) if err != nil { t.Fatalf("Error creating new producer: %v", err) @@ -102,13 +99,6 @@ func newProducerConsumers(ctx context.Context, t *testing.T, opts ...configOpt) t.Cleanup(func() { ctx := context.Background() destroyRedisGroup(ctx, t, streamName, producer.client) - var keys []string - for _, c := range
consumers { - keys = append(keys, c.heartBeatKey()) - } - if _, err := producer.client.Del(ctx, keys...).Result(); err != nil { - log.Debug("Error deleting heartbeat keys", "error", err) - } }) return redisClient, streamName, producer, consumers } @@ -125,10 +115,13 @@ func msgForIndex(idx int) string { return fmt.Sprintf("msg: %d", idx) } -func wantMessages(n int) []string { +func wantMessages(n int, group string, withDuplicates bool) []string { var ret []string for i := 0; i < n; i++ { - ret = append(ret, msgForIndex(i)) + ret = append(ret, group+msgForIndex(i)) + if withDuplicates && i%3 == 0 { + ret = append(ret, msgForIndex(i)) + } } sort.Strings(ret) return ret } @@ -143,10 +136,14 @@ func flatten(responses [][]string) []string { return ret } -func produceMessages(ctx context.Context, msgs []string, producer *Producer[testRequest, testResponse]) ([]*containers.Promise[testResponse], error) { +func produceMessages(ctx context.Context, msgs []string, producer *Producer[testRequest, testResponse], useUniqueIdentifier bool) ([]*containers.Promise[testResponse], error) { var promises []*containers.Promise[testResponse] - for i := 0; i < messagesCount; i++ { - promise, err := producer.Produce(ctx, testRequest{Request: msgs[i]}) + for i := 0; i < len(msgs); i++ { + req := testRequest{Request: msgs[i]} + if useUniqueIdentifier { + req.SetSelfHash() + } + promise, err := producer.Produce(ctx, req.SelfHash, req) if err != nil { return nil, err } @@ -198,7 +195,7 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques } gotMessages[idx][res.ID] = res.Value.Request resp := fmt.Sprintf("result for: %v", res.ID) - if err := c.SetResult(ctx, res.ID, testResponse{Response: resp}); err != nil { + if err := c.SetResult(ctx, res.Value.SelfHash, res.ID, testResponse{Response: resp}); err != nil { t.Errorf("Error setting a result: %v", err) } wantResponses[idx] = append(wantResponses[idx], resp) } @@ -208,40 +205,86 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques return wantResponses } -func TestRedisProduce(t *testing.T) { +func TestRedisProduceComplex(t *testing.T) { log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) t.Parallel() for _, tc := range []struct { - name string - killConsumers bool - autoRecover bool + name string + entries1Count int + entries2Count int + numProducers int + withDuplicates bool // If set, every fourth generated entry of each entries list is identical across lists + killConsumers bool }{ { - name: "all consumers are active", - killConsumers: false, - autoRecover: false, + name: "one producer, all consumers are active", + entries1Count: messagesCount, + numProducers: 1, }, { - name: "some consumers killed, others should take over their work", + name: "one producer, some consumers killed, others should take over their work", + entries1Count: messagesCount, + numProducers: 1, killConsumers: true, - autoRecover: true, }, { - name: "some consumers killed, should return failure", - killConsumers: true, - autoRecover: false, + name: "two producers, all consumers are active, all unique entries", + entries1Count: 20, + entries2Count: 20, + numProducers: 2, }, + { + name: "two producers, all consumers are active, some duplicate entries", + entries1Count: 20, + entries2Count: 20, + numProducers: 2, + withDuplicates: true, + }, + { + name: "two producers, some consumers killed, others should take over their work, some duplicate entries, unequal number of requests from
producers", + entries1Count: messagesCount, + entries2Count: 2 * messagesCount, + numProducers: 2, + withDuplicates: true, + killConsumers: true, }, } { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - redisClient, streamName, producer, consumers := newProducerConsumers(ctx, t, &withReproduce{tc.autoRecover}) - producer.Start(ctx) - wantMsgs := wantMessages(messagesCount) - promises, err := produceMessages(ctx, wantMsgs, producer) - if err != nil { - t.Fatalf("Error producing messages: %v", err) + + var producers []*Producer[testRequest, testResponse] + redisClient, streamName, producer, consumers := newProducerConsumers(ctx, t) + producers = append(producers, producer) + if tc.numProducers == 2 { + producer, err := NewProducer[testRequest, testResponse](redisClient, streamName, producerCfg()) + if err != nil { + t.Fatalf("Error creating second producer: %v", err) + } + producers = append(producers, producer) } + + for _, producer := range producers { + producer.Start(ctx) + } + + var entries [][]string + if tc.numProducers == 2 { + entries = append(entries, wantMessages(tc.entries1Count, "1.", tc.withDuplicates)) + entries = append(entries, wantMessages(tc.entries2Count, "2.", tc.withDuplicates)) + } else { + entries = append(entries, wantMessages(tc.entries1Count, "", tc.withDuplicates)) + } + + var promises [][]*containers.Promise[testResponse] + for i := 0; i < tc.numProducers; i++ { + prs, err := produceMessages(ctx, entries[i], producers[i], tc.numProducers == 2) + if err != nil { + t.Fatalf("Error producing messages from producer%d: %v", i, err) + } + promises = append(promises, prs) + } + gotMessages := messagesMaps(len(consumers)) if tc.killConsumers { // Consumer messages in every third consumer but don't ack them to check @@ -252,40 +295,66 @@ func TestRedisProduce(t *testing.T) { if err != nil { t.Errorf("Error consuming message: %v", err) } - if !tc.autoRecover { - gotMessages[i][req.ID] = req.Value.Request + if req == nil { + t.Error("Didn't consume any message") } consumers[i].StopAndWait() } } + time.Sleep(time.Second) wantResponses := consume(ctx, t, consumers, gotMessages) - gotResponses, errIndexes := awaitResponses(ctx, promises) - if len(errIndexes) != 0 && tc.autoRecover { - t.Fatalf("Error awaiting responses: %v", errIndexes) + + var gotResponses []string + for i := 0; i < tc.numProducers; i++ { + grs, errIndexes := awaitResponses(ctx, promises[i]) + if len(errIndexes) != 0 { + t.Fatalf("Error awaiting responses from promises%d: %v", i, errIndexes) + } + gotResponses = append(gotResponses, grs...) } - producer.StopAndWait() + for _, c := range consumers { c.StopAndWait() } + got, err := mergeValues(gotMessages) if err != nil { t.Fatalf("mergeMaps() unexpected error: %v", err) } + + var combinedEntries []string + for i := 0; i < tc.numProducers; i++ { + combinedEntries = append(combinedEntries, entries[i]...) 
+ } + wantMsgs := removeDuplicates(combinedEntries) if diff := cmp.Diff(wantMsgs, got); diff != "" { t.Errorf("Unexpected diff (-want +got):\n%s\n", diff) } + + // Consumers are not supposed to get duplicate requests + gotResponses = removeDuplicates(gotResponses) wantResp := flatten(wantResponses) - sort.Strings(gotResponses) if diff := cmp.Diff(wantResp, gotResponses); diff != "" { t.Errorf("Unexpected diff in responses:\n%s\n", diff) } - if cnt := producer.promisesLen(); cnt != 0 { - t.Errorf("Producer still has %d unfullfilled promises", cnt) + + // Check that all of each producer's promises were responded to + for i := 0; i < tc.numProducers; i++ { + if cnt := producers[i].promisesLen(); cnt != 0 { + t.Errorf("Producer%d still has %d unfulfilled promises", i, cnt) + } } + // Trigger a trim - producer.checkResponses(ctx) + time.Sleep(time.Second) + for i := 0; i < tc.numProducers; i++ { + producers[i].checkResponses(ctx) + producers[i].StopAndWait() + } + + // Check that no messages remain in the stream msgs, err := redisClient.XRange(ctx, streamName, "-", "+").Result() if err != nil { t.Errorf("XRange failed: %v", err) } @@ -297,6 +366,19 @@ } } +func removeDuplicates(list []string) []string { + capture := map[string]bool{} + var ret []string + for _, elem := range list { + if _, found := capture[elem]; !found { + ret = append(ret, elem) + capture[elem] = true + } + } + sort.Strings(ret) + return ret +} + // mergeValues merges maps from the slice and returns their values. // Returns an error if there exists a duplicate key. func mergeValues(messages []map[string]string) ([]string, error) { diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 62053c17f1..4d864b6a77 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -19,7 +19,6 @@ import ( "testing" "time" - "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/util" @@ -39,6 +38,7 @@ import ( "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" rediscons "github.com/offchainlabs/nitro/validator/valnode/redis" + "github.com/redis/go-redis/v9" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 1b8926a1b9..e9b2adabe8 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go index 59e3b0e0f9..c30a59cb8c 100644 --- a/util/redisutil/redis_coordinator.go +++ b/util/redisutil/redis_coordinator.go @@ -6,7 +6,7 @@ import ( "fmt" "strings" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" "github.com/ethereum/go-ethereum/log" diff --git a/util/redisutil/redisutil.go b/util/redisutil/redisutil.go index f89c250e9a..01ba836d5b 100644 --- a/util/redisutil/redisutil.go +++ b/util/redisutil/redisutil.go @@ -1,6 +1,6 @@ package redisutil -import "github.com/go-redis/redis/v8" +import "github.com/redis/go-redis/v9" func RedisClientFromURL(url string) (redis.UniversalClient, error) { if url == "" { diff --git a/validator/client/redis/producer.go b/validator/client/redis/producer.go
index b3ad0f8839..7e2578d100 100644 --- a/validator/client/redis/producer.go +++ b/validator/client/redis/producer.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/pubsub" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/redisutil" @@ -15,6 +14,7 @@ import ( "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_api" "github.com/offchainlabs/nitro/validator/server_common" + "github.com/redis/go-redis/v9" "github.com/spf13/pflag" ) @@ -125,7 +125,8 @@ func (c *ValidationClient) Launch(entry *validator.ValidationInput, moduleRoot c errPromise := containers.NewReadyPromise(validator.GoGlobalState{}, fmt.Errorf("no validation is configured for wasm root %v", moduleRoot)) return server_common.NewValRun(errPromise, moduleRoot) } - promise, err := producer.Produce(c.GetContext(), entry) + entry.SetSelfHash() + promise, err := producer.Produce(c.GetContext(), entry.SelfHash, entry) if err != nil { errPromise := containers.NewReadyPromise(validator.GoGlobalState{}, fmt.Errorf("error producing input: %w", err)) return server_common.NewValRun(errPromise, moduleRoot) diff --git a/validator/validation_entry.go b/validator/validation_entry.go index 133a67a8a8..326bbc355b 100644 --- a/validator/validation_entry.go +++ b/validator/validation_entry.go @@ -1,6 +1,10 @@ package validator import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbutil" ) @@ -21,4 +25,16 @@ type ValidationInput struct { DelayedMsg []byte StartState GoGlobalState DebugChain bool + + SelfHash string // A unique identifier which can be used to compare any two instances of ValidationInput +} + +// SetSelfHash should only be called once.
In the context of redis streams, by the producer, before submitting a request +func (v *ValidationInput) SetSelfHash() { + jsonData, err := json.Marshal(v) + if err != nil { + return + } + hash := sha256.Sum256(jsonData) + v.SelfHash = hex.EncodeToString(hash[:]) } diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index fb7db1e870..13bf19ac43 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -99,7 +99,7 @@ func (s *ValidationServer) Start(ctx_in context.Context) { log.Error("Error validating", "request value", req.Value, "error", err) return 0 } - if err := c.SetResult(ctx, req.ID, res); err != nil { + if err := c.SetResult(ctx, req.Value.SelfHash, req.ID, res); err != nil { log.Error("Error setting result for request", "id", req.ID, "result", res, "error", err) return 0 } From e155ebb60c157af9ea7188a419b1495085262036 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 15 Aug 2024 21:27:47 +0530 Subject: [PATCH 02/41] trim acknotifiers map and use previous keepalive timeouts --- pubsub/consumer.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 3adb571343..410c3c75f4 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -23,12 +23,12 @@ type ConsumerConfig struct { var DefaultConsumerConfig = ConsumerConfig{ ResponseEntryTimeout: time.Hour, - IdletimeToAutoclaim: 30 * time.Minute, + IdletimeToAutoclaim: 5 * time.Minute, } var TestConsumerConfig = ConsumerConfig{ ResponseEntryTimeout: time.Minute, - IdletimeToAutoclaim: time.Second, + IdletimeToAutoclaim: 30 * time.Millisecond, } func ConsumerConfigAddOptions(prefix string, f *pflag.FlagSet) { @@ -149,7 +149,7 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req case <-ctx.Done(): log.Info("Context done while claiming message to indicate heartbeat", "error", ctx.Err().Error()) return - case <-time.After(c.cfg.IdletimeToAutoclaim / 3): + case <-time.After(c.cfg.IdletimeToAutoclaim / 10): } } }) @@ -177,6 +177,9 @@ func (c *Consumer[Request, Response]) SetResult(ctx context.Context, id string, if _, err := c.client.XAck(ctx, c.redisStream, c.redisGroup, messageID).Result(); err != nil { return fmt.Errorf("acking message: %v, error: %w", messageID, err) } - close(c.ackNotifiers[messageID]) + if ackNotifier, found := c.ackNotifiers[messageID]; found { + close(ackNotifier) + delete(c.ackNotifiers, messageID) + } return nil } From 1473c602064d8818e7a7985630fabc1aa41714de Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 16 Aug 2024 10:58:13 +0530 Subject: [PATCH 03/41] Use faster hash function --- go.mod | 2 +- go.sum | 4 +-- pubsub/consumer.go | 41 ++++++++++++----------------- pubsub/pubsub_test.go | 6 +++-- validator/validation_entry.go | 7 +++-- validator/valnode/redis/consumer.go | 8 ++++-- 6 files changed, 33 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index 5453205c5d..3ba779ae62 100644 --- a/go.mod +++ b/go.mod @@ -80,7 +80,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f // indirect github.com/cockroachdb/redact v1.1.3 // indirect diff --git a/go.sum b/go.sum index bf0b385631..79d9263076 100644 ---
a/go.sum +++ b/go.sum @@ -151,8 +151,8 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 410c3c75f4..9c3785ee3f 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -40,12 +40,11 @@ func ConsumerConfigAddOptions(prefix string, f *pflag.FlagSet) { // indicate it is alive. type Consumer[Request any, Response any] struct { stopwaiter.StopWaiter - id string - client redis.UniversalClient - redisStream string - redisGroup string - cfg *ConsumerConfig - ackNotifiers map[string]chan struct{} + id string + client redis.UniversalClient + redisStream string + redisGroup string + cfg *ConsumerConfig } type Message[Request any] struct { @@ -58,12 +57,11 @@ func NewConsumer[Request any, Response any](client redis.UniversalClient, stream return nil, fmt.Errorf("redis stream name cannot be empty") } return &Consumer[Request, Response]{ - id: uuid.NewString(), - client: client, - redisStream: streamName, - redisGroup: streamName, // There is 1-1 mapping of redis stream and consumer group. - cfg: cfg, - ackNotifiers: make(map[string]chan struct{}), + id: uuid.NewString(), + client: client, + redisStream: streamName, + redisGroup: streamName, // There is 1-1 mapping of redis stream and consumer group. + cfg: cfg, }, nil } @@ -86,7 +84,7 @@ func (c *Consumer[Request, Response]) StreamName() string { // Consumer first checks it there exists pending message that is claimed by // unresponsive consumer, if not then reads from the stream. 
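// (Editor's aside on the signature change below, hedged, not part of the patch.)
// Consume now hands the caller an ack-notifier channel instead of tracking
// notifiers in an internal map keyed by message ID. The heartbeat goroutine keeps
// re-claiming the message at an interval until the caller closes that channel
// (normally right after SetResult) or the context ends; once it is closed, the
// message's idle time grows and it becomes eligible for XAUTOCLAIM by another consumer.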
-func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Request], error) { +func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Request], chan struct{}, error) { // First try to XAUTOCLAIM, this prioritizes processing PEL messages // that have been waiting for more than IdletimeToAutoclaim duration messages, _, err := c.client.XAutoClaim(ctx, &redis.XAutoClaimArgs{ @@ -109,13 +107,13 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req Block: time.Millisecond, // 0 seems to block the read instead of immediately returning }).Result() if errors.Is(err, redis.Nil) { - return nil, nil + return nil, nil, nil } if err != nil { - return nil, fmt.Errorf("reading message for consumer: %q: %w", c.id, err) + return nil, nil, fmt.Errorf("reading message for consumer: %q: %w", c.id, err) } if len(res) != 1 || len(res[0].Messages) != 1 { - return nil, fmt.Errorf("redis returned entries: %+v, for querying single message", res) + return nil, nil, fmt.Errorf("redis returned entries: %+v, for querying single message", res) } messages = res[0].Messages } @@ -125,11 +123,11 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req data, ok = (value).(string) ) if !ok { - return nil, fmt.Errorf("casting request to string: %w", err) + return nil, nil, fmt.Errorf("casting request to string: %w", err) } var req Request if err := json.Unmarshal([]byte(data), &req); err != nil { - return nil, fmt.Errorf("unmarshaling value: %v, error: %w", value, err) + return nil, nil, fmt.Errorf("unmarshaling value: %v, error: %w", value, err) } ackNotifier := make(chan struct{}) c.StopWaiter.LaunchThread(func(ctx context.Context) { @@ -153,12 +151,11 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req } } }) - c.ackNotifiers[messages[0].ID] = ackNotifier log.Debug("Redis stream consuming", "consumer_id", c.id, "message_id", messages[0].ID) return &Message[Request]{ ID: messages[0].ID, Value: req, - }, nil + }, ackNotifier, nil } func (c *Consumer[Request, Response]) SetResult(ctx context.Context, id string, messageID string, result Response) error { @@ -177,9 +174,5 @@ func (c *Consumer[Request, Response]) SetResult(ctx context.Context, id string, if _, err := c.client.XAck(ctx, c.redisStream, c.redisGroup, messageID).Result(); err != nil { return fmt.Errorf("acking message: %v, error: %w", messageID, err) } - if ackNotifier, found := c.ackNotifiers[messageID]; found { - close(ackNotifier) - delete(c.ackNotifiers, messageID) - } return nil } diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 69839737e3..b1ffdca0fd 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -182,7 +182,7 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques func(ctx context.Context) { for { - res, err := c.Consume(ctx) + res, ackNotifier, err := c.Consume(ctx) if err != nil { if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { t.Errorf("Consume() unexpected error: %v", err) @@ -198,6 +198,7 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques if err := c.SetResult(ctx, res.Value.SelfHash, res.ID, testResponse{Response: resp}); err != nil { t.Errorf("Error setting a result: %v", err) } + close(ackNotifier) wantResponses[idx] = append(wantResponses[idx], resp) } }) @@ -291,13 +292,14 @@ func TestRedisProduceComplex(t *testing.T) { // that other consumers will claim ownership on those messages. 
for i := 0; i < len(consumers); i += 3 { consumers[i].Start(ctx) - req, err := consumers[i].Consume(ctx) + req, _, err := consumers[i].Consume(ctx) if err != nil { t.Errorf("Error consuming message: %v", err) } if req == nil { t.Error("Didn't consume any message") } + // Kills the ackNotifier, hence allowing XAUTOCLAIM consumers[i].StopAndWait() } diff --git a/validator/validation_entry.go b/validator/validation_entry.go index 326bbc355b..dc102a4055 100644 --- a/validator/validation_entry.go +++ b/validator/validation_entry.go @@ -1,10 +1,10 @@ package validator import ( - "crypto/sha256" - "encoding/hex" "encoding/json" + "fmt" + "github.com/cespare/xxhash/v2" "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbutil" ) @@ -35,6 +35,5 @@ func (v *ValidationInput) SetSelfHash() { if err != nil { return } - hash := sha256.Sum256(jsonData) - v.SelfHash = hex.EncodeToString(hash[:]) + v.SelfHash = fmt.Sprintf("%d", xxhash.Sum64(jsonData)) } diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index 13bf19ac43..e5a5dae1df 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -84,7 +84,7 @@ func (s *ValidationServer) Start(ctx_in context.Context) { case <-ready: // Wait until the stream exists and start consuming iteratively. } s.StopWaiter.CallIteratively(func(ctx context.Context) time.Duration { - req, err := c.Consume(ctx) + req, ackNotifier, err := c.Consume(ctx) if err != nil { log.Error("Consuming request", "error", err) return 0 } @@ -97,9 +97,13 @@ func (s *ValidationServer) Start(ctx_in context.Context) { res, err := valRun.Await(ctx) if err != nil { log.Error("Error validating", "request value", req.Value, "error", err) + close(ackNotifier) return 0 } - if err := c.SetResult(ctx, req.Value.SelfHash, req.ID, res); err != nil { + err = c.SetResult(ctx, req.Value.SelfHash, req.ID, res) + // Even on error we close ackNotifier, as there's no retry mechanism here and closing it will allow other consumers to autoclaim + close(ackNotifier) + if err != nil { log.Error("Error setting result for request", "id", req.ID, "result", res, "error", err) return 0 }
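A hedged illustration of the hash swap above (editor's sketch, not part of the patch; exampleRequest is a hypothetical stand-in for validator.ValidationInput): identical JSON encodings always produce the same xxhash.Sum64 value, so two producers submitting the same request derive the same SelfHash and therefore map onto the same redis entry.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// exampleRequest is a hypothetical stand-in for validator.ValidationInput.
type exampleRequest struct {
	Request  string
	SelfHash string
}

func main() {
	a, b := exampleRequest{Request: "block 42"}, exampleRequest{Request: "block 42"}
	ja, _ := json.Marshal(a)
	jb, _ := json.Marshal(b)
	// xxhash is a fast non-cryptographic hash; collisions are unlikely but not
	// impossible, which is acceptable for de-duplicating equivalent requests.
	a.SelfHash = fmt.Sprintf("%d", xxhash.Sum64(ja))
	b.SelfHash = fmt.Sprintf("%d", xxhash.Sum64(jb))
	fmt.Println(a.SelfHash == b.SelfHash) // true
}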
From 40e6b9bce33b2af048561889a7ed914e113def77 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 26 Aug 2024 13:43:32 +0530 Subject: [PATCH 04/41] address PR comments, handle memory better and add test to cover incorrect request scenario --- pubsub/consumer.go | 18 ++++-- pubsub/producer.go | 129 ++++++++++++++++++++++++++++-------- pubsub/pubsub_test.go | 91 +++++++++++++++++------------ 3 files changed, 155 insertions(+), 83 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 9c3785ee3f..2c4787101d 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -18,7 +18,7 @@ type ConsumerConfig struct { // Timeout of result entry in Redis. ResponseEntryTimeout time.Duration `koanf:"response-entry-timeout"` // Minimum idle time after which messages will be autoclaimed - IdletimeToAutoclaim time.Duration `koanf:"Idletime-to-autoclaim"` + IdletimeToAutoclaim time.Duration `koanf:"idletime-to-autoclaim"` } var DefaultConsumerConfig = ConsumerConfig{ @@ -33,7 +33,7 @@ var TestConsumerConfig = ConsumerConfig{ func ConsumerConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".response-entry-timeout", DefaultConsumerConfig.ResponseEntryTimeout, "timeout for response entry") - f.Duration(prefix+".Idletime-to-autoclaim", DefaultConsumerConfig.IdletimeToAutoclaim, "After a message spends this amount of time in PEL (Pending Entries List i.e claimed by another consumer but not Acknowledged) it will be allowed to be autoclaimed by other consumers") + f.Duration(prefix+".idletime-to-autoclaim", DefaultConsumerConfig.IdletimeToAutoclaim, "After a message spends this amount of time in the PEL (Pending Entries List, i.e. claimed by another consumer but not acknowledged) it will be allowed to be autoclaimed by other consumers") } // Consumer implements a consumer for redis stream provides heartbeat to @@ -93,9 +93,12 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req MinIdle: c.cfg.IdletimeToAutoclaim, // Minimum idle time for messages to claim (in milliseconds) Stream: c.redisStream, Start: "0", - Count: 1, // Limit the number of messages to claim + Count: 5, // Try looking for 5 entries in PEL, this assumes there are a maximum of 5 consumers in this redisGroup }).Result() - if len(messages) != 1 || err != nil { + if len(messages) == 0 || err != nil { + if err != nil { + log.Error("error from xautoclaim", "err", err) + } // Fallback to reading new messages res, err := c.client.XReadGroup(ctx, &redis.XReadGroupArgs{ Group: c.redisGroup, @@ -132,7 +135,9 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req ackNotifier := make(chan struct{}) c.StopWaiter.LaunchThread(func(ctx context.Context) { for { - if err := c.client.XClaim(ctx, &redis.XClaimArgs{ + // Use XClaimJustID so that we have a clear difference between invalid requests that are claimed multiple times due to xautoclaim and + // valid requests that are just being claimed at regular intervals to indicate heartbeat + if err := c.client.XClaimJustID(ctx, &redis.XClaimArgs{ Stream: c.redisStream, Group: c.redisGroup, Consumer: c.id, @@ -174,5 +179,8 @@ func (c *Consumer[Request, Response]) SetResult(ctx context.Context, id string, if _, err := c.client.XAck(ctx, c.redisStream, c.redisGroup, messageID).Result(); err != nil { return fmt.Errorf("acking message: %v, error: %w", messageID, err) } + if _, err := c.client.XDel(ctx, c.redisStream, messageID).Result(); err != nil { + return fmt.Errorf("deleting message: %v, error: %w", messageID, err) + } return nil } diff --git a/pubsub/producer.go b/pubsub/producer.go index df6e7d5a28..cf5dfdbd36 100644 --- a/pubsub/producer.go +++ b/pubsub/producer.go @@ -57,21 +57,26 @@ type ProducerConfig struct { CheckResultInterval time.Duration `koanf:"check-result-interval"` // Timeout of entry's written to redis by producer ResponseEntryTimeout time.Duration `koanf:"response-entry-timeout"` + // RequestTimeout is a TTL for any message sent to the redis stream + RequestTimeout time.Duration `koanf:"request-timeout"` } var DefaultProducerConfig = ProducerConfig{ CheckResultInterval: 5 * time.Second, ResponseEntryTimeout: time.Hour, + RequestTimeout: time.Hour, // should we
increase this? } var TestProducerConfig = ProducerConfig{ CheckResultInterval: 5 * time.Millisecond, ResponseEntryTimeout: time.Minute, + RequestTimeout: 2 * time.Second, } func ProducerAddConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".check-result-interval", DefaultProducerConfig.CheckResultInterval, "interval in which producer checks pending messages whether consumer processing them is inactive") f.Duration(prefix+".response-entry-timeout", DefaultProducerConfig.ResponseEntryTimeout, "timeout after which responses written from producer to the redis are cleared. Currently used for the key mapping unique request id to redis stream message id") + f.Duration(prefix+".request-timeout", DefaultProducerConfig.RequestTimeout, "timeout after which the message in redis stream is considered as errored, this prevents workers from working on wrong requests indefinitely") } func NewProducer[Request any, Response any](client redis.UniversalClient, streamName string, cfg *ProducerConfig) (*Producer[Request, Response], error) { @@ -91,37 +96,58 @@ func NewProducer[Request any, Response any](client redis.UniversalClient, stream }, nil } -func setMaxMsgIdInt(maxMsgIdInt *[2]uint64, msgId string) error { - idParts := strings.Split(msgId, "-") - if len(idParts) != 2 { - return fmt.Errorf("invalid i.d: %v", msgId) +// cmpMsgId compares two msgid's and returns (0) if equal, (-1) if msgId1 < msgId2, (1) if msgId1 > msgId2, (-2) if not comparable (or error) +func cmpMsgId(msgId1, msgId2 string) int { + getUintParts := func(msgId string) ([2]uint64, error) { + idParts := strings.Split(msgId, "-") + if len(idParts) != 2 { + return [2]uint64{}, fmt.Errorf("invalid i.d: %v", msgId) + } + idTimeStamp, err := strconv.ParseUint(idParts[0], 10, 64) + if err != nil { + return [2]uint64{}, fmt.Errorf("invalid i.d: %v err: %w", msgId, err) + } + idSerial, err := strconv.ParseUint(idParts[1], 10, 64) + if err != nil { + return [2]uint64{}, fmt.Errorf("invalid i.d serial: %v err: %w", msgId, err) + } + return [2]uint64{idTimeStamp, idSerial}, nil } - idTimeStamp, err := strconv.ParseUint(idParts[0], 10, 64) + id1, err := getUintParts(msgId1) if err != nil { - return fmt.Errorf("invalid i.d: %v err: %w", msgId, err) - } - if idTimeStamp < maxMsgIdInt[0] { - return nil + log.Trace("error comparing msgIds", "msgId1", msgId1, "msgId2", msgId2) + return -2 } - idSerial, err := strconv.ParseUint(idParts[1], 10, 64) + id2, err := getUintParts(msgId2) if err != nil { - return fmt.Errorf("invalid i.d serial: %v err: %w", msgId, err) + log.Trace("error comparing msgIds", "msgId1", msgId1, "msgId2", msgId2) + return -2 } - if idTimeStamp > maxMsgIdInt[0] { - maxMsgIdInt[0] = idTimeStamp - maxMsgIdInt[1] = idSerial - return nil + if id1[0] < id2[0] { + return -1 + } else if id1[0] > id2[0] { + return 1 + } else if id1[1] < id2[1] { + return -1 + } else if id1[1] > id2[1] { + return 1 } - // idTimeStamp == maxMsgIdInt[0] - if idSerial > maxMsgIdInt[1] { - maxMsgIdInt[1] = idSerial - } - return nil + return 0 } // checkResponses checks iteratively whether response for the promise is ready. 
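// (Editor's summary of the rework below, hedged, not part of the patch.)
// The loop now roughly does the following:
//  1. snapshot the consumer group's PEL via XPending (pelData);
//  2. for each tracked promise, try to read its response entry;
//  3. on redis.Nil, error out the promise only when its message ID is older than
//     the PEL's lower bound (or, if the PEL is empty, the RequestTimeout cutoff);
//  4. XTrimMinID the stream up to pelData.Lower, and claim+ack+delete that lower
//     entry itself once it outlives RequestTimeout.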
func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.Duration { - maxMsgIdInt := [2]uint64{0, 0} + pelData, err := p.client.XPending(ctx, p.redisStream, p.redisGroup).Result() + if err != nil { + log.Error("error getting PEL data from xpending, xtrimming is disabled", "err", err) + } + deletePromise := func(id string) { + // Try deleting UNIQUEID_MSGID_MAP_KEY corresponding to this id from redis + if err := p.client.Del(ctx, MessageKeyFor(p.redisStream, id)+UNIQUEID_MSGID_MAP_KEY).Err(); err != nil { + log.Error("Error deleting key from redis that flags that a request is being processed", "err", err) + } + delete(p.promises, id) + } p.promisesLock.Lock() defer p.promisesLock.Unlock() responded := 0 @@ -135,16 +161,22 @@ func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.D if err != nil { if !errors.Is(err, redis.Nil) { log.Error("Error reading value in redis", "key", id, "error", err) + } else { + // The request this producer is waiting for is past its TTL or is older than the current PEL's lower bound, + // so it is safe to error and stop tracking this promise + allowedOldestID := fmt.Sprintf("%d-0", time.Now().Add(-p.cfg.RequestTimeout).UnixMilli()) + if pelData != nil && pelData.Lower != "" { + allowedOldestID = pelData.Lower + } + if cmpMsgId(msgIDAndPromise.msgID, allowedOldestID) == -1 { + msgIDAndPromise.promise.ProduceError(errors.New("error getting response, request has been waiting for too long")) + log.Error("error getting response, request has been waiting past its TTL") + errored++ + deletePromise(id) + } } continue } - // We keep track of a maxMsgId of a successfully solved request, because messages - // with id lower than this are either ack-ed or in PEL, so its safe to call XTRIMMINID on maxMsgId - errSetId := setMaxMsgIdInt(&maxMsgIdInt, msgIDAndPromise.msgID) - if errSetId != nil { - log.Error("error setting maxMsgId", "err", err) - return p.cfg.CheckResultInterval - } var resp Response if err := json.Unmarshal([]byte(res), &resp); err != nil { msgIDAndPromise.promise.ProduceError(fmt.Errorf("error unmarshalling: %w", err)) @@ -154,21 +186,36 @@ func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.D msgIDAndPromise.promise.Produce(resp) responded++ } - // Try deleting UNIQUEID_MSGID_MAP_KEY corresponding to this id from redis - if err := p.client.Del(ctx, msgKey+UNIQUEID_MSGID_MAP_KEY).Err(); err != nil { - log.Error("Error deleting key from redis that flags that a request is being processed", "err", err) - } - delete(p.promises, id) + deletePromise(id) } - var trimmed int64 - var trimErr error - maxMsgId := "+" - // If at least response for one promise was found, find the maximum of the found ones and XTRIMMINID from that msg id + 1 - if maxMsgIdInt[0] > 0 { - maxMsgId = fmt.Sprintf("%d-%d", maxMsgIdInt[0], maxMsgIdInt[1]+1) - trimmed, trimErr = p.client.XTrimMinID(ctx, p.redisStream, maxMsgId).Result() + // XDEL on the consumer side already deletes acked messages (marks them as deleted) but doesn't reclaim the memory, XTRIM helps in reclaiming this memory in normal conditions + // pelData might be outdated when we do the xtrim, but that's ok as the messages are also being trimmed by other producers + if pelData != nil && pelData.Lower != "" { + trimmed, trimErr := p.client.XTrimMinID(ctx, p.redisStream, pelData.Lower).Result() + log.Trace("trimming", "xTrimMinID", pelData.Lower, "trimmed", trimmed, "responded", responded, "errored", errored, "trim-err", trimErr) + // Check if pelData.Lower is past its TTL and if it is then ack it to remove it from the PEL and delete it; once + // it's taken out of the PEL, the producer that sent this request will handle the corresponding promise accordingly (if the PEL is non-empty) + allowedOldestID := fmt.Sprintf("%d-0", time.Now().Add(-p.cfg.RequestTimeout).UnixMilli()) + if cmpMsgId(pelData.Lower, allowedOldestID) == -1 { + if err := p.client.XClaim(ctx, &redis.XClaimArgs{ + Stream: p.redisStream, + Group: p.redisGroup, + Consumer: p.id, + MinIdle: 0, + Messages: []string{pelData.Lower}, + }).Err(); err != nil { + log.Error("error claiming PEL's lower message that's past its TTL", "msgID", pelData.Lower, "err", err) + return p.cfg.CheckResultInterval + } + if _, err := p.client.XAck(ctx, p.redisStream, p.redisGroup, pelData.Lower).Result(); err != nil { + log.Error("error acking PEL's lower message that's past its TTL", "msgID", pelData.Lower, "err", err) + return p.cfg.CheckResultInterval + } + if _, err := p.client.XDel(ctx, p.redisStream, pelData.Lower).Result(); err != nil { + log.Error("error deleting PEL's lower message that's past its TTL", "msgID", pelData.Lower, "err", err) + } + } } - log.Trace("trimming", "xTrimMinID", maxMsgId, "trimmed", trimmed, "responded", responded, "errored", errored, "trim-err", trimErr) return p.cfg.CheckResultInterval }
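For readers unfamiliar with the ID format: a redis stream ID is "<milliseconds>-<sequence>", and both the TTL cutoff and the trimming logic above lean on comparing those two numeric parts. A hedged usage sketch of the cmpMsgId helper introduced in this patch (assumes it sits in package pubsub with fmt and time imported):

// Illustrative only; the literal IDs here are arbitrary example values.
func cmpMsgIdSketch() {
	// A cutoff ID built exactly like allowedOldestID above: one hour ago, sequence 0.
	cutoff := fmt.Sprintf("%d-0", time.Now().Add(-time.Hour).UnixMilli())
	fmt.Println(cmpMsgId("1692187950000-7", "1692187950000-8")) // -1: same millisecond, lower sequence
	fmt.Println(cmpMsgId("1692187951000-0", "1692187950000-8")) // 1: newer millisecond wins
	fmt.Println(cmpMsgId("1692187950000-7", "1692187950000-7")) // 0: equal
	fmt.Println(cmpMsgId("not-an-id", cutoff))                  // -2: unparseable, not comparable
}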
diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index b1ffdca0fd..3883420f4e 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -26,8 +26,9 @@ var ( ) type testRequest struct { - Request string - SelfHash string // Is a unique identifier which can be used to compare any two validationInputs + Request string + IsInvalid bool + SelfHash string // A unique identifier that can be used to compare any two validationInputs } // SetSelfHash should be only called once.
In the context of redis streams- by the producer @@ -63,6 +64,7 @@ func producerCfg() *ProducerConfig { return &ProducerConfig{ CheckResultInterval: TestProducerConfig.CheckResultInterval, ResponseEntryTimeout: TestProducerConfig.ResponseEntryTimeout, + RequestTimeout: TestProducerConfig.RequestTimeout, } } @@ -136,10 +138,13 @@ func flatten(responses [][]string) []string { return ret } -func produceMessages(ctx context.Context, msgs []string, producer *Producer[testRequest, testResponse], useUniqueIdentifier bool) ([]*containers.Promise[testResponse], error) { +func produceMessages(ctx context.Context, msgs []string, producer *Producer[testRequest, testResponse], useUniqueIdentifier, withInvalidEntries bool) ([]*containers.Promise[testResponse], error) { var promises []*containers.Promise[testResponse] for i := 0; i < len(msgs); i++ { req := testRequest{Request: msgs[i]} + if withInvalidEntries && i%50 == 0 { + req.IsInvalid = true + } if useUniqueIdentifier { req.SetSelfHash() } @@ -194,12 +199,14 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques continue } gotMessages[idx][res.ID] = res.Value.Request - resp := fmt.Sprintf("result for: %v", res.ID) - if err := c.SetResult(ctx, res.Value.SelfHash, res.ID, testResponse{Response: resp}); err != nil { - t.Errorf("Error setting a result: %v", err) + if !res.Value.IsInvalid { + resp := fmt.Sprintf("result for: %v", res.ID) + if err := c.SetResult(ctx, res.Value.SelfHash, res.ID, testResponse{Response: resp}); err != nil { + t.Errorf("Error setting a result: %v", err) + } + wantResponses[idx] = append(wantResponses[idx], resp) } close(ackNotifier) - wantResponses[idx] = append(wantResponses[idx], resp) } }) } @@ -210,45 +217,50 @@ func TestRedisProduceComplex(t *testing.T) { log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) t.Parallel() for _, tc := range []struct { - name string - entries1Count int - entries2Count int - numProducers int - withDuplicates bool // If this is set, then every fourth entry (while generation) of each entries list is equal - killConsumers bool + name string + entriesCount []int + numProducers int + withDuplicates bool // If this is set, then every fourth entry (while generation) of each entries list is equal + killConsumers bool + withInvalidEntries bool // If this is set, then every 50th entry is invalid (requests that can't be solved by any consumer) }{ { - name: "one producer, all consumers are active", - entries1Count: messagesCount, - numProducers: 1, + name: "one producer, all consumers are active", + entriesCount: []int{messagesCount}, + numProducers: 1, }, { name: "one producer, some consumers killed, others should take over their work", - entries1Count: messagesCount, + entriesCount: []int{messagesCount}, numProducers: 1, killConsumers: true, }, { - name: "two producers, all consumers are active, all unique entries", - entries1Count: 20, - entries2Count: 20, - numProducers: 2, + name: "two producers, all consumers are active, all unique entries", + entriesCount: []int{20, 20}, + numProducers: 2, }, { name: "two producers, all consumers are active, some duplicate entries", - entries1Count: 20, - entries2Count: 20, + entriesCount: []int{20, 20}, numProducers: 2, withDuplicates: true, }, { name: "two producers, some consumers killed, others should take over their work, some duplicate entries, unequal number of requests from producers", - entries1Count: messagesCount, - entries2Count: 2 * messagesCount, + entriesCount: 
[]int{messagesCount, 2 * messagesCount}, numProducers: 2, withDuplicates: true, killConsumers: true, }, + { + name: "two producers, some consumers killed, others should take over their work, some duplicate entries, some invalid entries, unequal number of requests from producers", + entriesCount: []int{messagesCount, 2 * messagesCount}, + numProducers: 2, + withDuplicates: true, + killConsumers: true, + withInvalidEntries: true, + }, } { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) @@ -271,15 +283,15 @@ func TestRedisProduceComplex(t *testing.T) { var entries [][]string if tc.numProducers == 2 { - entries = append(entries, wantMessages(tc.entries1Count, "1.", tc.withDuplicates)) - entries = append(entries, wantMessages(tc.entries2Count, "2.", tc.withDuplicates)) + entries = append(entries, wantMessages(tc.entriesCount[0], "1.", tc.withDuplicates)) + entries = append(entries, wantMessages(tc.entriesCount[1], "2.", tc.withDuplicates)) } else { - entries = append(entries, wantMessages(tc.entries1Count, "", tc.withDuplicates)) + entries = append(entries, wantMessages(tc.entriesCount[0], "", tc.withDuplicates)) } var promises [][]*containers.Promise[testResponse] for i := 0; i < tc.numProducers; i++ { - prs, err := produceMessages(ctx, entries[i], producers[i], tc.numProducers == 2) + prs, err := produceMessages(ctx, entries[i], producers[i], tc.numProducers == 2, tc.withInvalidEntries) if err != nil { t.Fatalf("Error producing messages from producer%d: %v", i, err) } @@ -311,8 +323,17 @@ func TestRedisProduceComplex(t *testing.T) { var gotResponses []string for i := 0; i < tc.numProducers; i++ { grs, errIndexes := awaitResponses(ctx, promises[i]) - if len(errIndexes) != 0 { - t.Fatalf("Error awaiting responses from promises%d: %v", i, errIndexes) + if tc.withInvalidEntries { + if errIndexes[len(errIndexes)-1]+50 <= len(entries[i]) { + t.Fatalf("Unexpected number of invalid requests while awaiting responses") + } + for j, idx := range errIndexes { + if idx != j*50 { + t.Fatalf("Invalid request' index mismatch want: %d got %d", j*50, idx) + } + } + } else if len(errIndexes) != 0 { + t.Fatalf("Error awaiting responses from promises %d: %v", i, errIndexes) } gotResponses = append(gotResponses, grs...) } @@ -325,6 +346,7 @@ func TestRedisProduceComplex(t *testing.T) { if err != nil { t.Fatalf("mergeMaps() unexpected error: %v", err) } + got = removeDuplicates(got) var combinedEntries []string for i := 0; i < tc.numProducers; i++ { @@ -384,14 +406,9 @@ func removeDuplicates(list []string) []string { // mergeValues merges maps from the slice and returns their values. // Returns and error if there exists duplicate key. 
func mergeValues(messages []map[string]string) ([]string, error) { - res := make(map[string]any) var ret []string for _, m := range messages { - for k, v := range m { - if _, found := res[k]; found { - return nil, fmt.Errorf("duplicate key: %v", k) - } - res[k] = v + for _, v := range m { ret = append(ret, v) } } From b5fff687e82b90d97f347b47be5695edb5032e4b Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 26 Aug 2024 13:48:46 +0530 Subject: [PATCH 05/41] increase TestProducerConfig requestTimeout --- pubsub/producer.go | 2 +- pubsub/pubsub_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pubsub/producer.go b/pubsub/producer.go index cf5dfdbd36..ee5d4de528 100644 --- a/pubsub/producer.go +++ b/pubsub/producer.go @@ -70,7 +70,7 @@ var DefaultProducerConfig = ProducerConfig{ var TestProducerConfig = ProducerConfig{ CheckResultInterval: 5 * time.Millisecond, ResponseEntryTimeout: time.Minute, - RequestTimeout: 2 * time.Second, + RequestTimeout: time.Minute, } func ProducerAddConfigAddOptions(prefix string, f *pflag.FlagSet) { diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 3883420f4e..3e03af3f48 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -64,7 +64,7 @@ func producerCfg() *ProducerConfig { return &ProducerConfig{ CheckResultInterval: TestProducerConfig.CheckResultInterval, ResponseEntryTimeout: TestProducerConfig.ResponseEntryTimeout, - RequestTimeout: TestProducerConfig.RequestTimeout, + RequestTimeout: 2 * time.Second, } } From cee4620308d8c3d735e35f9ef9eccd2c2a4eb321 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 26 Aug 2024 14:16:34 +0530 Subject: [PATCH 06/41] fix tests --- pubsub/pubsub_test.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 3e03af3f48..c4e11b8a72 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -342,11 +342,14 @@ func TestRedisProduceComplex(t *testing.T) { c.StopAndWait() } - got, err := mergeValues(gotMessages) + got, err := mergeValues(gotMessages, tc.withInvalidEntries) if err != nil { t.Fatalf("mergeMaps() unexpected error: %v", err) } - got = removeDuplicates(got) + // Only when there are invalid entries got will have duplicates + if tc.withInvalidEntries { + got = removeDuplicates(got) + } var combinedEntries []string for i := 0; i < tc.numProducers; i++ { @@ -405,10 +408,15 @@ func removeDuplicates(list []string) []string { // mergeValues merges maps from the slice and returns their values. // Returns and error if there exists duplicate key. 
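// (Editor's aside on the change below, hedged, not part of the patch.) With
// invalid entries in play, the same request is legitimately consumed by several
// consumers, since each autoclaim re-delivers it, so the per-consumer result maps
// may share keys. For example, merging m1 = {"1-1": "a"} and m2 = {"1-1": "a"}
// with withInvalidEntries=true yields ["a", "a"] (deduplicated later by the
// caller) instead of failing on the duplicate key "1-1".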
-func mergeValues(messages []map[string]string) ([]string, error) { +func mergeValues(messages []map[string]string, withInvalidEntries bool) ([]string, error) { + res := make(map[string]any) var ret []string for _, m := range messages { - for _, v := range m { + for k, v := range m { + if _, found := res[k]; found && !withInvalidEntries { + return nil, fmt.Errorf("duplicate key: %v", k) + } + res[k] = v ret = append(ret, v) } } From 2a3dc1d6a805b77f08d6f8e48a1286d0e2c11d44 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 27 Aug 2024 23:35:10 +0530 Subject: [PATCH 07/41] rectify xautoclaim logic and address PR comments --- pubsub/consumer.go | 66 +++++++++++++++++++++++++++-------- pubsub/producer.go | 31 ++++++++-------- validator/validation_entry.go | 5 +++ 3 files changed, 72 insertions(+), 30 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 2c4787101d..20bfccb6a8 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -5,6 +5,9 @@ import ( "encoding/json" "errors" "fmt" + "math" + "math/rand" + "strconv" "time" "github.com/ethereum/go-ethereum/log" @@ -82,23 +85,54 @@ func (c *Consumer[Request, Response]) StreamName() string { return c.redisStream } +func decrementMsgIdByOne(msgId string) string { + id, err := getUintParts(msgId) + if err != nil { + log.Error("Error decrementing start of XAutoClaim by one, defaulting to 0", "err", err) + return "0" + } + if id[1] > 0 { + return strconv.FormatUint(id[0], 10) + "-" + strconv.FormatUint(id[1]-1, 10) + } else if id[0] > 0 { + return strconv.FormatUint(id[0]-1, 10) + "-" + strconv.FormatUint(math.MaxUint64, 10) + } else { + log.Error("Cannot decrement the smallest possible message ID, defaulting to 0", "msgId", msgId) + return "0" + } +}
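// (Editor's worked example, hedged, not part of the patch.) decrementMsgIdByOne
// steps a stream ID one position backwards so the XAutoClaim Start below safely
// covers the randomly chosen pending entry:
//
//	decrementMsgIdByOne("5-3") == "5-2"                    // decrement the sequence part
//	decrementMsgIdByOne("5-0") == "4-18446744073709551615" // borrow from the timestamp; sequence wraps to MaxUint64
//	decrementMsgIdByOne("0-0") == "0"                      // nothing precedes the smallest ID; fall back to "0"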
+ // Consumer first checks it there exists pending message that is claimed by // unresponsive consumer, if not then reads from the stream. func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Request], chan struct{}, error) { - // First try to XAUTOCLAIM, this prioritizes processing PEL messages - // that have been waiting for more than IdletimeToAutoclaim duration - messages, _, err := c.client.XAutoClaim(ctx, &redis.XAutoClaimArgs{ - Group: c.redisGroup, - Consumer: c.id, - MinIdle: c.cfg.IdletimeToAutoclaim, // Minimum idle time for messages to claim (in milliseconds) - Stream: c.redisStream, - Start: "0", - Count: 5, // Try looking for 5 entries in PEL, this assumes there are a maximum of 5 consumers in this redisGroup - }).Result() - if len(messages) == 0 || err != nil { + // First try to XAUTOCLAIM, with start as a random messageID from the PEL and MinIdle as IdletimeToAutoclaim; + // this prioritizes processing PEL messages that have been waiting for more than IdletimeToAutoclaim duration + var messages []redis.XMessage + if pendingMsgs, err := c.client.XPendingExt(ctx, &redis.XPendingExtArgs{ + Stream: c.redisStream, + Group: c.redisGroup, + Start: "-", + End: "+", + Count: math.MaxInt64, + Idle: c.cfg.IdletimeToAutoclaim, + }).Result(); err != nil { + if !errors.Is(err, redis.Nil) { + log.Error("Error from XPendingExt in getting PEL for auto claim", "err", err, "pendinglen", len(pendingMsgs)) + } + } else if len(pendingMsgs) > 0 { + idx := rand.Intn(len(pendingMsgs)) + messages, _, err = c.client.XAutoClaim(ctx, &redis.XAutoClaimArgs{ + Group: c.redisGroup, + Consumer: c.id, + MinIdle: c.cfg.IdletimeToAutoclaim, // Minimum idle time for messages to claim (in milliseconds) + Stream: c.redisStream, + Start: decrementMsgIdByOne(pendingMsgs[idx].ID), + Count: 1, + }).Result() if err != nil { log.Error("error from xautoclaim", "err", err) } + } + if len(messages) == 0 { // Fallback to reading new messages res, err := c.client.XReadGroup(ctx, &redis.XReadGroupArgs{ Group: c.redisGroup, @@ -126,7 +160,7 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req data, ok = (value).(string) ) if !ok { - return nil, nil, fmt.Errorf("casting request to string: %w", err) + return nil, nil, errors.New("error casting request to string") } var req Request if err := json.Unmarshal([]byte(data), &req); err != nil { @@ -137,14 +171,16 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req for { // Use XClaimJustID so that we have a clear difference between invalid requests that are claimed multiple times due to xautoclaim and // valid requests that are just being claimed at regular intervals to indicate heartbeat - if err := c.client.XClaimJustID(ctx, &redis.XClaimArgs{ + if ids, err := c.client.XClaimJustID(ctx, &redis.XClaimArgs{ Stream: c.redisStream, Group: c.redisGroup, Consumer: c.id, MinIdle: 0, Messages: []string{messages[0].ID}, - }).Err(); err != nil { - log.Error("error claiming message, it might be possible that other consumers might pick this request", "msgID", messages[0].ID) + }).Result(); err != nil { + log.Error("Error claiming message, other consumers might pick up this request", "msgID", messages[0].ID) + } else if len(ids) != 1 { + log.Warn("XClaimJustID returned an empty response when indicating heartbeat", "msgID", messages[0].ID) } select { case <-ackNotifier: diff --git a/pubsub/producer.go b/pubsub/producer.go index ee5d4de528..74023ad5b0 100644 --- a/pubsub/producer.go +++ b/pubsub/producer.go @@ -96,23 +96,24 @@ func NewProducer[Request any, Response any](client redis.UniversalClient, stream }, nil } +func
getUintParts(msgId string) ([2]uint64, error) { + idParts := strings.Split(msgId, "-") + if len(idParts) != 2 { + return [2]uint64{}, fmt.Errorf("invalid i.d: %v", msgId) + } + idTimeStamp, err := strconv.ParseUint(idParts[0], 10, 64) + if err != nil { + return [2]uint64{}, fmt.Errorf("invalid i.d: %v err: %w", msgId, err) + } + idSerial, err := strconv.ParseUint(idParts[1], 10, 64) + if err != nil { + return [2]uint64{}, fmt.Errorf("invalid i.d serial: %v err: %w", msgId, err) + } + return [2]uint64{idTimeStamp, idSerial}, nil +} + // cmpMsgId compares two msgid's and returns (0) if equal, (-1) if msgId1 < msgId2, (1) if msgId1 > msgId2, (-2) if not comparable (or error) func cmpMsgId(msgId1, msgId2 string) int { - getUintParts := func(msgId string) ([2]uint64, error) { - idParts := strings.Split(msgId, "-") - if len(idParts) != 2 { - return [2]uint64{}, fmt.Errorf("invalid i.d: %v", msgId) - } - idTimeStamp, err := strconv.ParseUint(idParts[0], 10, 64) - if err != nil { - return [2]uint64{}, fmt.Errorf("invalid i.d: %v err: %w", msgId, err) - } - idSerial, err := strconv.ParseUint(idParts[1], 10, 64) - if err != nil { - return [2]uint64{}, fmt.Errorf("invalid i.d serial: %v err: %w", msgId, err) - } - return [2]uint64{idTimeStamp, idSerial}, nil - } id1, err := getUintParts(msgId1) if err != nil { log.Trace("error comparing msgIds", "msgId1", msgId1, "msgId2", msgId2) return -2 } diff --git a/validator/validation_entry.go b/validator/validation_entry.go index be98698208..05bbe50729 100644 --- a/validator/validation_entry.go +++ b/validator/validation_entry.go @@ -7,6 +7,7 @@ import ( "github.com/cespare/xxhash/v2" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" ) @@ -32,6 +33,10 @@ type ValidationInput struct { // SetSelfHash should only be called once.
In the context of redis streams- by the producer, before submitting a request func (v *ValidationInput) SetSelfHash() { + if v.SelfHash != "" { + log.Error("SetSelfHash called more then once") + return // exiting early as hash has already been set + } jsonData, err := json.Marshal(v) if err != nil { return From e1bd2cef08fe3c5abd4fc342b4f44d9e3753ceed Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 5 Sep 2024 16:42:18 +0530 Subject: [PATCH 08/41] Remove IPFS support --- cmd/conf/chain.go | 27 ++- cmd/ipfshelper/ipfshelper.bkup_go | 281 ------------------------------ cmd/ipfshelper/ipfshelper_stub.go | 31 ---- cmd/ipfshelper/ipfshelper_test.go | 123 ------------- cmd/nitro/init.go | 23 +-- cmd/nitro/nitro.go | 30 +--- cmd/util/chaininfoutil.go | 29 --- 7 files changed, 18 insertions(+), 526 deletions(-) delete mode 100644 cmd/ipfshelper/ipfshelper.bkup_go delete mode 100644 cmd/ipfshelper/ipfshelper_stub.go delete mode 100644 cmd/ipfshelper/ipfshelper_test.go delete mode 100644 cmd/util/chaininfoutil.go diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index b85f7727b1..28b06aad2b 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -52,23 +52,19 @@ func (c *ParentChainConfig) Validate() error { } type L2Config struct { - ID uint64 `koanf:"id"` - Name string `koanf:"name"` - InfoFiles []string `koanf:"info-files"` - InfoJson string `koanf:"info-json"` - DevWallet genericconf.WalletConfig `koanf:"dev-wallet"` - InfoIpfsUrl string `koanf:"info-ipfs-url"` - InfoIpfsDownloadPath string `koanf:"info-ipfs-download-path"` + ID uint64 `koanf:"id"` + Name string `koanf:"name"` + InfoFiles []string `koanf:"info-files"` + InfoJson string `koanf:"info-json"` + DevWallet genericconf.WalletConfig `koanf:"dev-wallet"` } var L2ConfigDefault = L2Config{ - ID: 0, - Name: "", - InfoFiles: []string{}, // Default file used is chaininfo/arbitrum_chain_info.json, stored in DefaultChainInfo in chain_info.go - InfoJson: "", - DevWallet: genericconf.WalletConfigDefault, - InfoIpfsUrl: "", - InfoIpfsDownloadPath: "/tmp/", + ID: 0, + Name: "", + InfoFiles: []string{}, // Default file used is chaininfo/arbitrum_chain_info.json, stored in DefaultChainInfo in chain_info.go + InfoJson: "", + DevWallet: genericconf.WalletConfigDefault, } func L2ConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -79,9 +75,6 @@ func L2ConfigAddOptions(prefix string, f *flag.FlagSet) { // Dev wallet does not exist unless specified genericconf.WalletConfigAddOptions(prefix+".dev-wallet", f, "") - f.String(prefix+".info-ipfs-url", L2ConfigDefault.InfoIpfsUrl, "url to download chain info file") - f.String(prefix+".info-ipfs-download-path", L2ConfigDefault.InfoIpfsDownloadPath, "path to save temp downloaded file") - } func (c *L2Config) ResolveDirectoryNames(chain string) { diff --git a/cmd/ipfshelper/ipfshelper.bkup_go b/cmd/ipfshelper/ipfshelper.bkup_go deleted file mode 100644 index ccde492ca6..0000000000 --- a/cmd/ipfshelper/ipfshelper.bkup_go +++ /dev/null @@ -1,281 +0,0 @@ -//go:build ipfs -// +build ipfs - -package ipfshelper - -import ( - "context" - "fmt" - "io" - "math/rand" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/ethereum/go-ethereum/log" - "github.com/ipfs/go-libipfs/files" - coreiface "github.com/ipfs/interface-go-ipfs-core" - "github.com/ipfs/interface-go-ipfs-core/options" - "github.com/ipfs/interface-go-ipfs-core/path" - "github.com/ipfs/kubo/config" - "github.com/ipfs/kubo/core" - "github.com/ipfs/kubo/core/coreapi" - "github.com/ipfs/kubo/core/node/libp2p" - 
"github.com/ipfs/kubo/plugin/loader" - "github.com/ipfs/kubo/repo" - "github.com/ipfs/kubo/repo/fsrepo" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -const DefaultIpfsProfiles = "" - -type IpfsHelper struct { - api coreiface.CoreAPI - node *core.IpfsNode - cfg *config.Config - repoPath string - repo repo.Repo -} - -func (h *IpfsHelper) createRepo(downloadPath string, profiles string) error { - fileInfo, err := os.Stat(downloadPath) - if err != nil { - return fmt.Errorf("failed to stat ipfs repo directory: %w", err) - } - if !fileInfo.IsDir() { - return fmt.Errorf("%s is not a directory", downloadPath) - } - h.repoPath = filepath.Join(downloadPath, "ipfs-repo") - // Create a config with default options and a 2048 bit key - h.cfg, err = config.Init(io.Discard, 2048) - if err != nil { - return err - } - if len(profiles) > 0 { - for _, profile := range strings.Split(profiles, ",") { - transformer, ok := config.Profiles[profile] - if !ok { - return fmt.Errorf("invalid ipfs configuration profile: %s", profile) - } - - if err := transformer.Transform(h.cfg); err != nil { - return err - } - } - } - // Create the repo with the config - // fsrepo.Init initializes new repo only if it's not initialized yet - err = fsrepo.Init(h.repoPath, h.cfg) - if err != nil { - return fmt.Errorf("failed to init ipfs repo: %w", err) - } - h.repo, err = fsrepo.Open(h.repoPath) - if err != nil { - return fmt.Errorf("failed to open ipfs repo: %w", err) - } - return nil -} - -func (h *IpfsHelper) createNode(ctx context.Context, clientOnly bool) error { - var routing libp2p.RoutingOption - if clientOnly { - routing = libp2p.DHTClientOption - } else { - routing = libp2p.DHTOption - } - nodeOptions := &core.BuildCfg{ - Online: true, - Routing: routing, - Repo: h.repo, - } - var err error - h.node, err = core.NewNode(ctx, nodeOptions) - if err != nil { - return err - } - h.api, err = coreapi.NewCoreAPI(h.node) - return err -} - -func (h *IpfsHelper) connectToPeers(ctx context.Context, peers []string) error { - peerInfos := make(map[peer.ID]*peer.AddrInfo, len(peers)) - for _, addressString := range peers { - address, err := ma.NewMultiaddr(addressString) - if err != nil { - return err - } - addressInfo, err := peer.AddrInfoFromP2pAddr(address) - if err != nil { - return err - } - peerInfo, ok := peerInfos[addressInfo.ID] - if !ok { - peerInfo = &peer.AddrInfo{ID: addressInfo.ID} - peerInfos[peerInfo.ID] = peerInfo - } - peerInfo.Addrs = append(peerInfo.Addrs, addressInfo.Addrs...) 
- } - var wg sync.WaitGroup - wg.Add(len(peerInfos)) - for _, peerInfo := range peerInfos { - go func(peerInfo *peer.AddrInfo) { - defer wg.Done() - err := h.api.Swarm().Connect(ctx, *peerInfo) - if err != nil { - log.Warn("failed to connect to peer", "peerId", peerInfo.ID, "err", err) - return - } - }(peerInfo) - } - wg.Wait() - return nil -} - -func (h *IpfsHelper) GetPeerHostAddresses() ([]string, error) { - addresses, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(h.node.PeerHost)) - if err != nil { - return []string{}, err - } - addressesStrings := make([]string, len(addresses)) - for i, a := range addresses { - addressesStrings[i] = a.String() - } - return addressesStrings, nil -} - -func normalizeCidString(cidString string) string { - if strings.HasPrefix(cidString, "ipfs://") { - return "/ipfs/" + cidString[7:] - } - if strings.HasPrefix(cidString, "ipns://") { - return "/ipns/" + cidString[7:] - } - return cidString -} - -func (h *IpfsHelper) DownloadFile(ctx context.Context, cidString string, destinationDir string) (string, error) { - cidString = normalizeCidString(cidString) - cidPath := path.New(cidString) - resolvedPath, err := h.api.ResolvePath(ctx, cidPath) - if err != nil { - return "", fmt.Errorf("failed to resolve path: %w", err) - } - // first pin the root node, then all its children nodes in random order to improve sharing with peers started at the same time - if err := h.api.Pin().Add(ctx, resolvedPath, options.Pin.Recursive(false)); err != nil { - return "", fmt.Errorf("failed to pin root path: %w", err) - } - links, err := h.api.Object().Links(ctx, resolvedPath) - if err != nil { - return "", fmt.Errorf("failed to get root links: %w", err) - } - log.Info("Pinning ipfs subtrees...") - printProgress := func(done int, all int) { - if all == 0 { - all = 1 // avoid division by 0 - done = 1 - } - fmt.Printf("\033[2K\rPinned %d / %d subtrees (%.2f%%)", done, all, float32(done)/float32(all)*100) - } - permutation := rand.Perm(len(links)) - printProgress(0, len(links)) - for i, j := range permutation { - link := links[j] - if err := h.api.Pin().Add(ctx, path.IpfsPath(link.Cid), options.Pin.Recursive(true)); err != nil { - return "", fmt.Errorf("failed to pin child path: %w", err) - } - printProgress(i+1, len(links)) - } - fmt.Printf("\n") - rootNodeDirectory, err := h.api.Unixfs().Get(ctx, cidPath) - if err != nil { - return "", fmt.Errorf("could not get file with CID: %w", err) - } - log.Info("Writing file...") - outputFilePath := filepath.Join(destinationDir, resolvedPath.Cid().String()) - _ = os.Remove(outputFilePath) - err = files.WriteTo(rootNodeDirectory, outputFilePath) - if err != nil { - return "", fmt.Errorf("could not write out the fetched CID: %w", err) - } - log.Info("Download done.") - return outputFilePath, nil -} - -func (h *IpfsHelper) AddFile(ctx context.Context, filePath string, includeHidden bool) (path.Resolved, error) { - fileInfo, err := os.Stat(filePath) - if err != nil { - return nil, err - } - fileNode, err := files.NewSerialFile(filePath, includeHidden, fileInfo) - if err != nil { - return nil, err - } - return h.api.Unixfs().Add(ctx, fileNode) -} - -func CreateIpfsHelper(ctx context.Context, downloadPath string, clientOnly bool, peerList []string, profiles string) (*IpfsHelper, error) { - return createIpfsHelperImpl(ctx, downloadPath, clientOnly, peerList, profiles) -} - -func (h *IpfsHelper) Close() error { - return h.node.Close() -} - -func setupPlugins() error { - plugins, err := loader.NewPluginLoader("") - if err != nil { - return 
fmt.Errorf("error loading plugins: %w", err) - } - // Load preloaded and external plugins - if err := plugins.Initialize(); err != nil { - return fmt.Errorf("error initializing plugins: %w", err) - } - if err := plugins.Inject(); err != nil { - return fmt.Errorf("error initializing plugins: %w", err) - } - return nil -} - -var loadPluginsOnce sync.Once - -func createIpfsHelperImpl(ctx context.Context, downloadPath string, clientOnly bool, peerList []string, profiles string) (*IpfsHelper, error) { - var onceErr error - loadPluginsOnce.Do(func() { - onceErr = setupPlugins() - }) - if onceErr != nil { - return nil, onceErr - } - client := IpfsHelper{} - err := client.createRepo(downloadPath, profiles) - if err != nil { - return nil, err - } - err = client.createNode(ctx, clientOnly) - if err != nil { - return nil, err - } - err = client.connectToPeers(ctx, peerList) - if err != nil { - return nil, err - } - return &client, nil -} - -func CanBeIpfsPath(pathString string) bool { - path := path.New(pathString) - return path.IsValid() == nil || - strings.HasPrefix(pathString, "/ipfs/") || - strings.HasPrefix(pathString, "/ipld/") || - strings.HasPrefix(pathString, "/ipns/") || - strings.HasPrefix(pathString, "ipfs://") || - strings.HasPrefix(pathString, "ipns://") -} - -// TODO break abstraction for now til we figure out what fns are needed -func (h *IpfsHelper) GetAPI() coreiface.CoreAPI { - return h.api -} diff --git a/cmd/ipfshelper/ipfshelper_stub.go b/cmd/ipfshelper/ipfshelper_stub.go deleted file mode 100644 index fa6a451927..0000000000 --- a/cmd/ipfshelper/ipfshelper_stub.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !ipfs -// +build !ipfs - -package ipfshelper - -import ( - "context" - "errors" -) - -type IpfsHelper struct{} - -var ErrIpfsNotSupported = errors.New("ipfs not supported") - -var DefaultIpfsProfiles = "default ipfs profiles stub" - -func CanBeIpfsPath(pathString string) bool { - return false -} - -func CreateIpfsHelper(ctx context.Context, downloadPath string, clientOnly bool, peerList []string, profiles string) (*IpfsHelper, error) { - return nil, ErrIpfsNotSupported -} - -func (h *IpfsHelper) DownloadFile(ctx context.Context, cidString string, destinationDir string) (string, error) { - return "", ErrIpfsNotSupported -} - -func (h *IpfsHelper) Close() error { - return ErrIpfsNotSupported -} diff --git a/cmd/ipfshelper/ipfshelper_test.go b/cmd/ipfshelper/ipfshelper_test.go deleted file mode 100644 index 80f10c21f6..0000000000 --- a/cmd/ipfshelper/ipfshelper_test.go +++ /dev/null @@ -1,123 +0,0 @@ -//go:build ipfs -// +build ipfs - -package ipfshelper - -import ( - "bytes" - "context" - "math/rand" - "os" - "path/filepath" - "testing" - "time" - - "github.com/offchainlabs/nitro/util/testhelpers" -) - -func getTempFileWithData(t *testing.T, data []byte) string { - path := filepath.Join(t.TempDir(), "config.json") - err := os.WriteFile(path, []byte(data), 0600) - testhelpers.RequireImpl(t, err) - return path -} - -func fileDataEqual(t *testing.T, path string, expected []byte) bool { - data, err := os.ReadFile(path) - testhelpers.RequireImpl(t, err) - return bytes.Equal(data, expected) -} - -func TestIpfsHelper(t *testing.T) { - ctx := context.Background() - ipfsA, err := createIpfsHelperImpl(ctx, t.TempDir(), false, []string{}, "test") - testhelpers.RequireImpl(t, err) - // add a test file to node A - testData := make([]byte, 1024*1024) - _, err = rand.Read(testData) - testhelpers.RequireImpl(t, err) - testFile := getTempFileWithData(t, testData) - ipfsTestFilePath, err := 
ipfsA.AddFile(ctx, testFile, false) - testhelpers.RequireImpl(t, err) - testFileCid := ipfsTestFilePath.Cid().String() - addrsA, err := ipfsA.GetPeerHostAddresses() - testhelpers.RequireImpl(t, err) - // create node B connected to node A - ipfsB, err := createIpfsHelperImpl(ctx, t.TempDir(), false, addrsA, "test") - testhelpers.RequireImpl(t, err) - // download the test file with node B - downloadedFile, err := ipfsB.DownloadFile(ctx, testFileCid, t.TempDir()) - testhelpers.RequireImpl(t, err) - if !fileDataEqual(t, downloadedFile, testData) { - testhelpers.FailImpl(t, "Downloaded file does not contain expected data") - } - // clean up node A and test downloading the file from yet another node C - err = ipfsA.Close() - os.RemoveAll(ipfsA.repoPath) - testhelpers.RequireImpl(t, err) - addrsB, err := ipfsB.GetPeerHostAddresses() - testhelpers.RequireImpl(t, err) - ipfsC, err := createIpfsHelperImpl(ctx, t.TempDir(), false, addrsB, "test") - testhelpers.RequireImpl(t, err) - downloadedFile, err = ipfsC.DownloadFile(ctx, testFileCid, t.TempDir()) - testhelpers.RequireImpl(t, err) - if !fileDataEqual(t, downloadedFile, testData) { - testhelpers.FailImpl(t, "Downloaded file does not contain expected data") - } - // make sure closing B and C nodes (A already closed) will make it impossible to download the test file from new node D - ipfsD, err := createIpfsHelperImpl(ctx, t.TempDir(), false, addrsB, "test") - testhelpers.RequireImpl(t, err) - err = ipfsB.Close() - testhelpers.RequireImpl(t, err) - err = ipfsC.Close() - testhelpers.RequireImpl(t, err) - testTimeout := 300 * time.Millisecond - timeoutCtx, cancel := context.WithTimeout(ctx, testTimeout) - defer cancel() - _, err = ipfsD.DownloadFile(timeoutCtx, testFileCid, t.TempDir()) - if err == nil { - testhelpers.FailImpl(t, "Download attempt did not fail as expected") - } - err = ipfsD.Close() - testhelpers.RequireImpl(t, err) -} - -func TestNormalizeCidString(t *testing.T) { - for _, test := range []struct { - input string - expected string - }{ - {"ipfs://QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", "/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"}, - {"ipns://k51qzi5uqu5dlvj2baxnqndepeb86cbk3ng7n3i46uzyxzyqj2xjonzllnv0v8", "/ipns/k51qzi5uqu5dlvj2baxnqndepeb86cbk3ng7n3i46uzyxzyqj2xjonzllnv0v8"}, - {"ipns://docs.ipfs.tech/introduction/", "/ipns/docs.ipfs.tech/introduction/"}, - {"/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", "/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"}, - {"/ipns/k51qzi5uqu5dlvj2baxnqndepeb86cbk3ng7n3i46uzyxzyqj2xjonzllnv0v8", "/ipns/k51qzi5uqu5dlvj2baxnqndepeb86cbk3ng7n3i46uzyxzyqj2xjonzllnv0v8"}, - {"QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"}, - } { - if res := normalizeCidString(test.input); res != test.expected { - testhelpers.FailImpl(t, "Failed to normalize cid string, input: ", test.input, " got: ", res, " expected: ", test.expected) - } - } -} - -func TestCanBeIpfsPath(t *testing.T) { - correctPaths := []string{ - "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", - "/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", - "/ipns/k51qzi5uqu5dlvj2baxnqndepeb86cbk3ng7n3i46uzyxzyqj2xjonzllnv0v8", - "/ipns/docs.ipfs.tech/introduction/", - "ipfs://QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", - "ipns://k51qzi5uqu5dlvj2baxnqndepeb86cbk3ng7n3i46uzyxzyqj2xjonzllnv0v8", - } - for _, path := range correctPaths { - if !CanBeIpfsPath(path) { - testhelpers.FailImpl(t, "false negative result for path:", path) - } - } - incorrectPaths := 
[]string{"www.ipfs.tech", "https://www.ipfs.tech", "QmIncorrect"} - for _, path := range incorrectPaths { - if CanBeIpfsPath(path) { - testhelpers.FailImpl(t, "false positive result for path:", path) - } - } -} diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index a8463a7d21..9048ffd609 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -40,7 +40,6 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/conf" - "github.com/offchainlabs/nitro/cmd/ipfshelper" "github.com/offchainlabs/nitro/cmd/pruning" "github.com/offchainlabs/nitro/cmd/staterecovery" "github.com/offchainlabs/nitro/execution/gethexec" @@ -58,25 +57,6 @@ func downloadInit(ctx context.Context, initConfig *conf.InitConfig) (string, err if strings.HasPrefix(initConfig.Url, "file:") { return initConfig.Url[5:], nil } - if ipfshelper.CanBeIpfsPath(initConfig.Url) { - ipfsNode, err := ipfshelper.CreateIpfsHelper(ctx, initConfig.DownloadPath, false, []string{}, ipfshelper.DefaultIpfsProfiles) - if err != nil { - return "", err - } - log.Info("Downloading initial database via IPFS", "url", initConfig.Url) - initFile, downloadErr := ipfsNode.DownloadFile(ctx, initConfig.Url, initConfig.DownloadPath) - closeErr := ipfsNode.Close() - if downloadErr != nil { - if closeErr != nil { - log.Error("Failed to close IPFS node after download error", "err", closeErr) - } - return "", fmt.Errorf("Failed to download file from IPFS: %w", downloadErr) - } - if closeErr != nil { - return "", fmt.Errorf("Failed to close IPFS node: %w", err) - } - return initFile, nil - } log.Info("Downloading initial database", "url", initConfig.Url) if !initConfig.ValidateChecksum { file, err := downloadFile(ctx, initConfig, initConfig.Url, nil) @@ -732,8 +712,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, nil, err } - combinedL2ChainInfoFiles := aggregateL2ChainInfoFiles(ctx, config.Chain.InfoFiles, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath) - chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, combinedL2ChainInfoFiles, config.Chain.InfoJson) + chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, config.Chain.InfoFiles, config.Chain.InfoJson) if err != nil { return chainDb, nil, err } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 146a0049e7..2c192a1d8e 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -285,8 +285,6 @@ func mainImpl() int { } } - combinedL2ChainInfoFile := aggregateL2ChainInfoFiles(ctx, nodeConfig.Chain.InfoFiles, nodeConfig.Chain.InfoIpfsUrl, nodeConfig.Chain.InfoIpfsDownloadPath) - if nodeConfig.Node.Staker.Enable { if !nodeConfig.Node.ParentChainReader.Enable { flag.Usage() @@ -335,7 +333,7 @@ func mainImpl() int { log.Info("connected to l1 chain", "l1url", nodeConfig.ParentChain.Connection.URL, "l1chainid", nodeConfig.ParentChain.ID) - rollupAddrs, err = chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) + rollupAddrs, err = chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, nodeConfig.Chain.InfoFiles, nodeConfig.Chain.InfoJson) if err != nil { log.Crit("error getting rollup addresses", "err", err) } @@ -367,7 +365,7 @@ func mainImpl() int { log.Crit("--node.validator.only-create-wallet-contract conflicts with 
--node.dangerous.no-l1-listener") } // Just create validator smart wallet if needed then exit - deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) + deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, nodeConfig.Chain.InfoFiles, nodeConfig.Chain.InfoJson) if err != nil { log.Crit("error getting rollup addresses config", "err", err) } @@ -541,7 +539,7 @@ func mainImpl() int { return 0 } - chainInfo, err := chaininfo.ProcessChainInfo(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) + chainInfo, err := chaininfo.ProcessChainInfo(nodeConfig.Chain.ID, nodeConfig.Chain.Name, nodeConfig.Chain.InfoFiles, nodeConfig.Chain.InfoJson) if err != nil { log.Error("error processing l2 chain info", "err", err) return 1 @@ -888,11 +886,9 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa l2ChainId := k.Int64("chain.id") l2ChainName := k.String("chain.name") - l2ChainInfoIpfsUrl := k.String("chain.info-ipfs-url") - l2ChainInfoIpfsDownloadPath := k.String("chain.info-ipfs-download-path") l2ChainInfoFiles := k.Strings("chain.info-files") l2ChainInfoJson := k.String("chain.info-json") - err = applyChainParameters(ctx, k, uint64(l2ChainId), l2ChainName, l2ChainInfoFiles, l2ChainInfoJson, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) + err = applyChainParameters(k, uint64(l2ChainId), l2ChainName, l2ChainInfoFiles, l2ChainInfoJson) if err != nil { return nil, nil, err } @@ -955,20 +951,8 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa return &nodeConfig, &l2DevWallet, nil } -func aggregateL2ChainInfoFiles(ctx context.Context, l2ChainInfoFiles []string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) []string { - if l2ChainInfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) - if err != nil { - log.Error("error getting l2 chain info file from ipfs", "err", err) - } - l2ChainInfoFiles = append(l2ChainInfoFiles, l2ChainInfoIpfsFile) - } - return l2ChainInfoFiles -} - -func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) error { - combinedL2ChainInfoFiles := aggregateL2ChainInfoFiles(ctx, l2ChainInfoFiles, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) - chainInfo, err := chaininfo.ProcessChainInfo(chainId, chainName, combinedL2ChainInfoFiles, l2ChainInfoJson) +func applyChainParameters(k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string) error { + chainInfo, err := chaininfo.ProcessChainInfo(chainId, chainName, l2ChainInfoFiles, l2ChainInfoJson) if err != nil { return err } @@ -977,7 +961,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c parentChainIsArbitrum = *chainInfo.ParentChainIsArbitrum } else { log.Warn("Chain info field parent-chain-is-arbitrum is missing, in the future this will be required", "chainId", chainInfo.ChainConfig.ChainID, "parentChainId", chainInfo.ParentChainId) - _, err := chaininfo.ProcessChainInfo(chainInfo.ParentChainId, "", combinedL2ChainInfoFiles, "") + _, err := chaininfo.ProcessChainInfo(chainInfo.ParentChainId, "", l2ChainInfoFiles, "") if err == nil { parentChainIsArbitrum = true } diff --git 
a/cmd/util/chaininfoutil.go b/cmd/util/chaininfoutil.go deleted file mode 100644 index 906aa234ed..0000000000 --- a/cmd/util/chaininfoutil.go +++ /dev/null @@ -1,29 +0,0 @@ -package util - -import ( - "context" - "fmt" - - "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/cmd/ipfshelper" -) - -func GetL2ChainInfoIpfsFile(ctx context.Context, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) (string, error) { - ipfsNode, err := ipfshelper.CreateIpfsHelper(ctx, l2ChainInfoIpfsDownloadPath, false, []string{}, ipfshelper.DefaultIpfsProfiles) - if err != nil { - return "", err - } - log.Info("Downloading l2 info file via IPFS", "url", l2ChainInfoIpfsDownloadPath) - l2ChainInfoFile, downloadErr := ipfsNode.DownloadFile(ctx, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) - closeErr := ipfsNode.Close() - if downloadErr != nil { - if closeErr != nil { - log.Error("Failed to close IPFS node after download error", "err", closeErr) - } - return "", fmt.Errorf("failed to download file from IPFS: %w", downloadErr) - } - if closeErr != nil { - return "", fmt.Errorf("failed to close IPFS node: %w", err) - } - return l2ChainInfoFile, nil -} From 8525ad4228f2f1bfda0397c18a1afa92e4efb36c Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 13 Sep 2024 15:17:43 +0530 Subject: [PATCH 09/41] address PR comments --- pubsub/consumer.go | 38 ++++++++++++++++++----------- pubsub/pubsub_test.go | 14 +++++------ validator/validation_entry.go | 2 +- validator/valnode/redis/consumer.go | 13 +++++----- 4 files changed, 37 insertions(+), 30 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 20bfccb6a8..0ec3f11eb7 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -51,8 +51,9 @@ type Consumer[Request any, Response any] struct { } type Message[Request any] struct { - ID string - Value Request + ID string + Value Request + AckNotifier chan struct{} } func NewConsumer[Request any, Response any](client redis.UniversalClient, streamName string, cfg *ConsumerConfig) (*Consumer[Request, Response], error) { @@ -103,7 +104,7 @@ func decrementMsgIdByOne(msgId string) string { // Consumer first checks it there exists pending message that is claimed by // unresponsive consumer, if not then reads from the stream. 
-func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Request], chan struct{}, error) {
+func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Request], error) {
 	// First try to XAUTOCLAIM, with start as a random messageID from PEL with MinIdle as IdletimeToAutoclaim
 	// this prioritizes processing PEL messages that have been waiting for more than IdletimeToAutoclaim duration
 	var messages []redis.XMessage
@@ -133,7 +134,7 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req
 		}
 	}
 	if len(messages) == 0 {
-		// Fallback to reading new messages
+		// If we fail to autoclaim then we do not retry but instead fall back to reading new messages
 		res, err := c.client.XReadGroup(ctx, &redis.XReadGroupArgs{
 			Group:    c.redisGroup,
 			Consumer: c.id,
@@ -144,13 +145,13 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req
 			Block: time.Millisecond, // 0 seems to block the read instead of immediately returning
 		}).Result()
 		if errors.Is(err, redis.Nil) {
-			return nil, nil, nil
+			return nil, nil
 		}
 		if err != nil {
-			return nil, nil, fmt.Errorf("reading message for consumer: %q: %w", c.id, err)
+			return nil, fmt.Errorf("reading message for consumer: %q: %w", c.id, err)
 		}
 		if len(res) != 1 || len(res[0].Messages) != 1 {
-			return nil, nil, fmt.Errorf("redis returned entries: %+v, for querying single message", res)
+			return nil, fmt.Errorf("redis returned entries: %+v, for querying single message", res)
 		}
 		messages = res[0].Messages
 	}
@@ -160,11 +161,11 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req
 		data, ok = (value).(string)
 	)
 	if !ok {
-		return nil, nil, errors.New("error casting request to string")
+		return nil, errors.New("error casting request to string")
 	}
 	var req Request
 	if err := json.Unmarshal([]byte(data), &req); err != nil {
-		return nil, nil, fmt.Errorf("unmarshaling value: %v, error: %w", value, err)
+		return nil, fmt.Errorf("unmarshaling value: %v, error: %w", value, err)
 	}
 	ackNotifier := make(chan struct{})
 	c.StopWaiter.LaunchThread(func(ctx context.Context) {
@@ -179,14 +180,22 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req
 			Messages: []string{messages[0].ID},
 		}).Result(); err != nil {
 			log.Error("Error claiming message, it might be possible that other consumers might pick this request", "msgID", messages[0].ID)
-		} else if len(ids) != 1 {
+		} else if len(ids) == 0 {
 			log.Warn("XClaimJustID returned empty response when indicating hearbeat", "msgID", messages[0].ID)
+		} else if len(ids) > 1 {
+			log.Error("XClaimJustID returned response with more than one entry", "msgIDs", ids)
 		}
 		select {
 		case <-ackNotifier:
 			return
 		case <-ctx.Done():
-			log.Info("Context done while claiming message to indicate hearbeat", "error", ctx.Err().Error())
+			log.Info("Context done while claiming message to indicate heartbeat", "messageID", messages[0].ID, "error", ctx.Err().Error())
+			if c.StopWaiter.GetParentContext().Err() == nil {
+				// Proceed to set the message's idle time to IdletimeToAutoclaim so that it can be picked up by other consumers
+				if err := c.client.Do(c.StopWaiter.GetParentContext(), "XCLAIM", c.redisStream, c.redisGroup, c.id, 0, messages[0].ID, "IDLE", c.cfg.IdletimeToAutoclaim.Milliseconds()).Err(); err != nil {
+					log.Error("error when trying to set the idle time of currently worked on message to IdletimeToAutoclaim", "messageID", messages[0].ID, "err", err)
+				}
+			}
 			return
 		case <-time.After(c.cfg.IdletimeToAutoclaim / 10):
 		}
@@ -194,9 +203,10 @@
func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req }) log.Debug("Redis stream consuming", "consumer_id", c.id, "message_id", messages[0].ID) return &Message[Request]{ - ID: messages[0].ID, - Value: req, - }, ackNotifier, nil + ID: messages[0].ID, + Value: req, + AckNotifier: ackNotifier, + }, nil } func (c *Consumer[Request, Response]) SetResult(ctx context.Context, id string, messageID string, result Response) error { diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index c4e11b8a72..13258aea3d 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -138,16 +138,14 @@ func flatten(responses [][]string) []string { return ret } -func produceMessages(ctx context.Context, msgs []string, producer *Producer[testRequest, testResponse], useUniqueIdentifier, withInvalidEntries bool) ([]*containers.Promise[testResponse], error) { +func produceMessages(ctx context.Context, msgs []string, producer *Producer[testRequest, testResponse], withInvalidEntries bool) ([]*containers.Promise[testResponse], error) { var promises []*containers.Promise[testResponse] for i := 0; i < len(msgs); i++ { req := testRequest{Request: msgs[i]} if withInvalidEntries && i%50 == 0 { req.IsInvalid = true } - if useUniqueIdentifier { - req.SetSelfHash() - } + req.SetSelfHash() promise, err := producer.Produce(ctx, req.SelfHash, req) if err != nil { return nil, err @@ -187,7 +185,7 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques func(ctx context.Context) { for { - res, ackNotifier, err := c.Consume(ctx) + res, err := c.Consume(ctx) if err != nil { if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { t.Errorf("Consume() unexpected error: %v", err) @@ -206,7 +204,7 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques } wantResponses[idx] = append(wantResponses[idx], resp) } - close(ackNotifier) + close(res.AckNotifier) } }) } @@ -291,7 +289,7 @@ func TestRedisProduceComplex(t *testing.T) { var promises [][]*containers.Promise[testResponse] for i := 0; i < tc.numProducers; i++ { - prs, err := produceMessages(ctx, entries[i], producers[i], tc.numProducers == 2, tc.withInvalidEntries) + prs, err := produceMessages(ctx, entries[i], producers[i], tc.withInvalidEntries) if err != nil { t.Fatalf("Error producing messages from producer%d: %v", i, err) } @@ -304,7 +302,7 @@ func TestRedisProduceComplex(t *testing.T) { // that other consumers will claim ownership on those messages. for i := 0; i < len(consumers); i += 3 { consumers[i].Start(ctx) - req, _, err := consumers[i].Consume(ctx) + req, err := consumers[i].Consume(ctx) if err != nil { t.Errorf("Error consuming message: %v", err) } diff --git a/validator/validation_entry.go b/validator/validation_entry.go index 59e2b53300..d51a9cbe55 100644 --- a/validator/validation_entry.go +++ b/validator/validation_entry.go @@ -34,7 +34,7 @@ type ValidationInput struct { // SetSelfHash should be only called once. 
In the context of redis streams- by the producer, before submitting a request func (v *ValidationInput) SetSelfHash() { if v.SelfHash != "" { - log.Error("SetSelfHash called more then once") + log.Warn("SetSelfHash called more then once") return // exiting early as hash has already been set } jsonData, err := json.Marshal(v) diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index 5255feb522..558cc6ba12 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -57,9 +57,8 @@ func (s *ValidationServer) Start(ctx_in context.Context) { // Channel that all consumers use to indicate their readiness. readyStreams := make(chan struct{}, len(s.consumers)) type workUnit struct { - req *pubsub.Message[*validator.ValidationInput] - moduleRoot common.Hash - ackNotifier chan struct{} + req *pubsub.Message[*validator.ValidationInput] + moduleRoot common.Hash } workers := s.config.Workers if workers == 0 { @@ -109,7 +108,7 @@ func (s *ValidationServer) Start(ctx_in context.Context) { return 0 case <-requestTokenQueue: } - req, ackNotifier, err := c.Consume(ctx) + req, err := c.Consume(ctx) if err != nil { log.Error("Consuming request", "error", err) requestTokenQueue <- struct{}{} @@ -122,7 +121,7 @@ func (s *ValidationServer) Start(ctx_in context.Context) { } select { case <-ctx.Done(): - case workQueue <- workUnit{req, moduleRoot, ackNotifier}: + case workQueue <- workUnit{req, moduleRoot}: } return 0 }) @@ -155,11 +154,11 @@ func (s *ValidationServer) Start(ctx_in context.Context) { res, err := valRun.Await(ctx) if err != nil { log.Error("Error validating", "request value", work.req.Value, "error", err) - close(work.ackNotifier) + close(work.req.AckNotifier) } else { err := s.consumers[work.moduleRoot].SetResult(ctx, work.req.Value.SelfHash, work.req.ID, res) // Even in error we close ackNotifier as there's no retry mechanism here and closing it will alow other consumers to autoclaim - close(work.ackNotifier) + close(work.req.AckNotifier) if err != nil { log.Error("Error setting result for request", "id", work.req.ID, "result", res, "error", err) } From a8967d17e2ac92aadc4d294c67b1c059200cbf93 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 16 Sep 2024 12:03:53 +0530 Subject: [PATCH 10/41] remove separate id impl --- pubsub/consumer.go | 14 ++--- pubsub/producer.go | 89 +++++------------------------ pubsub/pubsub_test.go | 67 +++++++--------------- validator/client/redis/producer.go | 3 +- validator/validation_entry.go | 20 ------- validator/valnode/redis/consumer.go | 2 +- 6 files changed, 42 insertions(+), 153 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 0ec3f11eb7..bf8aac8b45 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -113,7 +113,7 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req Group: c.redisGroup, Start: "-", End: "+", - Count: math.MaxInt64, + Count: 50, Idle: c.cfg.IdletimeToAutoclaim, }).Result(); err != nil { if !errors.Is(err, redis.Nil) { @@ -130,7 +130,7 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req Count: 1, }).Result() if err != nil { - log.Error("error from xautoclaim", "err", err) + log.Info("error from xautoclaim", "err", err) } } if len(messages) == 0 { @@ -209,18 +209,14 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req }, nil } -func (c *Consumer[Request, Response]) SetResult(ctx context.Context, id string, messageID string, result Response) error { - 
if id == "" { - log.Info("Request doesn't have a unique identifier (SelfHash field is not set), defaulting to using redis stream messageId", "msgId", messageID) - id = messageID - } +func (c *Consumer[Request, Response]) SetResult(ctx context.Context, messageID string, result Response) error { resp, err := json.Marshal(result) if err != nil { return fmt.Errorf("marshaling result: %w", err) } - acquired, err := c.client.SetNX(ctx, MessageKeyFor(c.StreamName(), id), resp, c.cfg.ResponseEntryTimeout).Result() + acquired, err := c.client.SetNX(ctx, MessageKeyFor(c.StreamName(), messageID), resp, c.cfg.ResponseEntryTimeout).Result() if err != nil || !acquired { - return fmt.Errorf("setting result for message with message-id in stream: %v, unique request identifier: %v, error: %w", messageID, id, err) + return fmt.Errorf("setting result for message with message-id in stream: %v, error: %w", messageID, err) } if _, err := c.client.XAck(ctx, c.redisStream, c.redisGroup, messageID).Result(); err != nil { return fmt.Errorf("acking message: %v, error: %w", messageID, err) diff --git a/pubsub/producer.go b/pubsub/producer.go index 74023ad5b0..9e354a82ee 100644 --- a/pubsub/producer.go +++ b/pubsub/producer.go @@ -31,11 +31,6 @@ const ( defaultGroup = "default_consumer_group" ) -type MsgIdAndPromise[Response any] struct { - msgID string - promise *containers.Promise[Response] -} - type Producer[Request any, Response any] struct { stopwaiter.StopWaiter id string @@ -45,7 +40,7 @@ type Producer[Request any, Response any] struct { cfg *ProducerConfig promisesLock sync.RWMutex - promises map[string]*MsgIdAndPromise[Response] + promises map[string]*containers.Promise[Response] // Used for checking responses from consumers iteratively // For the first time when Produce is called. @@ -92,7 +87,7 @@ func NewProducer[Request any, Response any](client redis.UniversalClient, stream redisStream: streamName, redisGroup: streamName, // There is 1-1 mapping of redis stream and consumer group. 
cfg: cfg, - promises: make(map[string]*MsgIdAndPromise[Response]), + promises: make(map[string]*containers.Promise[Response]), }, nil } @@ -142,18 +137,11 @@ func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.D if err != nil { log.Error("error getting PEL data from xpending, xtrimming is disabled", "err", err) } - deletePromise := func(id string) { - // Try deleting UNIQUEID_MSGID_MAP_KEY corresponding to this id from redis - if err := p.client.Del(ctx, MessageKeyFor(p.redisStream, id)+UNIQUEID_MSGID_MAP_KEY).Err(); err != nil { - log.Error("Error deleting key from redis that flags that a request is being processed", "err", err) - } - delete(p.promises, id) - } p.promisesLock.Lock() defer p.promisesLock.Unlock() responded := 0 errored := 0 - for id, msgIDAndPromise := range p.promises { + for id, promise := range p.promises { if ctx.Err() != nil { return 0 } @@ -169,25 +157,25 @@ func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.D if pelData != nil && pelData.Lower != "" { allowedOldestID = pelData.Lower } - if cmpMsgId(msgIDAndPromise.msgID, allowedOldestID) == -1 { - msgIDAndPromise.promise.ProduceError(errors.New("error getting response, request has been waiting for too long")) + if cmpMsgId(id, allowedOldestID) == -1 { + promise.ProduceError(errors.New("error getting response, request has been waiting for too long")) log.Error("error getting response, request has been waiting past its TTL") errored++ - deletePromise(id) + delete(p.promises, id) } } continue } var resp Response if err := json.Unmarshal([]byte(res), &resp); err != nil { - msgIDAndPromise.promise.ProduceError(fmt.Errorf("error unmarshalling: %w", err)) + promise.ProduceError(fmt.Errorf("error unmarshalling: %w", err)) log.Error("Error unmarshaling", "value", res, "error", err) errored++ } else { - msgIDAndPromise.promise.Produce(resp) + promise.Produce(resp) responded++ } - deletePromise(id) + delete(p.promises, id) } // XDEL on consumer side already deletes acked messages (mark as deleted) but doesnt claim the memory back, XTRIM helps in claiming this memory in normal conditions // pelData might be outdated when we do the xtrim, but thats ok as the messages are also being trimmed by other producers @@ -230,39 +218,7 @@ func (p *Producer[Request, Response]) promisesLen() int { return len(p.promises) } -func (p *Producer[Request, Response]) produce(ctx context.Context, id string, value Request) (*containers.Promise[Response], error) { - if id != "" { - msgKey := MessageKeyFor(p.redisStream, id) - - // If the request has already been solved by a consumer - if res, err := p.client.Get(ctx, msgKey).Result(); err == nil { - var resp Response - if err := json.Unmarshal([]byte(res), &resp); err != nil { - log.Error("Error unmarshaling", "value", res, "error", err) - return nil, fmt.Errorf("error unmarshalling: %w", err) - } else { - pr := containers.NewPromise[Response](nil) - pr.Produce(resp) - return &pr, nil - } - } else if !errors.Is(err, redis.Nil) { - log.Error("error while checking for response to a request in redis", "err", err) - } - - // Check for duplicate unsolved request messages in stream - if res, err := p.client.Get(ctx, msgKey+UNIQUEID_MSGID_MAP_KEY).Result(); err == nil { - log.Info("Request already submitted by another producer", "msgId", res, "requestUniqueId", id) - p.promisesLock.Lock() - defer p.promisesLock.Unlock() - pr := containers.NewPromise[Response](nil) - p.promises[id] = &MsgIdAndPromise[Response]{ - msgID: res, - promise: &pr, - } - return 
&pr, nil - } - } - +func (p *Producer[Request, Response]) produce(ctx context.Context, value Request) (*containers.Promise[Response], error) { val, err := json.Marshal(value) if err != nil { return nil, fmt.Errorf("marshaling value: %w", err) @@ -277,30 +233,15 @@ func (p *Producer[Request, Response]) produce(ctx context.Context, id string, va if err != nil { return nil, fmt.Errorf("adding values to redis: %w", err) } - - if id == "" { - // If unique id doesn't exist, use the newly created msgId as unique id and follow the same steps as before - log.Info("Request doesn't have a unique identifier (SelfHash field set), defaulting to using redis stream messageId", "msgId", msgId) - id = msgId - } - - // Try adding key that flags that request is being processed - if err := p.client.Set(ctx, MessageKeyFor(p.redisStream, id)+UNIQUEID_MSGID_MAP_KEY, msgId, p.cfg.ResponseEntryTimeout).Err(); err != nil { - log.Error("Error adding key to redis that flags that a request is being processed, stream may encounter duplicate requests", "err", err) - } - - pr := containers.NewPromise[Response](nil) - p.promises[id] = &MsgIdAndPromise[Response]{ - msgID: msgId, - promise: &pr, - } - return &pr, nil + promise := containers.NewPromise[Response](nil) + p.promises[msgId] = &promise + return &promise, nil } -func (p *Producer[Request, Response]) Produce(ctx context.Context, id string, value Request) (*containers.Promise[Response], error) { +func (p *Producer[Request, Response]) Produce(ctx context.Context, value Request) (*containers.Promise[Response], error) { log.Debug("Redis stream producing", "value", value) p.once.Do(func() { p.StopWaiter.CallIteratively(p.checkResponses) }) - return p.produce(ctx, id, value) + return p.produce(ctx, value) } diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 13258aea3d..391bd7555c 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -2,9 +2,6 @@ package pubsub import ( "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" "errors" "fmt" "os" @@ -28,17 +25,6 @@ var ( type testRequest struct { Request string IsInvalid bool - SelfHash string // Is a unique identifier which can be used to compare any two validationInputs -} - -// SetSelfHash should be only called once. 
In the context of redis streams- by the producer -func (t *testRequest) SetSelfHash() { - jsonData, err := json.Marshal(t) - if err != nil { - return - } - hash := sha256.Sum256(jsonData) - t.SelfHash = hex.EncodeToString(hash[:]) } type testResponse struct { @@ -117,13 +103,10 @@ func msgForIndex(idx int) string { return fmt.Sprintf("msg: %d", idx) } -func wantMessages(n int, group string, withDuplicates bool) []string { +func wantMessages(n int, group string) []string { var ret []string for i := 0; i < n; i++ { ret = append(ret, group+msgForIndex(i)) - if withDuplicates && i%3 == 0 { - ret = append(ret, msgForIndex(i)) - } } sort.Strings(ret) return ret @@ -145,8 +128,7 @@ func produceMessages(ctx context.Context, msgs []string, producer *Producer[test if withInvalidEntries && i%50 == 0 { req.IsInvalid = true } - req.SetSelfHash() - promise, err := producer.Produce(ctx, req.SelfHash, req) + promise, err := producer.Produce(ctx, req) if err != nil { return nil, err } @@ -199,7 +181,7 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques gotMessages[idx][res.ID] = res.Value.Request if !res.Value.IsInvalid { resp := fmt.Sprintf("result for: %v", res.ID) - if err := c.SetResult(ctx, res.Value.SelfHash, res.ID, testResponse{Response: resp}); err != nil { + if err := c.SetResult(ctx, res.ID, testResponse{Response: resp}); err != nil { t.Errorf("Error setting a result: %v", err) } wantResponses[idx] = append(wantResponses[idx], resp) @@ -218,7 +200,6 @@ func TestRedisProduceComplex(t *testing.T) { name string entriesCount []int numProducers int - withDuplicates bool // If this is set, then every fourth entry (while generation) of each entries list is equal killConsumers bool withInvalidEntries bool // If this is set, then every 50th entry is invalid (requests that can't be solved by any consumer) }{ @@ -228,34 +209,27 @@ func TestRedisProduceComplex(t *testing.T) { numProducers: 1, }, { - name: "one producer, some consumers killed, others should take over their work", - entriesCount: []int{messagesCount}, - numProducers: 1, - killConsumers: true, - }, - { - name: "two producers, all consumers are active, all unique entries", + name: "two producers, all consumers are active", entriesCount: []int{20, 20}, numProducers: 2, }, { - name: "two producers, all consumers are active, some duplicate entries", - entriesCount: []int{20, 20}, - numProducers: 2, - withDuplicates: true, + name: "one producer, some consumers killed, others should take over their work", + entriesCount: []int{messagesCount}, + numProducers: 1, + killConsumers: true, }, + { - name: "two producers, some consumers killed, others should take over their work, some duplicate entries, unequal number of requests from producers", - entriesCount: []int{messagesCount, 2 * messagesCount}, - numProducers: 2, - withDuplicates: true, - killConsumers: true, + name: "two producers, some consumers killed, others should take over their work, unequal number of requests from producers", + entriesCount: []int{messagesCount, 2 * messagesCount}, + numProducers: 2, + killConsumers: true, }, { - name: "two producers, some consumers killed, others should take over their work, some duplicate entries, some invalid entries, unequal number of requests from producers", + name: "two producers, some consumers killed, others should take over their work, some invalid entries, unequal number of requests from producers", entriesCount: []int{messagesCount, 2 * messagesCount}, numProducers: 2, - withDuplicates: true, killConsumers: true, 
withInvalidEntries: true, }, @@ -281,10 +255,10 @@ func TestRedisProduceComplex(t *testing.T) { var entries [][]string if tc.numProducers == 2 { - entries = append(entries, wantMessages(tc.entriesCount[0], "1.", tc.withDuplicates)) - entries = append(entries, wantMessages(tc.entriesCount[1], "2.", tc.withDuplicates)) + entries = append(entries, wantMessages(tc.entriesCount[0], "1.")) + entries = append(entries, wantMessages(tc.entriesCount[1], "2.")) } else { - entries = append(entries, wantMessages(tc.entriesCount[0], "", tc.withDuplicates)) + entries = append(entries, wantMessages(tc.entriesCount[0], "")) } var promises [][]*containers.Promise[testResponse] @@ -322,7 +296,7 @@ func TestRedisProduceComplex(t *testing.T) { for i := 0; i < tc.numProducers; i++ { grs, errIndexes := awaitResponses(ctx, promises[i]) if tc.withInvalidEntries { - if errIndexes[len(errIndexes)-1]+50 <= len(entries[i]) { + if errIndexes[len(errIndexes)-1]+50 < len(entries[i]) { t.Fatalf("Unexpected number of invalid requests while awaiting responses") } for j, idx := range errIndexes { @@ -353,13 +327,12 @@ func TestRedisProduceComplex(t *testing.T) { for i := 0; i < tc.numProducers; i++ { combinedEntries = append(combinedEntries, entries[i]...) } - wantMsgs := removeDuplicates(combinedEntries) + wantMsgs := combinedEntries if diff := cmp.Diff(wantMsgs, got); diff != "" { t.Errorf("Unexpected diff (-want +got):\n%s\n", diff) } - // Consumers are not supposed to get duplicate requests - gotResponses = removeDuplicates(gotResponses) + sort.Strings(gotResponses) wantResp := flatten(wantResponses) if diff := cmp.Diff(wantResp, gotResponses); diff != "" { t.Errorf("Unexpected diff in responses:\n%s\n", diff) diff --git a/validator/client/redis/producer.go b/validator/client/redis/producer.go index ffa6146f81..c5726ffe8b 100644 --- a/validator/client/redis/producer.go +++ b/validator/client/redis/producer.go @@ -136,8 +136,7 @@ func (c *ValidationClient) Launch(entry *validator.ValidationInput, moduleRoot c errPromise := containers.NewReadyPromise(validator.GoGlobalState{}, fmt.Errorf("no validation is configured for wasm root %v", moduleRoot)) return server_common.NewValRun(errPromise, moduleRoot) } - entry.SetSelfHash() - promise, err := producer.Produce(c.GetContext(), entry.SelfHash, entry) + promise, err := producer.Produce(c.GetContext(), entry) if err != nil { errPromise := containers.NewReadyPromise(validator.GoGlobalState{}, fmt.Errorf("error producing input: %w", err)) return server_common.NewValRun(errPromise, moduleRoot) diff --git a/validator/validation_entry.go b/validator/validation_entry.go index d51a9cbe55..d340993fa2 100644 --- a/validator/validation_entry.go +++ b/validator/validation_entry.go @@ -1,13 +1,8 @@ package validator import ( - "encoding/json" - "fmt" - - "github.com/cespare/xxhash/v2" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" ) @@ -27,19 +22,4 @@ type ValidationInput struct { DelayedMsg []byte StartState GoGlobalState DebugChain bool - - SelfHash string // Is a unique identifier which can be used to compare any two instances of validationInput -} - -// SetSelfHash should be only called once. 
In the context of redis streams- by the producer, before submitting a request -func (v *ValidationInput) SetSelfHash() { - if v.SelfHash != "" { - log.Warn("SetSelfHash called more then once") - return // exiting early as hash has already been set - } - jsonData, err := json.Marshal(v) - if err != nil { - return - } - v.SelfHash = fmt.Sprintf("%d", xxhash.Sum64(jsonData)) } diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index 558cc6ba12..4d19905ab7 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -156,7 +156,7 @@ func (s *ValidationServer) Start(ctx_in context.Context) { log.Error("Error validating", "request value", work.req.Value, "error", err) close(work.req.AckNotifier) } else { - err := s.consumers[work.moduleRoot].SetResult(ctx, work.req.Value.SelfHash, work.req.ID, res) + err := s.consumers[work.moduleRoot].SetResult(ctx, work.req.ID, res) // Even in error we close ackNotifier as there's no retry mechanism here and closing it will alow other consumers to autoclaim close(work.req.AckNotifier) if err != nil { From 910666f7bd2db9575751ead1f6a9136da34e47b5 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 26 Sep 2024 14:37:51 +0200 Subject: [PATCH 11/41] add MessageRunMode to ProduceBlock parameters --- arbos/block_processor.go | 5 ++++- cmd/replay/main.go | 3 ++- execution/gethexec/block_recorder.go | 2 ++ execution/gethexec/executionengine.go | 6 ++++++ system_tests/state_fuzz_test.go | 7 ++++--- 5 files changed, 18 insertions(+), 5 deletions(-) diff --git a/arbos/block_processor.go b/arbos/block_processor.go index b180405c43..19fc36b351 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -144,6 +144,7 @@ func ProduceBlock( chainContext core.ChainContext, chainConfig *params.ChainConfig, isMsgForPrefetch bool, + runMode core.MessageRunMode, ) (*types.Block, types.Receipts, error) { txes, err := ParseL2Transactions(message, chainConfig.ChainID) if err != nil { @@ -153,7 +154,7 @@ func ProduceBlock( hooks := NoopSequencingHooks() return ProduceBlockAdvanced( - message.Header, txes, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, hooks, isMsgForPrefetch, + message.Header, txes, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, hooks, isMsgForPrefetch, runMode, ) } @@ -168,6 +169,7 @@ func ProduceBlockAdvanced( chainConfig *params.ChainConfig, sequencingHooks *SequencingHooks, isMsgForPrefetch bool, + runMode core.MessageRunMode, ) (*types.Block, types.Receipts, error) { state, err := arbosState.OpenSystemArbosState(statedb, nil, true) @@ -318,6 +320,7 @@ func ProduceBlockAdvanced( tx, &header.GasUsed, vm.Config{}, + runMode, func(result *core.ExecutionResult) error { return hooks.PostTxFilter(header, state, tx, sender, dataGas, result) }, diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 0fe56eb4c9..d10d57a9c7 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -291,7 +292,7 @@ func main() { message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee) chainContext := WavmChainContext{} - newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, 
chainConfig, false) + newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false, core.MessageReplayMode) // TODO verify runMode if err != nil { panic(err) } diff --git a/execution/gethexec/block_recorder.go b/execution/gethexec/block_recorder.go index a31b6b3736..a3af7876a8 100644 --- a/execution/gethexec/block_recorder.go +++ b/execution/gethexec/block_recorder.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -154,6 +155,7 @@ func (r *BlockRecorder) RecordBlockCreation( chaincontext, chainConfig, false, + core.MessageReplayMode, ) if err != nil { return nil, err diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 8d6484e3c9..d21522d2d4 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -505,6 +505,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. s.bc.Config(), hooks, false, + core.MessageCommitMode, ) if err != nil { return nil, err @@ -661,6 +662,10 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith statedb.StartPrefetcher("TransactionStreamer") defer statedb.StopPrefetcher() + runMode := core.MessageCommitMode + if isMsgForPrefetch { + runMode = core.MessageReplayMode + } block, receipts, err := arbos.ProduceBlock( msg.Message, msg.DelayedMessagesRead, @@ -669,6 +674,7 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith s.bc, s.bc.Config(), isMsgForPrefetch, + runMode, ) return block, statedb, receipts, err diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 24140e480d..8fdfa3a098 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -38,6 +38,7 @@ func BuildBlock( chainConfig *params.ChainConfig, inbox arbstate.InboxBackend, seqBatch []byte, + runMode core.MessageRunMode, // TODO do we need to fuzz runMode? ) (*types.Block, error) { var delayedMessagesRead uint64 if lastBlockHeader != nil { @@ -63,7 +64,7 @@ func BuildBlock( } block, _, err := arbos.ProduceBlock( - l1Message, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false, + l1Message, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false, runMode, ) return block, err } @@ -127,7 +128,7 @@ func (c noopChainContext) GetHeader(common.Hash, uint64) *types.Header { } func FuzzStateTransition(f *testing.F) { - f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte) { + f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte, runMode uint8) { if len(seqMsg) > 0 && daprovider.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { return } @@ -201,7 +202,7 @@ func FuzzStateTransition(f *testing.F) { positionWithinMessage: 0, delayedMessages: delayedMessages, } - _, err = BuildBlock(statedb, genesis, noopChainContext{}, params.ArbitrumOneChainConfig(), inbox, seqBatch) + _, err = BuildBlock(statedb, genesis, noopChainContext{}, params.ArbitrumOneChainConfig(), inbox, seqBatch, core.MessageRunMode(runMode)) if err != nil { // With the fixed header it shouldn't be possible to read a delayed message, // and no other type of error should be possible. 
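The run-mode choice that this patch makes inline at each call site is a single decision; a minimal sketch of it as a helper, assuming only the two modes the series actually uses (the function name is illustrative, not part of the patch):

	// Sketch only: pick the core.MessageRunMode for block production,
	// mirroring createBlockFromNextMessage above. Prefetch executions are
	// treated as replays rather than canonical commits.
	func runModeFor(isMsgForPrefetch bool) core.MessageRunMode {
		if isMsgForPrefetch {
			return core.MessageReplayMode
		}
		return core.MessageCommitMode
	}

A caller would then pass the result as the final argument to arbos.ProduceBlock, exactly as the executionengine.go hunk above does inline.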
From 05133258c92380005e1492304fbf08e137ef1c5d Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 26 Sep 2024 17:09:49 +0200 Subject: [PATCH 12/41] update geth pin --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 17cd001675..0c3f6eba21 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 17cd00167543a5a2b0b083e32820051100154c2f +Subproject commit 0c3f6eba21cbe0196b298dfbd3fa7d51dffd627e From ed680537f60ce8af3b6caa660b41698e77f2e007 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 26 Sep 2024 18:16:51 +0200 Subject: [PATCH 13/41] remove outdated todo comments --- cmd/replay/main.go | 2 +- system_tests/state_fuzz_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/replay/main.go b/cmd/replay/main.go index d10d57a9c7..661040ea10 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -292,7 +292,7 @@ func main() { message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee) chainContext := WavmChainContext{} - newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false, core.MessageReplayMode) // TODO verify runMode + newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false, core.MessageReplayMode) if err != nil { panic(err) } diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 8fdfa3a098..c0477060ed 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -38,7 +38,7 @@ func BuildBlock( chainConfig *params.ChainConfig, inbox arbstate.InboxBackend, seqBatch []byte, - runMode core.MessageRunMode, // TODO do we need to fuzz runMode? 
+ runMode core.MessageRunMode, ) (*types.Block, error) { var delayedMessagesRead uint64 if lastBlockHeader != nil { From 39e94812606ba64d8d719be9729c955a0907d4d7 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 27 Sep 2024 00:51:36 +0200 Subject: [PATCH 14/41] fuzz state transition: skip malformed batch posting report --- system_tests/state_fuzz_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index c0477060ed..2287870cb5 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -60,7 +60,8 @@ func BuildBlock( } err = l1Message.FillInBatchGasCost(batchFetcher) if err != nil { - return nil, err + // skip malformed batch posting report + return nil, nil } block, _, err := arbos.ProduceBlock( From e4d4b971d3ae60b124b220f3e8a6f745faed29cc Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 27 Sep 2024 00:55:45 +0200 Subject: [PATCH 15/41] fuzz state transition: test only existing message run modes --- system_tests/state_fuzz_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 2287870cb5..d722aa4212 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -129,7 +129,7 @@ func (c noopChainContext) GetHeader(common.Hash, uint64) *types.Header { } func FuzzStateTransition(f *testing.F) { - f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte, runMode uint8) { + f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte, runModeSeed uint8) { if len(seqMsg) > 0 && daprovider.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { return } @@ -203,7 +203,9 @@ func FuzzStateTransition(f *testing.F) { positionWithinMessage: 0, delayedMessages: delayedMessages, } - _, err = BuildBlock(statedb, genesis, noopChainContext{}, params.ArbitrumOneChainConfig(), inbox, seqBatch, core.MessageRunMode(runMode)) + numberOfMessageRunModes := uint8(core.MessageReplayMode) + 1 // TODO update number of run modes when new mode is added + runMode := core.MessageRunMode(runModeSeed % numberOfMessageRunModes) + _, err = BuildBlock(statedb, genesis, noopChainContext{}, params.ArbitrumOneChainConfig(), inbox, seqBatch, runMode) if err != nil { // With the fixed header it shouldn't be possible to read a delayed message, // and no other type of error should be possible. 
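The modulo mapping above is the usual way to constrain a fuzz seed to a valid enum range without discarding seeds; a minimal sketch of the same technique as a standalone helper (hypothetical name, and it assumes core.MessageReplayMode stays the highest-valued mode, the same assumption the patch's own TODO comment records):

	// Sketch only: fold an arbitrary fuzz byte onto the defined run modes so
	// that every seed exercises a real mode. Must be updated when a new
	// core.MessageRunMode is added.
	func runModeFromSeed(runModeSeed uint8) core.MessageRunMode {
		numberOfMessageRunModes := uint8(core.MessageReplayMode) + 1
		return core.MessageRunMode(runModeSeed % numberOfMessageRunModes)
	}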
From 760081d9d0a34cc9acb6a509a953f689a47f36b3 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 27 Sep 2024 01:06:23 +0200 Subject: [PATCH 16/41] make lint happy --- system_tests/state_fuzz_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index d722aa4212..c8312350e6 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -61,6 +61,7 @@ func BuildBlock( err = l1Message.FillInBatchGasCost(batchFetcher) if err != nil { // skip malformed batch posting report + // nolint:nilerr return nil, nil } From 9a96fbf4fc58c598236cb486f22eb4967508c66f Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 27 Sep 2024 16:56:02 +0530 Subject: [PATCH 17/41] address PR comments --- pubsub/common.go | 4 +- pubsub/consumer.go | 20 +++++----- pubsub/producer.go | 62 ++++++++++++++--------------- pubsub/pubsub_test.go | 7 ++-- validator/valnode/redis/consumer.go | 4 +- 5 files changed, 45 insertions(+), 52 deletions(-) diff --git a/pubsub/common.go b/pubsub/common.go index 4b5778b9ba..ad36b6e622 100644 --- a/pubsub/common.go +++ b/pubsub/common.go @@ -9,9 +9,7 @@ import ( "github.com/redis/go-redis/v9" ) -const UNIQUEID_MSGID_MAP_KEY string = ".msgId" // Is used to map unique identifier to msgId of the message consisting request in the stream - -func MessageKeyFor(streamName, id string) string { return fmt.Sprintf("%s.%s", streamName, id) } +func ResultKeyFor(streamName, id string) string { return fmt.Sprintf("%s.%s", streamName, id) } // CreateStream tries to create stream with given name, if it already exists // does not return an error. diff --git a/pubsub/consumer.go b/pubsub/consumer.go index fa1f5894b4..3265744218 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -51,9 +51,9 @@ type Consumer[Request any, Response any] struct { } type Message[Request any] struct { - ID string - Value Request - AckNotifier chan struct{} + ID string + Value Request + Ack func() } func NewConsumer[Request any, Response any](client redis.UniversalClient, streamName string, cfg *ConsumerConfig) (*Consumer[Request, Response], error) { @@ -101,7 +101,7 @@ func decrementMsgIdByOne(msgId string) string { } else if id[0] > 0 { return strconv.FormatUint(id[0]-1, 10) + "-" + strconv.FormatUint(math.MaxUint64, 10) } else { - log.Error("Error decrementing start of XAutoClaim by one, defaulting to 0", "err", err) + log.Error("Error decrementing start of XAutoClaim by one, defaulting to 0") return "0" } } @@ -207,9 +207,9 @@ func (c *Consumer[Request, Response]) Consume(ctx context.Context) (*Message[Req }) log.Debug("Redis stream consuming", "consumer_id", c.id, "message_id", messages[0].ID) return &Message[Request]{ - ID: messages[0].ID, - Value: req, - AckNotifier: ackNotifier, + ID: messages[0].ID, + Value: req, + Ack: func() { close(ackNotifier) }, }, nil } @@ -218,9 +218,9 @@ func (c *Consumer[Request, Response]) SetResult(ctx context.Context, messageID s if err != nil { return fmt.Errorf("marshaling result: %w", err) } - msgKey := MessageKeyFor(c.StreamName(), messageID) - log.Debug("consumer: setting result", "cid", c.id, "msgIdInStream", messageID, "msgKeyInRedis", msgKey) - acquired, err := c.client.SetNX(ctx, msgKey, resp, c.cfg.ResponseEntryTimeout).Result() + resultKey := ResultKeyFor(c.StreamName(), messageID) + log.Debug("consumer: setting result", "cid", c.id, "msgIdInStream", messageID, "resultKeyInRedis", resultKey) + acquired, err := c.client.SetNX(ctx, resultKey, resp, 
c.cfg.ResponseEntryTimeout).Result() if err != nil || !acquired { return fmt.Errorf("setting result for message with message-id in stream: %v, error: %w", messageID, err) } diff --git a/pubsub/producer.go b/pubsub/producer.go index 932b455d6a..5c87f4f722 100644 --- a/pubsub/producer.go +++ b/pubsub/producer.go @@ -50,27 +50,22 @@ type Producer[Request any, Response any] struct { type ProducerConfig struct { // Interval duration for checking the result set by consumers. CheckResultInterval time.Duration `koanf:"check-result-interval"` - // Timeout of entry's written to redis by producer - ResponseEntryTimeout time.Duration `koanf:"response-entry-timeout"` // RequestTimeout is a TTL for any message sent to the redis stream RequestTimeout time.Duration `koanf:"request-timeout"` } var DefaultProducerConfig = ProducerConfig{ - CheckResultInterval: 5 * time.Second, - ResponseEntryTimeout: time.Hour, - RequestTimeout: time.Hour, // should we increase this? + CheckResultInterval: 5 * time.Second, + RequestTimeout: 3 * time.Hour, } var TestProducerConfig = ProducerConfig{ - CheckResultInterval: 5 * time.Millisecond, - ResponseEntryTimeout: time.Minute, - RequestTimeout: time.Minute, + CheckResultInterval: 5 * time.Millisecond, + RequestTimeout: time.Minute, } func ProducerAddConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".check-result-interval", DefaultProducerConfig.CheckResultInterval, "interval in which producer checks pending messages whether consumer processing them is inactive") - f.Duration(prefix+".response-entry-timeout", DefaultProducerConfig.ResponseEntryTimeout, "timeout after which responses written from producer to the redis are cleared. Currently used for the key mapping unique request id to redis stream message id") f.Duration(prefix+".request-timeout", DefaultProducerConfig.RequestTimeout, "timeout after which the message in redis stream is considered as errored, this prevents workers from working on wrong requests indefinitely") } @@ -133,39 +128,30 @@ func cmpMsgId(msgId1, msgId2 string) int { // checkResponses checks iteratively whether response for the promise is ready. 
func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.Duration {
-	pelData, err := p.client.XPending(ctx, p.redisStream, p.redisGroup).Result()
-	if err != nil {
-		log.Error("error getting PEL data from xpending, xtrimming is disabled", "err", err)
-	}
 	log.Debug("redis producer: check responses starting")
 	p.promisesLock.Lock()
 	defer p.promisesLock.Unlock()
 	responded := 0
 	errored := 0
 	checked := 0
+	allowedOldestID := fmt.Sprintf("%d-0", time.Now().Add(-p.cfg.RequestTimeout).UnixMilli())
 	for id, promise := range p.promises {
 		if ctx.Err() != nil {
 			return 0
 		}
 		checked++
-		msgKey := MessageKeyFor(p.redisStream, id)
-		res, err := p.client.Get(ctx, msgKey).Result()
+		resultKey := ResultKeyFor(p.redisStream, id)
+		res, err := p.client.Get(ctx, resultKey).Result()
 		if err != nil {
 			if !errors.Is(err, redis.Nil) {
-				log.Error("Error reading value in redis", "key", msgKey, "error", err)
-			} else {
+				log.Error("Error reading value in redis", "key", resultKey, "error", err)
+			} else if cmpMsgId(id, allowedOldestID) == -1 {
 				// The request this producer is waiting for has been past its TTL or is older than current PEL's lower,
 				// so safe to error and stop tracking this promise
-				allowedOldestID := fmt.Sprintf("%d-0", time.Now().Add(-p.cfg.RequestTimeout).UnixMilli())
-				if pelData != nil && pelData.Lower != "" {
-					allowedOldestID = pelData.Lower
-				}
-				if cmpMsgId(id, allowedOldestID) == -1 {
-					promise.ProduceError(errors.New("error getting response, request has been waiting for too long"))
-					log.Error("error getting response, request has been waiting past its TTL")
-					errored++
-					delete(p.promises, id)
-				}
+				promise.ProduceError(errors.New("error getting response, request has been waiting for too long"))
+				log.Error("error getting response, request has been waiting past its TTL")
+				errored++
+				delete(p.promises, id)
 			}
 			continue
 		}
@@ -178,16 +164,25 @@ func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.D
 			promise.Produce(resp)
 			responded++
 		}
-		p.client.Del(ctx, msgKey)
+		p.client.Del(ctx, resultKey)
 		delete(p.promises, id)
 	}
+	log.Debug("checkResponses", "responded", responded, "errored", errored, "checked", checked)
+	return p.cfg.CheckResultInterval
+}
+
+func (p *Producer[Request, Response]) clearMessages(ctx context.Context) time.Duration {
+	pelData, err := p.client.XPending(ctx, p.redisStream, p.redisGroup).Result()
+	if err != nil {
+		log.Error("error getting PEL data from xpending, xtrimming is disabled", "err", err)
+	}
 	// XDEL on consumer side already deletes acked messages (mark as deleted) but doesnt claim the memory back, XTRIM helps in claiming this memory in normal conditions
 	// pelData might be outdated when we do the xtrim, but thats ok as the messages are also being trimmed by other producers
 	if pelData != nil && pelData.Lower != "" {
 		trimmed, trimErr := p.client.XTrimMinID(ctx, p.redisStream, pelData.Lower).Result()
-		log.Debug("trimming", "xTrimMinID", pelData.Lower, "trimmed", trimmed, "responded", responded, "errored", errored, "trim-err", trimErr, "checked", checked)
+		log.Debug("trimming", "xTrimMinID", pelData.Lower, "trimmed", trimmed, "trim-err", trimErr)
 		// Check if pelData.Lower has been past its TTL and if it is then ack it to remove from PEL and delete it, once
-		// its taken out from PEL the producer that sent this request will handle the corresponding promise accordingly (if PEL is non-empty)
+		// it's taken out from PEL, the producer that sent this request will handle the corresponding promise accordingly (as it's past its TTL)
allowedOldestID := fmt.Sprintf("%d-0", time.Now().Add(-p.cfg.RequestTimeout).UnixMilli()) if cmpMsgId(pelData.Lower, allowedOldestID) == -1 { if err := p.client.XClaim(ctx, &redis.XClaimArgs{ @@ -198,18 +193,18 @@ func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.D Messages: []string{pelData.Lower}, }).Err(); err != nil { log.Error("error claiming PEL's lower message thats past its TTL", "msgID", pelData.Lower, "err", err) - return p.cfg.CheckResultInterval + return 5 * p.cfg.CheckResultInterval } if _, err := p.client.XAck(ctx, p.redisStream, p.redisGroup, pelData.Lower).Result(); err != nil { log.Error("error acking PEL's lower message thats past its TTL", "msgID", pelData.Lower, "err", err) - return p.cfg.CheckResultInterval + return 5 * p.cfg.CheckResultInterval } if _, err := p.client.XDel(ctx, p.redisStream, pelData.Lower).Result(); err != nil { log.Error("error deleting PEL's lower message thats past its TTL", "msgID", pelData.Lower, "err", err) } } } - return p.cfg.CheckResultInterval + return 5 * p.cfg.CheckResultInterval } func (p *Producer[Request, Response]) Start(ctx context.Context) { @@ -246,6 +241,7 @@ func (p *Producer[Request, Response]) Produce(ctx context.Context, value Request log.Debug("Redis stream producing", "value", value) p.once.Do(func() { p.StopWaiter.CallIteratively(p.checkResponses) + p.StopWaiter.CallIteratively(p.clearMessages) }) return p.produce(ctx, value) } diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 391bd7555c..8bd1aed25d 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -48,9 +48,8 @@ func destroyRedisGroup(ctx context.Context, t *testing.T, streamName string, cli func producerCfg() *ProducerConfig { return &ProducerConfig{ - CheckResultInterval: TestProducerConfig.CheckResultInterval, - ResponseEntryTimeout: TestProducerConfig.ResponseEntryTimeout, - RequestTimeout: 2 * time.Second, + CheckResultInterval: TestProducerConfig.CheckResultInterval, + RequestTimeout: 2 * time.Second, } } @@ -186,7 +185,7 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques } wantResponses[idx] = append(wantResponses[idx], resp) } - close(res.AckNotifier) + res.Ack() } }) } diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index c87191e444..4392a3c91e 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -161,12 +161,12 @@ func (s *ValidationServer) Start(ctx_in context.Context) { res, err := valRun.Await(ctx) if err != nil { log.Error("Error validating", "request value", work.req.Value, "error", err) - close(work.req.AckNotifier) + work.req.Ack() } else { log.Debug("done work", "thread", i, "workid", work.req.ID) err := s.consumers[work.moduleRoot].SetResult(ctx, work.req.ID, res) // Even in error we close ackNotifier as there's no retry mechanism here and closing it will alow other consumers to autoclaim - close(work.req.AckNotifier) + work.req.Ack() if err != nil { log.Error("Error setting result for request", "id", work.req.ID, "result", res, "error", err) } From 43a54e7cef089510869deb3afa23c79f4c9db2c5 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 27 Sep 2024 16:58:47 +0530 Subject: [PATCH 18/41] remove unnecessary error log in decrementMsgIdByOne --- pubsub/consumer.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 3265744218..391042bd7e 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -100,10 +100,8 @@ 
func decrementMsgIdByOne(msgId string) string { return strconv.FormatUint(id[0], 10) + "-" + strconv.FormatUint(id[1]-1, 10) } else if id[0] > 0 { return strconv.FormatUint(id[0]-1, 10) + "-" + strconv.FormatUint(math.MaxUint64, 10) - } else { - log.Error("Error decrementing start of XAutoClaim by one, defaulting to 0") - return "0" } + return "0" } // Consumer first checks it there exists pending message that is claimed by From 14d57e1da2482b8fb145eb94d6e806876cf63a58 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Mon, 30 Sep 2024 11:08:38 -0300 Subject: [PATCH 19/41] DisableStylusCacheMetricsCollection flag --- execution/gethexec/blockchain.go | 30 ++++++++++++----------- execution/gethexec/executionengine.go | 34 +++++++++++++++++++-------- execution/gethexec/node.go | 3 +++ system_tests/common_test.go | 27 +++++++++++---------- 4 files changed, 57 insertions(+), 37 deletions(-) diff --git a/execution/gethexec/blockchain.go b/execution/gethexec/blockchain.go index 9b0c1a6f2f..fda8f49093 100644 --- a/execution/gethexec/blockchain.go +++ b/execution/gethexec/blockchain.go @@ -26,20 +26,21 @@ import ( ) type CachingConfig struct { - Archive bool `koanf:"archive"` - BlockCount uint64 `koanf:"block-count"` - BlockAge time.Duration `koanf:"block-age"` - TrieTimeLimit time.Duration `koanf:"trie-time-limit"` - TrieDirtyCache int `koanf:"trie-dirty-cache"` - TrieCleanCache int `koanf:"trie-clean-cache"` - SnapshotCache int `koanf:"snapshot-cache"` - DatabaseCache int `koanf:"database-cache"` - SnapshotRestoreGasLimit uint64 `koanf:"snapshot-restore-gas-limit"` - MaxNumberOfBlocksToSkipStateSaving uint32 `koanf:"max-number-of-blocks-to-skip-state-saving"` - MaxAmountOfGasToSkipStateSaving uint64 `koanf:"max-amount-of-gas-to-skip-state-saving"` - StylusLRUCacheCapacity uint32 `koanf:"stylus-lru-cache-capacity"` - StateScheme string `koanf:"state-scheme"` - StateHistory uint64 `koanf:"state-history"` + Archive bool `koanf:"archive"` + BlockCount uint64 `koanf:"block-count"` + BlockAge time.Duration `koanf:"block-age"` + TrieTimeLimit time.Duration `koanf:"trie-time-limit"` + TrieDirtyCache int `koanf:"trie-dirty-cache"` + TrieCleanCache int `koanf:"trie-clean-cache"` + SnapshotCache int `koanf:"snapshot-cache"` + DatabaseCache int `koanf:"database-cache"` + SnapshotRestoreGasLimit uint64 `koanf:"snapshot-restore-gas-limit"` + MaxNumberOfBlocksToSkipStateSaving uint32 `koanf:"max-number-of-blocks-to-skip-state-saving"` + MaxAmountOfGasToSkipStateSaving uint64 `koanf:"max-amount-of-gas-to-skip-state-saving"` + StylusLRUCacheCapacity uint32 `koanf:"stylus-lru-cache-capacity"` + DisableStylusCacheMetricsCollection bool `koanf:"disable-stylus-cache-metrics-collection"` + StateScheme string `koanf:"state-scheme"` + StateHistory uint64 `koanf:"state-history"` } func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -55,6 +56,7 @@ func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint32(prefix+".max-number-of-blocks-to-skip-state-saving", DefaultCachingConfig.MaxNumberOfBlocksToSkipStateSaving, "maximum number of blocks to skip state saving to persistent storage (archive node only) -- warning: this option seems to cause issues") f.Uint64(prefix+".max-amount-of-gas-to-skip-state-saving", DefaultCachingConfig.MaxAmountOfGasToSkipStateSaving, "maximum amount of gas in blocks to skip saving state to Persistent storage (archive node only) -- warning: this option seems to cause issues") f.Uint32(prefix+".stylus-lru-cache-capacity", DefaultCachingConfig.StylusLRUCacheCapacity, 
"capacity, in megabytes, of the LRU cache that keeps initialized stylus programs") + f.Bool(prefix+".disable-stylus-cache-metrics-collection", DefaultCachingConfig.DisableStylusCacheMetricsCollection, "disable metrics collection for the stylus cache") f.String(prefix+".state-scheme", DefaultCachingConfig.StateScheme, "scheme to use for state trie storage (hash, path)") f.Uint64(prefix+".state-history", DefaultCachingConfig.StateHistory, "number of recent blocks to retain state history for (path state-scheme only)") } diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index a0f3a2f59a..b36340757a 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -87,6 +87,8 @@ type ExecutionEngine struct { reorgSequencing bool + disableStylusCacheMetricsCollection bool + prefetchBlock bool cachedL1PriceData *L1PriceData @@ -212,6 +214,16 @@ func (s *ExecutionEngine) EnableReorgSequencing() { s.reorgSequencing = true } +func (s *ExecutionEngine) DisableStylusCacheMetricsCollection() { + if s.Started() { + panic("trying to disable stylus cache metrics collection after start") + } + if s.disableStylusCacheMetricsCollection { + panic("trying to disable stylus cache metrics collection when already set") + } + s.disableStylusCacheMetricsCollection = true +} + func (s *ExecutionEngine) EnablePrefetchBlock() { if s.Started() { panic("trying to enable prefetch block after start") @@ -963,15 +975,17 @@ func (s *ExecutionEngine) Start(ctx_in context.Context) { } } }) - // periodically update stylus lru cache metrics - s.LaunchThread(func(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case <-time.After(time.Minute): - programs.GetWasmLruCacheMetrics() + if !s.disableStylusCacheMetricsCollection { + // periodically update stylus lru cache metrics + s.LaunchThread(func(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case <-time.After(time.Minute): + programs.GetWasmLruCacheMetrics() + } } - } - }) + }) + } } diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 5a1efc6d08..1b8b756502 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -187,6 +187,9 @@ func CreateExecutionNode( if config.EnablePrefetchBlock { execEngine.EnablePrefetchBlock() } + if config.Caching.DisableStylusCacheMetricsCollection { + execEngine.DisableStylusCacheMetricsCollection() + } if err != nil { return nil, err } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 5902a670ba..6a4e551906 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -155,19 +155,20 @@ func (tc *TestClient) EnsureTxSucceededWithTimeout(transaction *types.Transactio } var TestCachingConfig = gethexec.CachingConfig{ - Archive: false, - BlockCount: 128, - BlockAge: 30 * time.Minute, - TrieTimeLimit: time.Hour, - TrieDirtyCache: 1024, - TrieCleanCache: 600, - SnapshotCache: 400, - DatabaseCache: 2048, - SnapshotRestoreGasLimit: 300_000_000_000, - MaxNumberOfBlocksToSkipStateSaving: 0, - MaxAmountOfGasToSkipStateSaving: 0, - StylusLRUCacheCapacity: 0, - StateScheme: env.GetTestStateScheme(), + Archive: false, + BlockCount: 128, + BlockAge: 30 * time.Minute, + TrieTimeLimit: time.Hour, + TrieDirtyCache: 1024, + TrieCleanCache: 600, + SnapshotCache: 400, + DatabaseCache: 2048, + SnapshotRestoreGasLimit: 300_000_000_000, + MaxNumberOfBlocksToSkipStateSaving: 0, + MaxAmountOfGasToSkipStateSaving: 0, + StylusLRUCacheCapacity: 0, + 
DisableStylusCacheMetricsCollection: true, + StateScheme: env.GetTestStateScheme(), } var DefaultTestForwarderConfig = gethexec.ForwarderConfig{ From 5e8c4a2535733f5cea6b50b57594b684e01464f6 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Mon, 30 Sep 2024 13:44:24 -0300 Subject: [PATCH 20/41] Stylus long term cache metrics --- arbitrator/stylus/src/cache.rs | 60 +++++++++++++++++++++++---- arbitrator/stylus/src/lib.rs | 16 +++---- arbos/programs/native.go | 59 ++++++++++++++++++++------ execution/gethexec/executionengine.go | 4 +- system_tests/program_test.go | 26 ++++++------ 5 files changed, 121 insertions(+), 44 deletions(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index c1fdaaccee..827e2beaa3 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -29,8 +29,16 @@ pub struct LruCounters { pub does_not_fit: u32, } +pub struct LongTermCounters { + pub hits: u32, + pub misses: u32, +} + pub struct InitCache { long_term: HashMap, + long_term_size_bytes: usize, + long_term_counters: LongTermCounters, + lru: CLruCache, lru_counters: LruCounters, } @@ -91,6 +99,20 @@ pub struct LruCacheMetrics { pub does_not_fit: u32, } +#[repr(C)] +pub struct LongTermCacheMetrics { + pub size_bytes: u64, + pub count: u32, + pub hits: u32, + pub misses: u32, +} + +#[repr(C)] +pub struct CacheMetrics { + pub lru: LruCacheMetrics, + pub long_term: LongTermCacheMetrics, +} + pub fn deserialize_module( module: &[u8], version: u16, @@ -117,6 +139,9 @@ impl InitCache { fn new(size_bytes: usize) -> Self { Self { long_term: HashMap::new(), + long_term_size_bytes: 0, + long_term_counters: LongTermCounters { hits: 0, misses: 0 }, + lru: CLruCache::with_config( CLruCacheConfig::new(NonZeroUsize::new(size_bytes).unwrap()) .with_scale(CustomWeightScale), @@ -142,8 +167,11 @@ impl InitCache { // See if the item is in the long term cache if let Some(item) = cache.long_term.get(&key) { - return Some(item.data()); + let data = item.data(); + cache.long_term_counters.hits += 1; + return Some(data); } + cache.long_term_counters.misses += 1; // See if the item is in the LRU cache, promoting if so if let Some(item) = cache.lru.get(&key) { @@ -174,6 +202,7 @@ impl InitCache { if let Some(item) = cache.lru.peek(&key).cloned() { if long_term_tag == Self::ARBOS_TAG { cache.long_term.insert(key, item.clone()); + cache.long_term_size_bytes += item.entry_size_estimate_bytes; } else { // only calls get to move the key to the head of the LRU list cache.lru.get(&key); @@ -195,6 +224,7 @@ impl InitCache { }; } else { cache.long_term.insert(key, item); + cache.long_term_size_bytes += entry_size_estimate_bytes; } Ok(data) } @@ -207,6 +237,7 @@ impl InitCache { let key = CacheKey::new(module_hash, version, debug); let mut cache = cache!(); if let Some(item) = cache.long_term.remove(&key) { + cache.long_term_size_bytes -= item.entry_size_estimate_bytes; if cache.lru.put_with_weight(key, item).is_err() { eprintln!("{}", Self::DOES_NOT_FIT_MSG); } @@ -225,23 +256,32 @@ impl InitCache { eprintln!("{}", Self::DOES_NOT_FIT_MSG); } } + cache.long_term_size_bytes = 0; } - pub fn get_lru_metrics() -> LruCacheMetrics { + pub fn get_metrics() -> CacheMetrics { let mut cache = cache!(); - let count = cache.lru.len(); - let metrics = LruCacheMetrics { - // add 1 to each entry to account that we subtracted 1 in the weight calculation - size_bytes: (cache.lru.weight() + count).try_into().unwrap(), + let lru_count = cache.lru.len(); + let lru_metrics = LruCacheMetrics { + // adds 1 to each 
entry to account that we subtracted 1 in the weight calculation + size_bytes: (cache.lru.weight() + lru_count).try_into().unwrap(), - count: count.try_into().unwrap(), + count: lru_count.try_into().unwrap(), hits: cache.lru_counters.hits, misses: cache.lru_counters.misses, does_not_fit: cache.lru_counters.does_not_fit, }; + let long_term_metrics = LongTermCacheMetrics { + size_bytes: cache.long_term_size_bytes.try_into().unwrap(), + count: cache.long_term.len().try_into().unwrap(), + + hits: cache.long_term_counters.hits, + misses: cache.long_term_counters.misses, + }; + // Empty counters. // go side, which is the only consumer of this function besides tests, // will read those counters and increment its own prometheus counters with them. @@ -250,8 +290,12 @@ impl InitCache { misses: 0, does_not_fit: 0, }; + cache.long_term_counters = LongTermCounters { hits: 0, misses: 0 }; - metrics + CacheMetrics { + lru: lru_metrics, + long_term: long_term_metrics, + } } // only used for testing diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index abea428167..c16f3d7598 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -11,7 +11,7 @@ use arbutil::{ format::DebugBytes, Bytes32, }; -use cache::{deserialize_module, InitCache, LruCacheMetrics}; +use cache::{deserialize_module, InitCache, CacheMetrics}; use evm_api::NativeRequestHandler; use eyre::ErrReport; use native::NativeInstance; @@ -364,10 +364,10 @@ pub unsafe extern "C" fn stylus_drop_vec(vec: RustBytes) { } } -/// Gets lru cache metrics. +/// Gets cache metrics. #[no_mangle] -pub extern "C" fn stylus_get_lru_cache_metrics() -> LruCacheMetrics { - InitCache::get_lru_metrics() +pub extern "C" fn stylus_get_cache_metrics() -> CacheMetrics { + InitCache::get_metrics() } /// Clears lru cache. @@ -377,18 +377,18 @@ pub extern "C" fn stylus_clear_lru_cache() { InitCache::clear_lru_cache() } -/// Gets lru entry size in bytes. +/// Gets entry size in bytes. /// Only used for testing purposes. 
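A note on the metrics contract behind these renames: get_metrics hands back hit/miss counts accumulated since the previous call and then zeroes them (see the "Empty counters" comment above), so every read is a delta rather than a running total. On the Go side that pairs with monotonic prometheus counters; here is a sketch of the polling side, assuming the UpdateWasmCacheMetrics wrapper introduced later in this series and a single poller (two concurrent pollers would each observe partial deltas):

func pollStylusCacheMetrics(ctx context.Context) {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// Drains the Rust-side deltas and adds them to the registered
			// prometheus gauges and counters.
			programs.UpdateWasmCacheMetrics()
		}
	}
}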
#[no_mangle] -pub extern "C" fn stylus_get_lru_entry_size_estimate_bytes( +pub extern "C" fn stylus_get_entry_size_estimate_bytes( module: GoSliceData, version: u16, debug: bool, ) -> u64 { match deserialize_module(module.slice(), version, debug) { Err(error) => panic!("tried to get invalid asm!: {error}"), - Ok((_, _, lru_entry_size_estimate_bytes)) => { - lru_entry_size_estimate_bytes.try_into().unwrap() + Ok((_, _, entry_size_estimate_bytes)) => { + entry_size_estimate_bytes.try_into().unwrap() } } } diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 5fbc512211..38da63013d 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -52,6 +52,11 @@ var ( stylusLRUCacheSizeHitsCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/hits", nil) stylusLRUCacheSizeMissesCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/misses", nil) stylusLRUCacheSizeDoesNotFitCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/does_not_fit", nil) + + stylusLongTermCacheSizeBytesGauge = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/long_term/size_bytes", nil) + stylusLongTermCacheSizeCountGauge = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/long_term/count", nil) + stylusLongTermCacheSizeHitsCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/long_term/hits", nil) + stylusLongTermCacheSizeMissesCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/long_term/misses", nil) ) func activateProgram( @@ -333,24 +338,52 @@ func SetWasmLruCacheCapacity(capacityBytes uint64) { C.stylus_set_cache_lru_capacity(u64(capacityBytes)) } -// exported for testing +func UpdateWasmCacheMetrics() { + metrics := C.stylus_get_cache_metrics() + + stylusLRUCacheSizeBytesGauge.Update(int64(metrics.lru.size_bytes)) + stylusLRUCacheSizeCountGauge.Update(int64(metrics.lru.count)) + stylusLRUCacheSizeHitsCounter.Inc(int64(metrics.lru.hits)) + stylusLRUCacheSizeMissesCounter.Inc(int64(metrics.lru.misses)) + stylusLRUCacheSizeDoesNotFitCounter.Inc(int64(metrics.lru.does_not_fit)) + + stylusLongTermCacheSizeBytesGauge.Update(int64(metrics.long_term.size_bytes)) + stylusLongTermCacheSizeCountGauge.Update(int64(metrics.long_term.count)) + stylusLongTermCacheSizeHitsCounter.Inc(int64(metrics.long_term.hits)) + stylusLongTermCacheSizeMissesCounter.Inc(int64(metrics.long_term.misses)) +} + +// Used for testing type WasmLruCacheMetrics struct { SizeBytes uint64 Count uint32 } -func GetWasmLruCacheMetrics() *WasmLruCacheMetrics { - metrics := C.stylus_get_lru_cache_metrics() +// Used for testing +type WasmLongTermCacheMetrics struct { + SizeBytes uint64 + Count uint32 +} - stylusLRUCacheSizeBytesGauge.Update(int64(metrics.size_bytes)) - stylusLRUCacheSizeCountGauge.Update(int64(metrics.count)) - stylusLRUCacheSizeHitsCounter.Inc(int64(metrics.hits)) - stylusLRUCacheSizeMissesCounter.Inc(int64(metrics.misses)) - stylusLRUCacheSizeDoesNotFitCounter.Inc(int64(metrics.does_not_fit)) +// Used for testing +type WasmCacheMetrics struct { + Lru WasmLruCacheMetrics + LongTerm WasmLongTermCacheMetrics +} - return &WasmLruCacheMetrics{ - SizeBytes: uint64(metrics.size_bytes), - Count: uint32(metrics.count), +// Used for testing +func GetWasmCacheMetrics() *WasmCacheMetrics { + metrics := C.stylus_get_cache_metrics() + + return &WasmCacheMetrics{ + Lru: WasmLruCacheMetrics{ + SizeBytes: uint64(metrics.lru.size_bytes), + Count: uint32(metrics.lru.count), + }, + LongTerm: WasmLongTermCacheMetrics{ + SizeBytes: uint64(metrics.long_term.size_bytes), + Count: 
uint32(metrics.long_term.count), + }, } } @@ -360,8 +393,8 @@ func ClearWasmLruCache() { } // Used for testing -func GetLruEntrySizeEstimateBytes(module []byte, version uint16, debug bool) uint64 { - return uint64(C.stylus_get_lru_entry_size_estimate_bytes(goSlice(module), u16(version), cbool(debug))) +func GetEntrySizeEstimateBytes(module []byte, version uint16, debug bool) uint64 { + return uint64(C.stylus_get_entry_size_estimate_bytes(goSlice(module), u16(version), cbool(debug))) } const DefaultTargetDescriptionArm = "arm64-linux-unknown+neon" diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index b36340757a..23573a0277 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -976,14 +976,14 @@ func (s *ExecutionEngine) Start(ctx_in context.Context) { } }) if !s.disableStylusCacheMetricsCollection { - // periodically update stylus lru cache metrics + // periodically update stylus cache metrics s.LaunchThread(func(ctx context.Context) { for { select { case <-ctx.Done(): return case <-time.After(time.Minute): - programs.GetWasmLruCacheMetrics() + programs.UpdateWasmCacheMetrics() } } }) diff --git a/system_tests/program_test.go b/system_tests/program_test.go index 1cbbf268f1..51a35b9981 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -2008,7 +2008,7 @@ func checkWasmStoreContent(t *testing.T, wasmDb ethdb.KeyValueStore, targets []s } } -func deployWasmAndGetLruEntrySizeEstimateBytes( +func deployWasmAndGetEntrySizeEstimateBytes( t *testing.T, builder *NodeBuilder, auth bind.TransactOpts, @@ -2039,12 +2039,12 @@ func deployWasmAndGetLruEntrySizeEstimateBytes( module, err := statedb.TryGetActivatedAsm(rawdb.LocalTarget(), log.ModuleHash) Require(t, err, ", wasmName:", wasmName) - lruEntrySizeEstimateBytes := programs.GetLruEntrySizeEstimateBytes(module, log.Version, true) + entrySizeEstimateBytes := programs.GetEntrySizeEstimateBytes(module, log.Version, true) // just a sanity check - if lruEntrySizeEstimateBytes == 0 { - Fatal(t, "lruEntrySizeEstimateBytes is 0, wasmName:", wasmName) + if entrySizeEstimateBytes == 0 { + Fatal(t, "entrySizeEstimateBytes is 0, wasmName:", wasmName) } - return programAddress, lruEntrySizeEstimateBytes + return programAddress, entrySizeEstimateBytes } func TestWasmLruCache(t *testing.T) { @@ -2057,9 +2057,9 @@ func TestWasmLruCache(t *testing.T) { auth.GasLimit = 32000000 auth.Value = oneEth - fallibleProgramAddress, fallibleLruEntrySizeEstimateBytes := deployWasmAndGetLruEntrySizeEstimateBytes(t, builder, auth, "fallible") - keccakProgramAddress, keccakLruEntrySizeEstimateBytes := deployWasmAndGetLruEntrySizeEstimateBytes(t, builder, auth, "keccak") - mathProgramAddress, mathLruEntrySizeEstimateBytes := deployWasmAndGetLruEntrySizeEstimateBytes(t, builder, auth, "math") + fallibleProgramAddress, fallibleLruEntrySizeEstimateBytes := deployWasmAndGetEntrySizeEstimateBytes(t, builder, auth, "fallible") + keccakProgramAddress, keccakLruEntrySizeEstimateBytes := deployWasmAndGetEntrySizeEstimateBytes(t, builder, auth, "keccak") + mathProgramAddress, mathLruEntrySizeEstimateBytes := deployWasmAndGetEntrySizeEstimateBytes(t, builder, auth, "math") t.Log( "lruEntrySizeEstimateBytes, ", "fallible:", fallibleLruEntrySizeEstimateBytes, @@ -2068,7 +2068,7 @@ func TestWasmLruCache(t *testing.T) { ) programs.ClearWasmLruCache() - lruMetrics := programs.GetWasmLruCacheMetrics() + lruMetrics := programs.GetWasmCacheMetrics().Lru if lruMetrics.Count != 0 { 
t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 0, lruMetrics.Count) } @@ -2082,7 +2082,7 @@ func TestWasmLruCache(t *testing.T) { Require(t, l2client.SendTransaction(ctx, tx)) _, err := EnsureTxSucceeded(ctx, l2client, tx) Require(t, err) - lruMetrics = programs.GetWasmLruCacheMetrics() + lruMetrics = programs.GetWasmCacheMetrics().Lru if lruMetrics.Count != 0 { t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 0, lruMetrics.Count) } @@ -2098,7 +2098,7 @@ func TestWasmLruCache(t *testing.T) { Require(t, l2client.SendTransaction(ctx, tx)) _, err = EnsureTxSucceeded(ctx, l2client, tx) Require(t, err) - lruMetrics = programs.GetWasmLruCacheMetrics() + lruMetrics = programs.GetWasmCacheMetrics().Lru if lruMetrics.Count != 1 { t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 1, lruMetrics.Count) } @@ -2111,7 +2111,7 @@ func TestWasmLruCache(t *testing.T) { Require(t, l2client.SendTransaction(ctx, tx)) _, err = EnsureTxSucceeded(ctx, l2client, tx) Require(t, err) - lruMetrics = programs.GetWasmLruCacheMetrics() + lruMetrics = programs.GetWasmCacheMetrics().Lru if lruMetrics.Count != 2 { t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 2, lruMetrics.Count) } @@ -2124,7 +2124,7 @@ func TestWasmLruCache(t *testing.T) { Require(t, l2client.SendTransaction(ctx, tx)) _, err = EnsureTxSucceeded(ctx, l2client, tx) Require(t, err) - lruMetrics = programs.GetWasmLruCacheMetrics() + lruMetrics = programs.GetWasmCacheMetrics().Lru if lruMetrics.Count != 2 { t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 2, lruMetrics.Count) } From 02f1dc0508af91e8c548c82202d14a271442cd1a Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Mon, 30 Sep 2024 17:35:04 -0300 Subject: [PATCH 21/41] Rust lint --- arbitrator/stylus/src/lib.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index c16f3d7598..f5598198eb 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -11,7 +11,7 @@ use arbutil::{ format::DebugBytes, Bytes32, }; -use cache::{deserialize_module, InitCache, CacheMetrics}; +use cache::{deserialize_module, CacheMetrics, InitCache}; use evm_api::NativeRequestHandler; use eyre::ErrReport; use native::NativeInstance; @@ -387,8 +387,6 @@ pub extern "C" fn stylus_get_entry_size_estimate_bytes( ) -> u64 { match deserialize_module(module.slice(), version, debug) { Err(error) => panic!("tried to get invalid asm!: {error}"), - Ok((_, _, entry_size_estimate_bytes)) => { - entry_size_estimate_bytes.try_into().unwrap() - } + Ok((_, _, entry_size_estimate_bytes)) => entry_size_estimate_bytes.try_into().unwrap(), } } From a4784603c38e0a2b8d52c21ef4ef09e16376a83c Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 1 Oct 2024 18:50:37 +0200 Subject: [PATCH 22/41] InitCache: add items found in LRU to long term cache (if long_term_tag is 1) --- arbitrator/stylus/src/cache.rs | 17 ++++++++++++----- arbitrator/stylus/src/native.rs | 7 +++---- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index c1fdaaccee..21933c51cd 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -136,9 +136,10 @@ impl InitCache { } /// Retrieves a cached value, updating items as necessary. - pub fn get(module_hash: Bytes32, version: u16, debug: bool) -> Option<(Module, Store)> { - let mut cache = cache!(); + /// If long_term_tag is 1 and the item is only in LRU will insert to long term cache. 
+ pub fn get(module_hash: Bytes32, version: u16, long_term_tag: u32, debug: bool) -> Option<(Module, Store)> {
let key = CacheKey::new(module_hash, version, debug);
+ let mut cache = cache!();

// See if the item is in the long term cache
if let Some(item) = cache.long_term.get(&key) {
@@ -146,12 +147,18 @@ impl InitCache {
}

// See if the item is in the LRU cache, promoting if so
- if let Some(item) = cache.lru.get(&key) {
- let data = item.data();
+ if let Some(item) = cache.lru.peek(&key).cloned() {
cache.lru_counters.hits += 1;
- return Some(data);
+ if long_term_tag == Self::ARBOS_TAG {
+ cache.long_term.insert(key, item.clone());
+ } else {
+ // only calls get to move the key to the head of the LRU list
+ cache.lru.get(&key);
+ }
+ return Some(item.data());
}
cache.lru_counters.misses += 1;
+
None
}

diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs
index 516c6602e7..c751a670cc 100644
--- a/arbitrator/stylus/src/native.rs
+++ b/arbitrator/stylus/src/native.rs
@@ -121,13 +121,12 @@ impl<D: DataReader, E: EvmApi<D>> NativeInstance<D, E> {
let compile = CompileConfig::version(version, debug);
let env = WasmEnv::new(compile, None, evm, evm_data);
let module_hash = env.evm_data.module_hash;
-
- if let Some((module, store)) = InitCache::get(module_hash, version, debug) {
- return Self::from_module(module, store, env);
- }
if !env.evm_data.cached {
long_term_tag = 0;
}
+ if let Some((module, store)) = InitCache::get(module_hash, version, long_term_tag, debug) {
+ return Self::from_module(module, store, env);
+ }
let (module, store) = InitCache::insert(module_hash, module, version, long_term_tag, debug)?;
Self::from_module(module, store, env)
}

From d61710fa1b2ef90486d69edb0b3c9623034c6ca4 Mon Sep 17 00:00:00 2001
From: Maciej Kulawik
Date: Tue, 1 Oct 2024 22:21:21 +0200
Subject: [PATCH 23/41] rustfmt InitCache.get

---
 arbitrator/stylus/src/cache.rs | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs
index 21933c51cd..d849d39be0 100644
--- a/arbitrator/stylus/src/cache.rs
+++ b/arbitrator/stylus/src/cache.rs
@@ -137,7 +137,12 @@ impl InitCache {
/// Retrieves a cached value, updating items as necessary.
/// If long_term_tag is 1 and the item is only in the LRU, it will also be inserted into the long term cache.
- pub fn get(module_hash: Bytes32, version: u16, long_term_tag: u32, debug: bool) -> Option<(Module, Store)> { + pub fn get( + module_hash: Bytes32, + version: u16, + long_term_tag: u32, + debug: bool, + ) -> Option<(Module, Store)> { let key = CacheKey::new(module_hash, version, debug); let mut cache = cache!(); From 328a386f6eb020b2ee3a35752b8a381508729ce1 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 1 Oct 2024 22:21:43 +0200 Subject: [PATCH 24/41] update geth pin --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 0c3f6eba21..b1075d3786 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 0c3f6eba21cbe0196b298dfbd3fa7d51dffd627e +Subproject commit b1075d3786b28a6a3a06fe0e0ab8d1cdecc72f55 From da58307a6b051005c632e01f33be6a8599beb917 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 3 Oct 2024 02:46:11 +0200 Subject: [PATCH 25/41] system_tests: fix cache tag used when wrapping wasm database in test --- system_tests/common_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 209e7c2d35..fba6aa2fc6 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -1309,7 +1309,7 @@ func createNonL1BlockChainWithStackConfig( Require(t, err) wasmData, err := stack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm")) Require(t, err) - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 0, execConfig.StylusTarget.WasmTargets()) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 1, execConfig.StylusTarget.WasmTargets()) arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) @@ -1401,7 +1401,7 @@ func Create2ndNodeWithConfig( Require(t, err) wasmData, err := chainStack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm")) Require(t, err) - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 0, execConfig.StylusTarget.WasmTargets()) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 1, execConfig.StylusTarget.WasmTargets()) arbDb, err := chainStack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) From 6c69a96856c4be0118a84b554640b09b7d353ef3 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 3 Oct 2024 02:52:45 +0200 Subject: [PATCH 26/41] add stylus wasm long term cache test --- arbitrator/stylus/src/lib.rs | 7 +++ arbos/programs/native.go | 5 ++ system_tests/program_test.go | 106 ++++++++++++++++++++++++++++++++++- 3 files changed, 117 insertions(+), 1 deletion(-) diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index f5598198eb..feac828989 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -377,6 +377,13 @@ pub extern "C" fn stylus_clear_lru_cache() { InitCache::clear_lru_cache() } +/// Clears long term cache. +/// Only used for testing purposes. +#[no_mangle] +pub extern "C" fn stylus_clear_long_term_cache(arbos_tag: u32) { + InitCache::clear_long_term(arbos_tag); +} + /// Gets entry size in bytes. /// Only used for testing purposes. 
#[no_mangle] diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 38da63013d..5baacea381 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -392,6 +392,11 @@ func ClearWasmLruCache() { C.stylus_clear_lru_cache() } +// Used for testing +func ClearWasmLongTermCache(arbos_tag uint32) { + C.stylus_clear_long_term_cache(u32(arbos_tag)) +} + // Used for testing func GetEntrySizeEstimateBytes(module []byte, version uint16, debug bool) uint64 { return uint64(C.stylus_get_entry_size_estimate_bytes(goSlice(module), u16(version), cbool(debug))) diff --git a/system_tests/program_test.go b/system_tests/program_test.go index 21081dc341..1686e212bf 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -1385,7 +1385,7 @@ func TestProgramCacheManager(t *testing.T) { isManager, err := arbWasmCache.IsCacheManager(nil, manager) assert(!isManager, err) - // athorize the manager + // authorize the manager ensure(arbOwner.AddWasmCacheManager(&ownerAuth, manager)) assert(arbWasmCache.IsCacheManager(nil, manager)) all, err := arbWasmCache.AllCacheManagers(nil) @@ -2137,3 +2137,107 @@ func TestWasmLruCache(t *testing.T) { t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", keccakLruEntrySizeEstimateBytes+mathLruEntrySizeEstimateBytes, lruMetrics.SizeBytes) } } + +func TestWasmLongTermCache(t *testing.T) { + builder, ownerAuth, cleanup := setupProgramTest(t, true) + ctx := builder.ctx + l2info := builder.L2Info + l2client := builder.L2.Client + defer cleanup() + + ensure := func(tx *types.Transaction, err error) *types.Receipt { + t.Helper() + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + Require(t, err) + return receipt + } + + manager, tx, _, err := mocksgen.DeploySimpleCacheManager(&ownerAuth, l2client) + ensure(tx, err) + + arbWasmCache, err := pgen.NewArbWasmCache(types.ArbWasmCacheAddress, builder.L2.Client) + Require(t, err) + arbOwner, err := pgen.NewArbOwner(types.ArbOwnerAddress, builder.L2.Client) + Require(t, err) + ensure(arbOwner.SetInkPrice(&ownerAuth, 10_000)) + + ownerAuth.GasLimit = 32000000 + ownerAuth.Value = oneEth + + fallibleProgramAddress, fallibleEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, ownerAuth, "fallible") + keccakProgramAddress, keccakEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, ownerAuth, "keccak") + mathProgramAddress, mathEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, ownerAuth, "math") + t.Log( + "lruEntrySizeEstimateBytes, ", + "fallible:", fallibleEntrySize, + "keccak:", keccakEntrySize, + "math:", mathEntrySize, + ) + + isManager, err := arbWasmCache.IsCacheManager(nil, manager) + Require(t, err) + t.Log("isManager", isManager) + ownerAuth.Value = common.Big0 + ensure(arbOwner.AddWasmCacheManager(&ownerAuth, manager)) + + checkLongTermMetrics := func(expected programs.WasmLongTermCacheMetrics) { + t.Helper() + longTermMetrics := programs.GetWasmCacheMetrics().LongTerm + if longTermMetrics.Count != expected.Count { + t.Fatalf("longTermMetrics.Count, expected: %v, actual: %v", expected.Count, longTermMetrics.Count) + } + if longTermMetrics.SizeBytes != expected.SizeBytes { + t.Fatalf("longTermMetrics.SizeBytes, expected: %v, actual: %v", expected.SizeBytes, longTermMetrics.SizeBytes) + } + } + + programs.ClearWasmLongTermCache(1) + checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) + + // fallible wasm program will not be cached since caching is not set for this program + tx = 
l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) + + ensure(arbWasmCache.CacheProgram(&ownerAuth, fallibleProgramAddress)) + // fallible wasm program will be cached + tx = l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + Count: 1, + SizeBytes: fallibleEntrySize, + }) + + // keccak wasm program will be cached + ensure(arbWasmCache.CacheProgram(&ownerAuth, keccakProgramAddress)) + tx = l2info.PrepareTxTo("Owner", &keccakProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + Count: 2, + SizeBytes: fallibleEntrySize + keccakEntrySize, + }) + + // math wasm program will not be cached + tx = l2info.PrepareTxTo("Owner", &mathProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + Count: 2, + SizeBytes: fallibleEntrySize + keccakEntrySize, + }) + + // math wasm program will be cached + ensure(arbWasmCache.CacheProgram(&ownerAuth, mathProgramAddress)) + tx = l2info.PrepareTxTo("Owner", &mathProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + Count: 3, + SizeBytes: fallibleEntrySize + keccakEntrySize + mathEntrySize, + }) +} From 80c931869df8820a5f9ae3fe3242936f52b440db Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 3 Oct 2024 17:05:05 +0200 Subject: [PATCH 27/41] system_test: use stylus long term cache only in specific tests --- system_tests/common_test.go | 30 +++++++++++++++++++++++------- system_tests/program_test.go | 4 +++- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index fba6aa2fc6..d2fda0e135 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -249,6 +249,7 @@ type NodeBuilder struct { initMessage *arbostypes.ParsedInitMessage l3InitMessage *arbostypes.ParsedInitMessage withProdConfirmPeriodBlocks bool + wasmCacheTag uint32 // Created nodes L1 *TestClient @@ -352,6 +353,15 @@ func (b *NodeBuilder) WithExtraArchs(targets []string) *NodeBuilder { return b } +func (b *NodeBuilder) WithStylusLongTermCache(enabled bool) *NodeBuilder { + if enabled { + b.wasmCacheTag = 1 + } else { + b.wasmCacheTag = 0 + } + return b +} + func (b *NodeBuilder) Build(t *testing.T) func() { b.CheckConfig(t) if b.withL1 { @@ -425,6 +435,8 @@ func buildOnParentChain( initMessage *arbostypes.ParsedInitMessage, addresses *chaininfo.RollupAddresses, + + wasmCacheTag uint32, ) *TestClient { if parentChainTestClient == nil { t.Fatal("must build parent chain before building chain") @@ -436,7 +448,7 @@ func buildOnParentChain( var arbDb ethdb.Database var blockchain *core.BlockChain _, chainTestClient.Stack, chainDb, arbDb, blockchain = createNonL1BlockChainWithStackConfig( - t, chainInfo, dataDir, chainConfig, initMessage, stackConfig, execConfig) + t, chainInfo, dataDir, chainConfig, initMessage, stackConfig, execConfig, wasmCacheTag) var sequencerTxOptsPtr *bind.TransactOpts var dataSigner signature.DataSignerFunc @@ -524,6 +536,8 @@ func (b *NodeBuilder) 
BuildL3OnL2(t *testing.T) func() { b.l3InitMessage, b.l3Addresses, + + b.wasmCacheTag, ) return func() { @@ -552,6 +566,8 @@ func (b *NodeBuilder) BuildL2OnL1(t *testing.T) func() { b.initMessage, b.addresses, + + b.wasmCacheTag, ) return func() { @@ -573,7 +589,7 @@ func (b *NodeBuilder) BuildL2(t *testing.T) func() { var arbDb ethdb.Database var blockchain *core.BlockChain b.L2Info, b.L2.Stack, chainDb, arbDb, blockchain = createL2BlockChain( - t, b.L2Info, b.dataDir, b.chainConfig, b.execConfig) + t, b.L2Info, b.dataDir, b.chainConfig, b.execConfig, b.wasmCacheTag) Require(t, b.execConfig.Validate()) execConfig := b.execConfig @@ -624,7 +640,7 @@ func (b *NodeBuilder) RestartL2Node(t *testing.T) { } b.L2.cleanup() - l2info, stack, chainDb, arbDb, blockchain := createNonL1BlockChainWithStackConfig(t, b.L2Info, b.dataDir, b.chainConfig, b.initMessage, b.l2StackConfig, b.execConfig) + l2info, stack, chainDb, arbDb, blockchain := createNonL1BlockChainWithStackConfig(t, b.L2Info, b.dataDir, b.chainConfig, b.initMessage, b.l2StackConfig, b.execConfig, b.wasmCacheTag) execConfigFetcher := func() *gethexec.Config { return b.execConfig } execNode, err := gethexec.CreateExecutionNode(b.ctx, stack, chainDb, blockchain, nil, execConfigFetcher) @@ -1284,13 +1300,13 @@ func deployOnParentChain( } func createL2BlockChain( - t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, execConfig *gethexec.Config, + t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, execConfig *gethexec.Config, wasmCacheTag uint32, ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { - return createNonL1BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil, execConfig) + return createNonL1BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil, execConfig, wasmCacheTag) } func createNonL1BlockChainWithStackConfig( - t *testing.T, info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, execConfig *gethexec.Config, + t *testing.T, info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, execConfig *gethexec.Config, wasmCacheTag uint32, ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { if info == nil { info = NewArbTestInfo(t, chainConfig.ChainID) @@ -1309,7 +1325,7 @@ func createNonL1BlockChainWithStackConfig( Require(t, err) wasmData, err := stack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm")) Require(t, err) - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 1, execConfig.StylusTarget.WasmTargets()) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, wasmCacheTag, execConfig.StylusTarget.WasmTargets()) arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) diff --git a/system_tests/program_test.go b/system_tests/program_test.go index 1686e212bf..a8c325daf3 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -2139,7 +2139,9 @@ func TestWasmLruCache(t *testing.T) { } func TestWasmLongTermCache(t *testing.T) { - builder, ownerAuth, cleanup := setupProgramTest(t, true) + builder, ownerAuth, cleanup := setupProgramTest(t, true, func(builder 
*NodeBuilder) { + builder.WithStylusLongTermCache(true) + }) ctx := builder.ctx l2info := builder.L2Info l2client := builder.L2.Client From 4676459bf41d35cdc59a3dac3dfec6026a2e9923 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 3 Oct 2024 23:04:42 +0200 Subject: [PATCH 28/41] fix tracing long term cache size when adding item from lru --- arbitrator/stylus/src/cache.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 1502fa82e6..0a4b73c2a6 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -184,6 +184,7 @@ impl InitCache { cache.lru_counters.hits += 1; if long_term_tag == Self::ARBOS_TAG { cache.long_term.insert(key, item.clone()); + cache.long_term_size_bytes += item.entry_size_estimate_bytes; } else { // only calls get to move the key to the head of the LRU list cache.lru.get(&key); From 0ab06c87cce2e8199580b8b69349711589ec2f37 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 3 Oct 2024 23:10:36 +0200 Subject: [PATCH 29/41] add test for adding wasm from lru cache to long term cache --- system_tests/program_test.go | 222 +++++++++++++++++++++++++++++++---- 1 file changed, 197 insertions(+), 25 deletions(-) diff --git a/system_tests/program_test.go b/system_tests/program_test.go index a8c325daf3..6ca04976e5 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -2138,6 +2138,28 @@ func TestWasmLruCache(t *testing.T) { } } +func checkLongTermCacheMetrics(t *testing.T, expected programs.WasmLongTermCacheMetrics) { + t.Helper() + longTermMetrics := programs.GetWasmCacheMetrics().LongTerm + if longTermMetrics.Count != expected.Count { + t.Fatalf("longTermMetrics.Count, expected: %v, actual: %v", expected.Count, longTermMetrics.Count) + } + if longTermMetrics.SizeBytes != expected.SizeBytes { + t.Fatalf("longTermMetrics.SizeBytes, expected: %v, actual: %v", expected.SizeBytes, longTermMetrics.SizeBytes) + } +} + +func checkLruCacheMetrics(t *testing.T, expected programs.WasmLruCacheMetrics) { + t.Helper() + lruMetrics := programs.GetWasmCacheMetrics().Lru + if lruMetrics.Count != expected.Count { + t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", expected.Count, lruMetrics.Count) + } + if lruMetrics.SizeBytes != expected.SizeBytes { + t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", expected.SizeBytes, lruMetrics.SizeBytes) + } +} + func TestWasmLongTermCache(t *testing.T) { builder, ownerAuth, cleanup := setupProgramTest(t, true, func(builder *NodeBuilder) { builder.WithStylusLongTermCache(true) @@ -2155,9 +2177,6 @@ func TestWasmLongTermCache(t *testing.T) { return receipt } - manager, tx, _, err := mocksgen.DeploySimpleCacheManager(&ownerAuth, l2client) - ensure(tx, err) - arbWasmCache, err := pgen.NewArbWasmCache(types.ArbWasmCacheAddress, builder.L2.Client) Require(t, err) arbOwner, err := pgen.NewArbOwner(types.ArbOwnerAddress, builder.L2.Client) @@ -2177,33 +2196,18 @@ func TestWasmLongTermCache(t *testing.T) { "math:", mathEntrySize, ) - isManager, err := arbWasmCache.IsCacheManager(nil, manager) - Require(t, err) - t.Log("isManager", isManager) ownerAuth.Value = common.Big0 - ensure(arbOwner.AddWasmCacheManager(&ownerAuth, manager)) - - checkLongTermMetrics := func(expected programs.WasmLongTermCacheMetrics) { - t.Helper() - longTermMetrics := programs.GetWasmCacheMetrics().LongTerm - if longTermMetrics.Count != expected.Count { - t.Fatalf("longTermMetrics.Count, expected: %v, actual: %v", expected.Count, 
longTermMetrics.Count) - } - if longTermMetrics.SizeBytes != expected.SizeBytes { - t.Fatalf("longTermMetrics.SizeBytes, expected: %v, actual: %v", expected.SizeBytes, longTermMetrics.SizeBytes) - } - } programs.ClearWasmLongTermCache(1) - checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ Count: 0, SizeBytes: 0, }) // fallible wasm program will not be cached since caching is not set for this program - tx = l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + tx := l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01}) ensure(tx, l2client.SendTransaction(ctx, tx)) - checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ Count: 0, SizeBytes: 0, }) @@ -2212,7 +2216,7 @@ func TestWasmLongTermCache(t *testing.T) { // fallible wasm program will be cached tx = l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01}) ensure(tx, l2client.SendTransaction(ctx, tx)) - checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ Count: 1, SizeBytes: fallibleEntrySize, }) @@ -2221,7 +2225,7 @@ func TestWasmLongTermCache(t *testing.T) { ensure(arbWasmCache.CacheProgram(&ownerAuth, keccakProgramAddress)) tx = l2info.PrepareTxTo("Owner", &keccakProgramAddress, l2info.TransferGas, nil, []byte{0x01}) ensure(tx, l2client.SendTransaction(ctx, tx)) - checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ Count: 2, SizeBytes: fallibleEntrySize + keccakEntrySize, }) @@ -2229,7 +2233,7 @@ func TestWasmLongTermCache(t *testing.T) { // math wasm program will not be cached tx = l2info.PrepareTxTo("Owner", &mathProgramAddress, l2info.TransferGas, nil, []byte{0x01}) ensure(tx, l2client.SendTransaction(ctx, tx)) - checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ Count: 2, SizeBytes: fallibleEntrySize + keccakEntrySize, }) @@ -2238,8 +2242,176 @@ func TestWasmLongTermCache(t *testing.T) { ensure(arbWasmCache.CacheProgram(&ownerAuth, mathProgramAddress)) tx = l2info.PrepareTxTo("Owner", &mathProgramAddress, l2info.TransferGas, nil, []byte{0x01}) ensure(tx, l2client.SendTransaction(ctx, tx)) - checkLongTermMetrics(programs.WasmLongTermCacheMetrics{ + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ Count: 3, SizeBytes: fallibleEntrySize + keccakEntrySize + mathEntrySize, }) + + statedb, err := builder.L2.ExecNode.Backend.ArbInterface().BlockChain().State() + Require(t, err) + fallibleProgramHash := statedb.GetCodeHash(fallibleProgramAddress) + keccakProgramHash := statedb.GetCodeHash(keccakProgramAddress) + mathProgramHash := statedb.GetCodeHash(mathProgramAddress) + + ensure(arbWasmCache.EvictCodehash(&ownerAuth, keccakProgramHash)) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 2, + SizeBytes: fallibleEntrySize + mathEntrySize, + }) + + // keccak wasm program will not be cached + tx = l2info.PrepareTxTo("Owner", &keccakProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 2, + SizeBytes: fallibleEntrySize + mathEntrySize, + }) + + // keccak wasm program will be cached + 
ensure(arbWasmCache.CacheProgram(&ownerAuth, keccakProgramAddress)) + tx = l2info.PrepareTxTo("Owner", &mathProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 3, + SizeBytes: fallibleEntrySize + keccakEntrySize + mathEntrySize, + }) + + ensure(arbWasmCache.EvictCodehash(&ownerAuth, fallibleProgramHash)) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 2, + SizeBytes: keccakEntrySize + mathEntrySize, + }) + + ensure(arbWasmCache.EvictCodehash(&ownerAuth, mathProgramHash)) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 1, + SizeBytes: keccakEntrySize, + }) + + ensure(arbWasmCache.EvictCodehash(&ownerAuth, keccakProgramHash)) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) +} + +func TestRepopulateWasmLongTermCacheFromLru(t *testing.T) { + builder, ownerAuth, cleanup := setupProgramTest(t, true, func(builder *NodeBuilder) { + builder.WithStylusLongTermCache(true) + }) + ctx := builder.ctx + l2info := builder.L2Info + l2client := builder.L2.Client + defer cleanup() + + ensure := func(tx *types.Transaction, err error) *types.Receipt { + t.Helper() + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + Require(t, err) + return receipt + } + + arbWasmCache, err := pgen.NewArbWasmCache(types.ArbWasmCacheAddress, builder.L2.Client) + Require(t, err) + arbOwner, err := pgen.NewArbOwner(types.ArbOwnerAddress, builder.L2.Client) + Require(t, err) + ensure(arbOwner.SetInkPrice(&ownerAuth, 10_000)) + + ownerAuth.GasLimit = 32000000 + ownerAuth.Value = oneEth + + fallibleProgramAddress, fallibleEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, ownerAuth, "fallible") + keccakProgramAddress, keccakEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, ownerAuth, "keccak") + mathProgramAddress, mathEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, ownerAuth, "math") + + ownerAuth.Value = common.Big0 + + programs.ClearWasmLongTermCache(1) + programs.ClearWasmLruCache() + // only 2 out of 3 programs should fit lru + programs.SetWasmLruCacheCapacity( + fallibleEntrySize + keccakEntrySize + mathEntrySize - 1, + ) + + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) + + ensure(arbWasmCache.CacheProgram(&ownerAuth, fallibleProgramAddress)) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 1, + SizeBytes: fallibleEntrySize, + }) + + // clear long term cache to emulate restart + programs.ClearWasmLongTermCache(1) + programs.ClearWasmLruCache() + + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) + + nonce := builder.L2Info.GetInfoWithPrivKey("Owner").Nonce.Load() + tx := l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + _, err = arbutil.SendTxAsCall(ctx, l2client, tx, l2info.GetAddress("Owner"), nil, true) + Require(t, err) + // restore nonce in L2Info + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce.Store(nonce) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 1, + SizeBytes: 
fallibleEntrySize, + }) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) + + tx = l2info.PrepareTxTo("Owner", &keccakProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 2, + SizeBytes: fallibleEntrySize + keccakEntrySize, + }) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) + + tx = l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 2, + SizeBytes: fallibleEntrySize + keccakEntrySize, + }) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 1, + SizeBytes: fallibleEntrySize, + }) + + // mathProgram should end up in lru cache and as result fallibleProgram should be evicted as least recently used item (tx that restores the program back to long term cache shouldn't promote the lru item); fallibleProgram should remain in long term cache + tx = l2info.PrepareTxTo("Owner", &mathProgramAddress, l2info.TransferGas, nil, []byte{0x01}) + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 2, + SizeBytes: keccakEntrySize + mathEntrySize, + }) + checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{ + Count: 1, + SizeBytes: fallibleEntrySize, + }) } From f65156d6a6f3d61110503b5ba97fefa50c02405e Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 3 Oct 2024 23:38:16 +0200 Subject: [PATCH 30/41] don't clone cache item twice --- arbitrator/stylus/src/cache.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 0a4b73c2a6..208f45e26c 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -182,14 +182,15 @@ impl InitCache { // See if the item is in the LRU cache, promoting if so if let Some(item) = cache.lru.peek(&key).cloned() { cache.lru_counters.hits += 1; + let data = item.data(); if long_term_tag == Self::ARBOS_TAG { - cache.long_term.insert(key, item.clone()); cache.long_term_size_bytes += item.entry_size_estimate_bytes; + cache.long_term.insert(key, item); } else { // only calls get to move the key to the head of the LRU list cache.lru.get(&key); } - return Some(item.data()); + return Some(data); } cache.lru_counters.misses += 1; From d5f5f11c6e880dca803fb9ef141f0971b41ef743 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 4 Oct 2024 13:14:28 +0200 Subject: [PATCH 31/41] refactor wasm lru cache test --- system_tests/program_test.go | 99 ++++++++++++++++-------------------- 1 file changed, 43 insertions(+), 56 deletions(-) diff --git a/system_tests/program_test.go b/system_tests/program_test.go index 6ca04976e5..aab207e0f6 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -2059,83 +2059,68 @@ func TestWasmLruCache(t *testing.T) { l2client := builder.L2.Client defer cleanup() + ensure := func(tx *types.Transaction, err error) *types.Receipt { + t.Helper() + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + Require(t, err) + return receipt + } + auth.GasLimit = 32000000 auth.Value = oneEth - fallibleProgramAddress, fallibleLruEntrySizeEstimateBytes := deployWasmAndGetEntrySizeEstimateBytes(t, builder, auth, "fallible") - keccakProgramAddress, 
keccakLruEntrySizeEstimateBytes := deployWasmAndGetEntrySizeEstimateBytes(t, builder, auth, "keccak") - mathProgramAddress, mathLruEntrySizeEstimateBytes := deployWasmAndGetEntrySizeEstimateBytes(t, builder, auth, "math") + fallibleProgramAddress, fallibleEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, auth, "fallible") + keccakProgramAddress, keccakEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, auth, "keccak") + mathProgramAddress, mathEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, auth, "math") t.Log( "lruEntrySizeEstimateBytes, ", - "fallible:", fallibleLruEntrySizeEstimateBytes, - "keccak:", keccakLruEntrySizeEstimateBytes, - "math:", mathLruEntrySizeEstimateBytes, + "fallible:", fallibleEntrySize, + "keccak:", keccakEntrySize, + "math:", mathEntrySize, ) programs.ClearWasmLruCache() - lruMetrics := programs.GetWasmCacheMetrics().Lru - if lruMetrics.Count != 0 { - t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 0, lruMetrics.Count) - } - if lruMetrics.SizeBytes != 0 { - t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", 0, lruMetrics.SizeBytes) - } + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) - programs.SetWasmLruCacheCapacity(fallibleLruEntrySizeEstimateBytes - 1) + programs.SetWasmLruCacheCapacity(fallibleEntrySize - 1) // fallible wasm program will not be cached since its size is greater than lru cache capacity tx := l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01}) - Require(t, l2client.SendTransaction(ctx, tx)) - _, err := EnsureTxSucceeded(ctx, l2client, tx) - Require(t, err) - lruMetrics = programs.GetWasmCacheMetrics().Lru - if lruMetrics.Count != 0 { - t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 0, lruMetrics.Count) - } - if lruMetrics.SizeBytes != 0 { - t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", 0, lruMetrics.SizeBytes) - } + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 0, + SizeBytes: 0, + }) programs.SetWasmLruCacheCapacity( - fallibleLruEntrySizeEstimateBytes + keccakLruEntrySizeEstimateBytes + mathLruEntrySizeEstimateBytes - 1, + fallibleEntrySize + keccakEntrySize + mathEntrySize - 1, ) // fallible wasm program will be cached tx = l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01}) - Require(t, l2client.SendTransaction(ctx, tx)) - _, err = EnsureTxSucceeded(ctx, l2client, tx) - Require(t, err) - lruMetrics = programs.GetWasmCacheMetrics().Lru - if lruMetrics.Count != 1 { - t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 1, lruMetrics.Count) - } - if lruMetrics.SizeBytes != fallibleLruEntrySizeEstimateBytes { - t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", fallibleLruEntrySizeEstimateBytes, lruMetrics.SizeBytes) - } + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 1, + SizeBytes: fallibleEntrySize, + }) // keccak wasm program will be cached tx = l2info.PrepareTxTo("Owner", &keccakProgramAddress, l2info.TransferGas, nil, []byte{0x01}) - Require(t, l2client.SendTransaction(ctx, tx)) - _, err = EnsureTxSucceeded(ctx, l2client, tx) - Require(t, err) - lruMetrics = programs.GetWasmCacheMetrics().Lru - if lruMetrics.Count != 2 { - t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 2, lruMetrics.Count) - } - if lruMetrics.SizeBytes != fallibleLruEntrySizeEstimateBytes+keccakLruEntrySizeEstimateBytes { - 
t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", fallibleLruEntrySizeEstimateBytes+keccakLruEntrySizeEstimateBytes, lruMetrics.SizeBytes) - } + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 2, + SizeBytes: fallibleEntrySize + keccakEntrySize, + }) // math wasm program will be cached, but fallible will be evicted since (fallible + keccak + math) > lruCacheCapacity tx = l2info.PrepareTxTo("Owner", &mathProgramAddress, l2info.TransferGas, nil, []byte{0x01}) - Require(t, l2client.SendTransaction(ctx, tx)) - _, err = EnsureTxSucceeded(ctx, l2client, tx) - Require(t, err) - lruMetrics = programs.GetWasmCacheMetrics().Lru - if lruMetrics.Count != 2 { - t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 2, lruMetrics.Count) - } - if lruMetrics.SizeBytes != keccakLruEntrySizeEstimateBytes+mathLruEntrySizeEstimateBytes { - t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", keccakLruEntrySizeEstimateBytes+mathLruEntrySizeEstimateBytes, lruMetrics.SizeBytes) - } + ensure(tx, l2client.SendTransaction(ctx, tx)) + checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ + Count: 2, + SizeBytes: keccakEntrySize + mathEntrySize, + }) } func checkLongTermCacheMetrics(t *testing.T, expected programs.WasmLongTermCacheMetrics) { @@ -2403,7 +2388,9 @@ func TestRepopulateWasmLongTermCacheFromLru(t *testing.T) { SizeBytes: fallibleEntrySize, }) - // mathProgram should end up in lru cache and as result fallibleProgram should be evicted as least recently used item (tx that restores the program back to long term cache shouldn't promote the lru item); fallibleProgram should remain in long term cache + // mathProgram should end up in lru cache and + // as result fallibleProgram should be evicted as least recently used item (tx that restores the program back to long term cache shouldn't promote the lru item); + // fallibleProgram should remain in long term cache tx = l2info.PrepareTxTo("Owner", &mathProgramAddress, l2info.TransferGas, nil, []byte{0x01}) ensure(tx, l2client.SendTransaction(ctx, tx)) checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ From fe2233ffb46f98a95398335c793c2725490da595 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 4 Oct 2024 18:55:49 +0200 Subject: [PATCH 32/41] avoid unncessary cloning of cache item data --- arbitrator/stylus/src/cache.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 208f45e26c..6192a30eff 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -182,15 +182,14 @@ impl InitCache { // See if the item is in the LRU cache, promoting if so if let Some(item) = cache.lru.peek(&key).cloned() { cache.lru_counters.hits += 1; - let data = item.data(); if long_term_tag == Self::ARBOS_TAG { cache.long_term_size_bytes += item.entry_size_estimate_bytes; - cache.long_term.insert(key, item); + cache.long_term.insert(key, item.clone()); } else { // only calls get to move the key to the head of the LRU list cache.lru.get(&key); } - return Some(data); + return Some((item.module, Store::new(item.engine))); } cache.lru_counters.misses += 1; From 1d643fde37ed978d6911038f93ca5b560342a3b1 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Mon, 7 Oct 2024 12:48:04 -0600 Subject: [PATCH 33/41] document the fact OCL does not support google cloud storage We are happy to merge community code, but we don't consider it tested or supported. 
--- das/google_cloud_storage_service.go | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/das/google_cloud_storage_service.go b/das/google_cloud_storage_service.go index 2c490f346c..829f4b5265 100644 --- a/das/google_cloud_storage_service.go +++ b/das/google_cloud_storage_service.go @@ -1,21 +1,24 @@ package das import ( - googlestorage "cloud.google.com/go/storage" "context" "fmt" + "io" + "math" + "sort" + "time" + + googlestorage "cloud.google.com/go/storage" + "github.com/google/go-cmp/cmp" + flag "github.com/spf13/pflag" + "google.golang.org/api/option" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/google/go-cmp/cmp" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" - flag "github.com/spf13/pflag" - "google.golang.org/api/option" - "io" - "math" - "sort" - "time" ) type GoogleCloudStorageOperator interface { @@ -69,7 +72,7 @@ type GoogleCloudStorageServiceConfig struct { var DefaultGoogleCloudStorageServiceConfig = GoogleCloudStorageServiceConfig{} func GoogleCloudConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultGoogleCloudStorageServiceConfig.Enable, "enable storage/retrieval of sequencer batch data from an Google Cloud Storage bucket") + f.Bool(prefix+".enable", DefaultGoogleCloudStorageServiceConfig.Enable, "EXPERIMENTAL/unsupported - enable storage/retrieval of sequencer batch data from an Google Cloud Storage bucket") f.String(prefix+".access-token", DefaultGoogleCloudStorageServiceConfig.AccessToken, "Google Cloud Storage access token") f.String(prefix+".bucket", DefaultGoogleCloudStorageServiceConfig.Bucket, "Google Cloud Storage bucket") f.String(prefix+".object-prefix", DefaultGoogleCloudStorageServiceConfig.ObjectPrefix, "prefix to add to Google Cloud Storage objects") From 8c655e68db19eccdd959b3814ee236dc0836c0e9 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 8 Oct 2024 09:46:40 +0530 Subject: [PATCH 34/41] merge master and address PR comments --- .github/workflows/arbitrator-ci.yml | 6 +- .github/workflows/ci.yml | 60 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/gotestsum.sh | 83 +++ .github/workflows/submodule-pin-check.yml | 3 +- Dockerfile | 4 +- Makefile | 8 +- arbitrator/Cargo.lock | 78 +-- arbitrator/arbutil/src/types.rs | 101 +++ arbitrator/bench/Cargo.toml | 5 - arbitrator/bench/src/bin.rs | 6 +- arbitrator/bench/src/lib.rs | 2 - arbitrator/bench/src/parse_input.rs | 76 --- arbitrator/prover/Cargo.toml | 2 +- arbitrator/prover/src/lib.rs | 2 + arbitrator/prover/src/main.rs | 174 ++--- arbitrator/prover/src/parse_input.rs | 112 +++ arbitrator/{bench => prover}/src/prepare.rs | 13 +- .../stylus/tests/hostio-test/Cargo.lock | 636 ++++++++++++++++++ .../stylus/tests/hostio-test/Cargo.toml | 17 + .../stylus/tests/hostio-test/src/main.rs | 207 ++++++ arbitrator/wasm-libraries/Cargo.lock | 276 ++++++-- arbnode/api.go | 7 + arbnode/dataposter/data_poster.go | 4 +- arbnode/dataposter/dataposter_test.go | 63 +- arbnode/delayed.go | 5 +- arbnode/inbox_reader.go | 5 +- arbnode/inbox_tracker.go | 5 +- arbnode/node.go | 5 +- arbnode/sequencer_inbox.go | 9 +- arbnode/transaction_streamer.go | 4 +- arbos/tx_processor.go | 18 +- arbos/util/storage_cache.go | 5 + arbos/util/storage_cache_test.go | 3 +- arbutil/correspondingl1blocknumber.go | 6 +- arbutil/transaction_data.go | 5 +- arbutil/wait_for_l1.go | 24 +- 
broadcastclient/broadcastclient.go | 12 + cmd/nitro/init.go | 4 +- cmd/pruning/pruning.go | 5 +- contracts | 2 +- das/aggregator.go | 4 +- das/chain_fetch_das.go | 4 +- das/das.go | 8 +- das/dasRpcClient.go | 31 +- das/factory.go | 13 +- das/google_cloud_storage_service.go | 202 ++++++ das/google_cloud_storage_service_test.go | 84 +++ das/rpc_aggregator.go | 4 +- das/sign_after_store_das_writer.go | 1 + das/syncing_fallback_storage.go | 3 +- execution/gethexec/node.go | 3 +- go-ethereum | 2 +- go.mod | 98 +-- go.sum | 234 ++++--- pubsub/producer.go | 1 + staker/block_validator.go | 32 +- staker/l1_validator.go | 5 +- staker/rollup_watcher.go | 44 +- staker/staker.go | 18 +- staker/stateless_block_validator.go | 15 + staker/txbuilder/builder.go | 14 +- staker/validatorwallet/contract.go | 4 +- staker/validatorwallet/eoa.go | 8 +- staker/validatorwallet/noop.go | 8 +- system_tests/common_test.go | 73 +- system_tests/das_test.go | 100 ++- system_tests/eth_sync_test.go | 2 +- system_tests/full_challenge_impl_test.go | 5 +- system_tests/program_gas_test.go | 458 +++++++++++++ system_tests/program_test.go | 7 +- system_tests/validation_mock_test.go | 4 - system_tests/wrap_transaction_test.go | 13 +- util/headerreader/blob_client.go | 6 +- util/headerreader/header_reader.go | 7 +- validator/client/validation_client.go | 13 - validator/inputs/writer.go | 141 ++++ validator/inputs/writer_test.go | 92 +++ validator/interface.go | 1 - validator/server_api/json.go | 13 +- validator/server_arb/validator_spawner.go | 139 +--- validator/server_jit/jit_machine.go | 2 +- validator/valnode/validation_api.go | 9 - 83 files changed, 3149 insertions(+), 840 deletions(-) create mode 100755 .github/workflows/gotestsum.sh delete mode 100644 arbitrator/bench/src/lib.rs delete mode 100644 arbitrator/bench/src/parse_input.rs create mode 100644 arbitrator/prover/src/parse_input.rs rename arbitrator/{bench => prover}/src/prepare.rs (85%) create mode 100644 arbitrator/stylus/tests/hostio-test/Cargo.lock create mode 100644 arbitrator/stylus/tests/hostio-test/Cargo.toml create mode 100644 arbitrator/stylus/tests/hostio-test/src/main.rs create mode 100644 das/google_cloud_storage_service.go create mode 100644 das/google_cloud_storage_service_test.go create mode 100644 system_tests/program_gas_test.go create mode 100644 validator/inputs/writer.go create mode 100644 validator/inputs/writer_test.go diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 392eb876c0..47646017ac 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -50,15 +50,13 @@ jobs: - name: Install go uses: actions/setup-go@v4 with: - go-version: 1.21.x + go-version: 1.23.x - name: Install custom go-ethereum run: | cd /tmp - git clone --branch v1.13.8 --depth 1 https://github.com/ethereum/go-ethereum.git + git clone --branch v1.14.11 --depth 1 https://github.com/ethereum/go-ethereum.git cd go-ethereum - # Enable KZG point evaluation precompile early - sed -i 's#var PrecompiledContractsBerlin = map\[common.Address\]PrecompiledContract{#\0 common.BytesToAddress([]byte{0x0a}): \&kzgPointEvaluation{},#g' core/vm/contracts.go go build -o /usr/local/bin/geth ./cmd/geth - name: Setup nodejs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b439fe4aec..a944f08f40 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -46,7 +46,7 @@ jobs: - name: Install go uses: actions/setup-go@v4 with: - go-version: 1.21.x + go-version: 1.23.x - name: Install 
wasm-ld run: | @@ -145,76 +145,42 @@ jobs: env: TEST_STATE_SCHEME: path run: | - packages=`go list ./...` - for package in $packages; do - echo running tests for $package - if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -timeout 20m -tags=cionly > >(stdbuf -oL tee -a full.log | grep -vE "INFO|seal"); then - exit 1 - fi - done + echo "Running tests with Path Scheme" >> full.log + ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags cionly --timeout 20m --cover - name: run tests without race detection and hash state scheme if: matrix.test-mode == 'defaults' env: TEST_STATE_SCHEME: hash run: | - packages=`go list ./...` - for package in $packages; do - echo running tests for $package - if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -timeout 20m -tags=cionly; then - exit 1 - fi - done + echo "Running tests with Hash Scheme" >> full.log + ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags cionly --timeout 20m - name: run tests with race detection and hash state scheme if: matrix.test-mode == 'race' env: TEST_STATE_SCHEME: hash run: | - packages=`go list ./...` - for package in $packages; do - echo running tests for $package - if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -race -timeout 30m; then - exit 1 - fi - done + echo "Running tests with Hash Scheme" >> full.log + ${{ github.workspace }}/.github/workflows/gotestsum.sh --race --timeout 30m - name: run redis tests if: matrix.test-mode == 'defaults' - run: TEST_REDIS=redis://localhost:6379/0 gotestsum --format short-verbose -- -p 1 -run TestRedis ./arbnode/... ./system_tests/... -coverprofile=coverage-redis.txt -covermode=atomic -coverpkg=./... + run: | + echo "Running redis tests" >> full.log + TEST_REDIS=redis://localhost:6379/0 gotestsum --format short-verbose -- -p 1 -run TestRedis ./arbnode/... ./system_tests/... -coverprofile=coverage-redis.txt -covermode=atomic -coverpkg=./... - name: run challenge tests if: matrix.test-mode == 'challenge' - run: | - packages=`go list ./...` - for package in $packages; do - echo running tests for $package - if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -tags=challengetest -run=TestChallenge > >(stdbuf -oL tee -a full.log | grep -vE "INFO|seal"); then - exit 1 - fi - done + run: ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags challengetest --run TestChallenge --cover - name: run stylus tests if: matrix.test-mode == 'stylus' - run: | - packages=`go list ./...` - for package in $packages; do - echo running tests for $package - if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -tags=stylustest -run="TestProgramArbitrator" > >(stdbuf -oL tee -a full.log | grep -vE "INFO|seal"); then - exit 1 - fi - done + run: ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags stylustest --run TestProgramArbitrator --timeout 60m --cover - name: run long stylus tests if: matrix.test-mode == 'long' - run: | - packages=`go list ./...` - for package in $packages; do - echo running tests for $package - if ! 
stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -tags=stylustest -run="TestProgramLong" > >(stdbuf -oL tee -a full.log | grep -vE "INFO|seal"); then - exit 1 - fi - done + run: ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags stylustest --run TestProgramLong --timeout 60m --cover - name: Archive detailed run log uses: actions/upload-artifact@v3 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1cde8f06b9..26447947d4 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -73,7 +73,7 @@ jobs: - name: Install go uses: actions/setup-go@v4 with: - go-version: 1.21.x + go-version: 1.23.x - name: Install rust stable uses: dtolnay/rust-toolchain@stable diff --git a/.github/workflows/gotestsum.sh b/.github/workflows/gotestsum.sh new file mode 100755 index 0000000000..ed631847b7 --- /dev/null +++ b/.github/workflows/gotestsum.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +check_missing_value() { + if [[ $1 -eq 0 || $2 == -* ]]; then + echo "missing $3 argument value" + exit 1 + fi +} + +timeout="" +tags="" +run="" +race=false +cover=false +while [[ $# -gt 0 ]]; do + case $1 in + --timeout) + shift + check_missing_value $# "$1" "--timeout" + timeout=$1 + shift + ;; + --tags) + shift + check_missing_value $# "$1" "--tags" + tags=$1 + shift + ;; + --run) + shift + check_missing_value $# "$1" "--run" + run=$1 + shift + ;; + --race) + race=true + shift + ;; + --cover) + cover=true + shift + ;; + *) + echo "Invalid argument: $1" + exit 1 + ;; + esac +done + +packages=$(go list ./...) +for package in $packages; do + cmd="stdbuf -oL gotestsum --format short-verbose --packages=\"$package\" --rerun-fails=2 --no-color=false --" + + if [ "$timeout" != "" ]; then + cmd="$cmd -timeout $timeout" + fi + + if [ "$tags" != "" ]; then + cmd="$cmd -tags=$tags" + fi + + if [ "$run" != "" ]; then + cmd="$cmd -run=$run" + fi + + if [ "$race" == true ]; then + cmd="$cmd -race" + fi + + if [ "$cover" == true ]; then + cmd="$cmd -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/..." + fi + + cmd="$cmd > >(stdbuf -oL tee -a full.log | grep -vE \"INFO|seal\")" + + echo "" + echo running tests for "$package" + echo "$cmd" + + if ! 
eval "$cmd"; then + exit 1 + fi +done diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 90419b530e..60dd8ad827 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -18,7 +18,8 @@ jobs: with: fetch-depth: 0 submodules: true - ref: "${{ github.event.pull_request.merge_commit_sha }}" + persist-credentials: false + ref: "${{ github.event.pull_request.head.sha }}" - name: Check all submodules are ancestors of origin/HEAD or configured branch run: | diff --git a/Dockerfile b/Dockerfile index 9138ed30ad..aba5432254 100644 --- a/Dockerfile +++ b/Dockerfile @@ -66,7 +66,7 @@ COPY --from=wasm-libs-builder /workspace/ / FROM wasm-base AS wasm-bin-builder # pinned go version -RUN curl -L https://golang.org/dl/go1.21.10.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - +RUN curl -L https://golang.org/dl/go1.23.1.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - COPY ./Makefile ./go.mod ./go.sum ./ COPY ./arbcompress ./arbcompress COPY ./arbos ./arbos @@ -220,7 +220,7 @@ RUN ./download-machine.sh consensus-v30 0xb0de9cb89e4d944ae6023a3b62276e54804c24 RUN ./download-machine.sh consensus-v31 0x260f5fa5c3176a856893642e149cf128b5a8de9f828afec8d11184415dd8dc69 RUN ./download-machine.sh consensus-v32 0x184884e1eb9fefdc158f6c8ac912bb183bf3cf83f0090317e0bc4ac5860baa39 -FROM golang:1.21.10-bookworm AS node-builder +FROM golang:1.23.1-bookworm AS node-builder WORKDIR /workspace ARG version="" ARG datetime="" diff --git a/Makefile b/Makefile index c3cf1a5144..88bbd8dabe 100644 --- a/Makefile +++ b/Makefile @@ -149,8 +149,10 @@ stylus_test_erc20_wasm = $(call get_stylus_test_wasm,erc20) stylus_test_erc20_src = $(call get_stylus_test_rust,erc20) stylus_test_read-return-data_wasm = $(call get_stylus_test_wasm,read-return-data) stylus_test_read-return-data_src = $(call get_stylus_test_rust,read-return-data) +stylus_test_hostio-test_wasm = $(call get_stylus_test_wasm,hostio-test) +stylus_test_hostio-test_src = $(call get_stylus_test_rust,hostio-test) -stylus_test_wasms = $(stylus_test_keccak_wasm) $(stylus_test_keccak-100_wasm) $(stylus_test_fallible_wasm) $(stylus_test_storage_wasm) $(stylus_test_multicall_wasm) $(stylus_test_log_wasm) $(stylus_test_create_wasm) $(stylus_test_math_wasm) $(stylus_test_sdk-storage_wasm) $(stylus_test_erc20_wasm) $(stylus_test_read-return-data_wasm) $(stylus_test_evm-data_wasm) $(stylus_test_bfs:.b=.wasm) +stylus_test_wasms = $(stylus_test_keccak_wasm) $(stylus_test_keccak-100_wasm) $(stylus_test_fallible_wasm) $(stylus_test_storage_wasm) $(stylus_test_multicall_wasm) $(stylus_test_log_wasm) $(stylus_test_create_wasm) $(stylus_test_math_wasm) $(stylus_test_sdk-storage_wasm) $(stylus_test_erc20_wasm) $(stylus_test_read-return-data_wasm) $(stylus_test_evm-data_wasm) $(stylus_test_hostio-test_wasm) $(stylus_test_bfs:.b=.wasm) stylus_benchmarks = $(wildcard $(stylus_dir)/*.toml $(stylus_dir)/src/*.rs) $(stylus_test_wasms) # user targets @@ -482,6 +484,10 @@ $(stylus_test_erc20_wasm): $(stylus_test_erc20_src) $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) @touch -c $@ # cargo might decide to not rebuild the binary +$(stylus_test_hostio-test_wasm): $(stylus_test_hostio-test_src) + $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo) + @touch -c $@ # cargo might decide to not rebuild the binary + contracts/test/prover/proofs/float%.json: $(arbitrator_cases)/float%.wasm $(prover_bin) 
$(output_latest)/soft-float.wasm $(prover_bin) $< -l $(output_latest)/soft-float.wasm -o $@ -b --allow-hostapi --require-success diff --git a/arbitrator/Cargo.lock b/arbitrator/Cargo.lock index 6048733acb..2b437968fa 100644 --- a/arbitrator/Cargo.lock +++ b/arbitrator/Cargo.lock @@ -215,7 +215,6 @@ dependencies = [ "prover", "serde", "serde_json", - "serde_with 3.9.0", ] [[package]] @@ -711,38 +710,14 @@ dependencies = [ "typenum", ] -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - [[package]] name = "darling" version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core 0.20.10", - "darling_macro 0.20.10", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", + "darling_core", + "darling_macro", ] [[package]] @@ -759,24 +734,13 @@ dependencies = [ "syn 2.0.72", ] -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core 0.20.10", + "darling_core", "quote", "syn 2.0.72", ] @@ -934,7 +898,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59c3b24c345d8c314966bdc1832f6c2635bfcce8e7cf363bd115987bba2ee242" dependencies = [ - "darling 0.20.10", + "darling", "proc-macro2", "quote", "syn 2.0.72", @@ -1756,7 +1720,7 @@ dependencies = [ "rustc-demangle", "serde", "serde_json", - "serde_with 1.14.0", + "serde_with", "sha2 0.9.9", "sha3 0.9.1", "smallvec", @@ -2079,16 +2043,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros 1.5.2", -] - [[package]] name = "serde_with" version = "3.9.0" @@ -2103,29 +2057,17 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "serde_with_macros 3.9.0", + "serde_with_macros", "time", ] -[[package]] -name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling 0.13.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "serde_with_macros" version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ - "darling 0.20.10", + "darling", "proc-macro2", "quote", "syn 2.0.72", @@ -2232,12 +2174,6 @@ version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" diff --git a/arbitrator/arbutil/src/types.rs b/arbitrator/arbutil/src/types.rs index 6cf1d6cdf7..722a89b81e 100644 --- a/arbitrator/arbutil/src/types.rs +++ b/arbitrator/arbutil/src/types.rs @@ -8,6 +8,7 @@ use std::{ borrow::Borrow, fmt, ops::{Deref, DerefMut}, + str::FromStr, }; // These values must be kept in sync with `arbutil/preimage_type.go`, @@ -83,6 +84,32 @@ impl From for Bytes32 { } } +impl FromStr for Bytes32 { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + // Remove the "0x" prefix if present + let s = s.strip_prefix("0x").unwrap_or(s); + + // Pad with leading zeros if the string is shorter than 64 characters (32 bytes) + let padded = format!("{:0>64}", s); + + // Decode the hex string using the hex crate + let decoded_bytes = hex::decode(padded).map_err(|_| "Invalid hex string")?; + + // Ensure the decoded bytes is exactly 32 bytes + if decoded_bytes.len() != 32 { + return Err("Hex string too long for Bytes32"); + } + + // Create a 32-byte array and fill it with the decoded bytes. + let mut b = [0u8; 32]; + b.copy_from_slice(&decoded_bytes); + + Ok(Bytes32(b)) + } +} + impl TryFrom<&[u8]> for Bytes32 { type Error = std::array::TryFromSliceError; @@ -249,3 +276,77 @@ impl From for Bytes20 { <[u8; 20]>::from(x).into() } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_bytes32() { + let b = Bytes32::from(0x12345678u32); + let expected = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x12, 0x34, 0x56, 0x78, + ]; + assert_eq!(b, Bytes32(expected)); + } + + #[test] + fn test_from_str_short() { + // Short hex string + let b = Bytes32::from_str("0x12345678").unwrap(); + let expected = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x12, 0x34, 0x56, 0x78, + ]; + assert_eq!(b, Bytes32(expected)); + } + + #[test] + fn test_from_str_very_short() { + // Short hex string + let b = Bytes32::from_str("0x1").unwrap(); + let expected = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0x1, + ]; + assert_eq!(b, Bytes32(expected)); + } + + #[test] + fn test_from_str_no_prefix() { + // Short hex string + let b = Bytes32::from_str("12345678").unwrap(); + let expected = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x12, 0x34, 0x56, 0x78, + ]; + assert_eq!(b, Bytes32(expected)); + } + + #[test] + fn test_from_str_full() { + // Full-length hex string + let b = + Bytes32::from_str("0x0000000000000000000000000000000000000000000000000000000012345678") + .unwrap(); + let expected = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x12, 0x34, 0x56, 0x78, + ]; + assert_eq!(b, Bytes32(expected)); + } + + #[test] + fn test_from_str_invalid_non_hex() { + let s = "0x123g5678"; // Invalid character 'g' + assert!(Bytes32::from_str(s).is_err()); + } + + #[test] + fn test_from_str_too_big() { + let s = + "0123456789ABCDEF0123456789ABCDEF01234567890123456789ABCDEF01234567890123456789ABCDEF0"; // 65 characters + assert!(Bytes32::from_str(s).is_err()); + } +} diff --git 
a/arbitrator/bench/Cargo.toml b/arbitrator/bench/Cargo.toml index 3ab5b99b08..74b948aca8 100644 --- a/arbitrator/bench/Cargo.toml +++ b/arbitrator/bench/Cargo.toml @@ -3,10 +3,6 @@ name = "bench" version = "0.1.0" edition = "2021" -[lib] -name = "bench" -path = "src/lib.rs" - [[bin]] name = "benchbin" path = "src/bin.rs" @@ -20,7 +16,6 @@ clap = { version = "4.4.8", features = ["derive"] } gperftools = { version = "0.2.0", optional = true } serde = { version = "1.0.130", features = ["derive", "rc"] } serde_json = "1.0.67" -serde_with = { version = "3.8.1", features = ["base64"] } [features] counters = [] diff --git a/arbitrator/bench/src/bin.rs b/arbitrator/bench/src/bin.rs index f7e69f5373..60a7036e2b 100644 --- a/arbitrator/bench/src/bin.rs +++ b/arbitrator/bench/src/bin.rs @@ -1,6 +1,5 @@ use std::{path::PathBuf, time::Duration}; -use bench::prepare::*; use clap::Parser; use eyre::bail; @@ -10,11 +9,12 @@ use gperftools::profiler::PROFILER; #[cfg(feature = "heapprof")] use gperftools::heap_profiler::HEAP_PROFILER; -use prover::machine::MachineStatus; - #[cfg(feature = "counters")] use prover::{machine, memory, merkle}; +use prover::machine::MachineStatus; +use prover::prepare::prepare_machine; + #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { diff --git a/arbitrator/bench/src/lib.rs b/arbitrator/bench/src/lib.rs deleted file mode 100644 index 5f7c024094..0000000000 --- a/arbitrator/bench/src/lib.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod parse_input; -pub mod prepare; diff --git a/arbitrator/bench/src/parse_input.rs b/arbitrator/bench/src/parse_input.rs deleted file mode 100644 index decc67372a..0000000000 --- a/arbitrator/bench/src/parse_input.rs +++ /dev/null @@ -1,76 +0,0 @@ -use arbutil::Bytes32; -use serde::{Deserialize, Serialize}; -use serde_json; -use serde_with::base64::Base64; -use serde_with::As; -use serde_with::DisplayFromStr; -use std::{ - collections::HashMap, - io::{self, BufRead}, -}; - -mod prefixed_hex { - use serde::{self, Deserialize, Deserializer, Serializer}; - - pub fn serialize(bytes: &Vec, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&format!("0x{}", hex::encode(bytes))) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> - where - D: Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - if let Some(s) = s.strip_prefix("0x") { - hex::decode(s).map_err(serde::de::Error::custom) - } else { - Err(serde::de::Error::custom("missing 0x prefix")) - } - } -} - -#[derive(Debug, Clone, Deserialize, Serialize)] -pub struct PreimageMap(HashMap>); - -#[derive(Debug, Clone, Deserialize, Serialize)] -#[serde(rename_all = "PascalCase")] -pub struct BatchInfo { - pub number: u64, - #[serde(with = "As::")] - pub data_b64: Vec, -} - -#[derive(Debug, Deserialize, Serialize)] -#[serde(rename_all = "PascalCase")] -pub struct StartState { - #[serde(with = "prefixed_hex")] - pub block_hash: Vec, - #[serde(with = "prefixed_hex")] - pub send_root: Vec, - pub batch: u64, - pub pos_in_batch: u64, -} - -#[derive(Debug, Deserialize, Serialize)] -#[serde(rename_all = "PascalCase")] -pub struct FileData { - pub id: u64, - pub has_delayed_msg: bool, - pub delayed_msg_nr: u64, - #[serde(with = "As::>>")] - pub preimages_b64: HashMap>>, - pub batch_info: Vec, - #[serde(with = "As::")] - pub delayed_msg_b64: Vec, - pub start_state: StartState, -} - -impl FileData { - pub fn from_reader(mut reader: R) -> io::Result { - let data = serde_json::from_reader(&mut reader)?; - Ok(data) - 
} -} diff --git a/arbitrator/prover/Cargo.toml b/arbitrator/prover/Cargo.toml index 5475647765..da329b1cb5 100644 --- a/arbitrator/prover/Cargo.toml +++ b/arbitrator/prover/Cargo.toml @@ -19,10 +19,10 @@ num = "0.4" rustc-demangle = "0.1.21" serde = { version = "1.0.130", features = ["derive", "rc"] } serde_json = "1.0.67" +serde_with = { version = "3.8.1", features = ["base64"] } sha3 = "0.9.1" static_assertions = "1.1.0" structopt = "0.3.23" -serde_with = "1.12.1" parking_lot = "0.12.1" lazy_static.workspace = true itertools = "0.10.5" diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index 0f537478eb..08473c2598 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -11,6 +11,8 @@ pub mod machine; /// cbindgen:ignore pub mod memory; pub mod merkle; +pub mod parse_input; +pub mod prepare; mod print; pub mod programs; mod reinterpret; diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index dba32e0e72..a889cc60f3 100644 --- a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -8,6 +8,7 @@ use eyre::{eyre, Context, Result}; use fnv::{FnvHashMap as HashMap, FnvHashSet as HashSet}; use prover::{ machine::{GlobalState, InboxIdentifier, Machine, MachineStatus, PreimageResolver, ProofInfo}, + prepare::prepare_machine, utils::{file_bytes, hash_preimage, CBytes}, wavm::Opcode, }; @@ -86,6 +87,10 @@ struct Opts { skip_until_host_io: bool, #[structopt(long)] max_steps: Option, + // JSON inputs supercede any of the command-line inputs which could + // be specified in the JSON file. + #[structopt(long)] + json_inputs: Option, } fn file_with_stub_header(path: &Path, headerlength: usize) -> Result> { @@ -135,83 +140,8 @@ fn main() -> Result<()> { } } } - let mut inbox_contents = HashMap::default(); - let mut inbox_position = opts.inbox_position; - let mut delayed_position = opts.delayed_inbox_position; - let inbox_header_len; - let delayed_header_len; - if opts.inbox_add_stub_headers { - inbox_header_len = INBOX_HEADER_LEN; - delayed_header_len = DELAYED_HEADER_LEN + 1; - } else { - inbox_header_len = 0; - delayed_header_len = 0; - } - - for path in opts.inbox { - inbox_contents.insert( - (InboxIdentifier::Sequencer, inbox_position), - file_with_stub_header(&path, inbox_header_len)?, - ); - println!("read file {:?} to seq. 
inbox {}", &path, inbox_position); - inbox_position += 1; - } - for path in opts.delayed_inbox { - inbox_contents.insert( - (InboxIdentifier::Delayed, delayed_position), - file_with_stub_header(&path, delayed_header_len)?, - ); - delayed_position += 1; - } - let mut preimages: HashMap> = HashMap::default(); - if let Some(path) = opts.preimages { - let mut file = BufReader::new(File::open(path)?); - loop { - let mut ty_buf = [0u8; 1]; - match file.read_exact(&mut ty_buf) { - Ok(()) => {} - Err(e) if e.kind() == ErrorKind::UnexpectedEof => break, - Err(e) => return Err(e.into()), - } - let preimage_ty: PreimageType = ty_buf[0].try_into()?; - - let mut size_buf = [0u8; 8]; - file.read_exact(&mut size_buf)?; - let size = u64::from_le_bytes(size_buf) as usize; - let mut buf = vec![0u8; size]; - file.read_exact(&mut buf)?; - - let hash = hash_preimage(&buf, preimage_ty)?; - preimages - .entry(preimage_ty) - .or_default() - .insert(hash.into(), buf.as_slice().into()); - } - } - let preimage_resolver = - Arc::new(move |_, ty, hash| preimages.get(&ty).and_then(|m| m.get(&hash)).cloned()) - as PreimageResolver; - - let last_block_hash = decode_hex_arg(&opts.last_block_hash, "--last-block-hash")?; - let last_send_root = decode_hex_arg(&opts.last_send_root, "--last-send-root")?; - - let global_state = GlobalState { - u64_vals: [opts.inbox_position, opts.position_within_message], - bytes32_vals: [last_block_hash, last_send_root], - }; - - let mut mach = Machine::from_paths( - &opts.libraries, - &opts.binary, - true, - opts.allow_hostapi, - opts.debug_funcs, - true, - global_state, - inbox_contents, - preimage_resolver, - )?; + let mut mach = initialize_machine(&opts)?; for path in &opts.stylus_modules { let err = || eyre!("failed to read module at {}", path.to_string_lossy().red()); @@ -414,6 +344,13 @@ fn main() -> Result<()> { }); } + println!( + "End GlobalState:\n BlockHash: {:?}\n SendRoot: {:?}\n Batch: {}\n PosInBatch: {}", + mach.get_global_state().bytes32_vals[0], + mach.get_global_state().bytes32_vals[1], + mach.get_global_state().u64_vals[0], + mach.get_global_state().u64_vals[1] + ); println!("End machine status: {:?}", mach.get_status()); println!("End machine hash: {}", mach.hash()); println!("End machine stack: {:?}", mach.get_data_stack()); @@ -462,7 +399,6 @@ fn main() -> Result<()> { } } } - let opts_binary = opts.binary; let opts_libraries = opts.libraries; let format_pc = |module_num: usize, func_num: usize| -> (String, String) { @@ -543,3 +479,87 @@ fn main() -> Result<()> { } Ok(()) } + +fn initialize_machine(opts: &Opts) -> eyre::Result { + if let Some(json_inputs) = opts.json_inputs.clone() { + prepare_machine(json_inputs, opts.binary.clone()) + } else { + let mut inbox_contents = HashMap::default(); + let mut inbox_position = opts.inbox_position; + let mut delayed_position = opts.delayed_inbox_position; + let inbox_header_len; + let delayed_header_len; + if opts.inbox_add_stub_headers { + inbox_header_len = INBOX_HEADER_LEN; + delayed_header_len = DELAYED_HEADER_LEN + 1; + } else { + inbox_header_len = 0; + delayed_header_len = 0; + } + + for path in opts.inbox.clone() { + inbox_contents.insert( + (InboxIdentifier::Sequencer, inbox_position), + file_with_stub_header(&path, inbox_header_len)?, + ); + println!("read file {:?} to seq. 
inbox {}", &path, inbox_position); + inbox_position += 1; + } + for path in opts.delayed_inbox.clone() { + inbox_contents.insert( + (InboxIdentifier::Delayed, delayed_position), + file_with_stub_header(&path, delayed_header_len)?, + ); + delayed_position += 1; + } + + let mut preimages: HashMap> = HashMap::default(); + if let Some(path) = opts.preimages.clone() { + let mut file = BufReader::new(File::open(path)?); + loop { + let mut ty_buf = [0u8; 1]; + match file.read_exact(&mut ty_buf) { + Ok(()) => {} + Err(e) if e.kind() == ErrorKind::UnexpectedEof => break, + Err(e) => return Err(e.into()), + } + let preimage_ty: PreimageType = ty_buf[0].try_into()?; + + let mut size_buf = [0u8; 8]; + file.read_exact(&mut size_buf)?; + let size = u64::from_le_bytes(size_buf) as usize; + let mut buf = vec![0u8; size]; + file.read_exact(&mut buf)?; + + let hash = hash_preimage(&buf, preimage_ty)?; + preimages + .entry(preimage_ty) + .or_default() + .insert(hash.into(), buf.as_slice().into()); + } + } + let preimage_resolver = + Arc::new(move |_, ty, hash| preimages.get(&ty).and_then(|m| m.get(&hash)).cloned()) + as PreimageResolver; + + let last_block_hash = decode_hex_arg(&opts.last_block_hash, "--last-block-hash")?; + let last_send_root = decode_hex_arg(&opts.last_send_root, "--last-send-root")?; + + let global_state = GlobalState { + u64_vals: [opts.inbox_position, opts.position_within_message], + bytes32_vals: [last_block_hash, last_send_root], + }; + + Machine::from_paths( + &opts.libraries, + &opts.binary, + true, + opts.allow_hostapi, + opts.debug_funcs, + true, + global_state, + inbox_contents, + preimage_resolver, + ) + } +} diff --git a/arbitrator/prover/src/parse_input.rs b/arbitrator/prover/src/parse_input.rs new file mode 100644 index 0000000000..fa7adb4c41 --- /dev/null +++ b/arbitrator/prover/src/parse_input.rs @@ -0,0 +1,112 @@ +use arbutil::Bytes32; +use serde::Deserialize; +use serde_json; +use serde_with::base64::Base64; +use serde_with::As; +use serde_with::DisplayFromStr; +use std::{ + collections::HashMap, + io::{self, BufRead}, +}; + +/// prefixed_hex deserializes hex strings which are prefixed with `0x` +/// +/// The default hex deserializer does not support prefixed hex strings. +/// +/// It is an error to use this deserializer on a string that does not +/// begin with `0x`. +mod prefixed_hex { + use serde::{self, Deserialize, Deserializer}; + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + if let Some(s) = s.strip_prefix("0x") { + hex::decode(s).map_err(serde::de::Error::custom) + } else { + Err(serde::de::Error::custom("missing 0x prefix")) + } + } +} + +#[derive(Debug)] +pub struct UserWasm(Vec); + +/// UserWasm is a wrapper around Vec +/// +/// It is useful for decompressing a brotli-compressed wasm module. +/// +/// Note: The wrapped Vec is already Base64 decoded before +/// from(Vec) is called by serde. +impl UserWasm { + /// as_vec returns the decompressed wasm module as a Vec + pub fn as_vec(&self) -> Vec { + self.0.clone() + } +} + +impl AsRef<[u8]> for UserWasm { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// The Vec is compressed using brotli, and must be decompressed before use. 
+impl From> for UserWasm { + fn from(data: Vec) -> Self { + let decompressed = brotli::decompress(&data, brotli::Dictionary::Empty).unwrap(); + Self(decompressed) + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct BatchInfo { + pub number: u64, + #[serde(with = "As::")] + pub data_b64: Vec, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct StartState { + #[serde(with = "prefixed_hex")] + pub block_hash: Vec, + #[serde(with = "prefixed_hex")] + pub send_root: Vec, + pub batch: u64, + pub pos_in_batch: u64, +} + +/// FileData is the deserialized form of the input JSON file. +/// +/// The go JSON library in json.go uses some custom serialization and +/// compression logic that needs to be reversed when deserializing the +/// JSON in rust. +/// +/// Note: It is important to change this file whenever the go JSON +/// serialization changes. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct FileData { + pub id: u64, + pub has_delayed_msg: bool, + pub delayed_msg_nr: u64, + #[serde(with = "As::>>")] + pub preimages_b64: HashMap>>, + pub batch_info: Vec, + #[serde(with = "As::")] + pub delayed_msg_b64: Vec, + pub start_state: StartState, + #[serde(with = "As::>>")] + pub user_wasms: HashMap>, +} + +impl FileData { + pub fn from_reader(mut reader: R) -> io::Result { + let data = serde_json::from_reader(&mut reader)?; + Ok(data) + } +} diff --git a/arbitrator/bench/src/prepare.rs b/arbitrator/prover/src/prepare.rs similarity index 85% rename from arbitrator/bench/src/prepare.rs rename to arbitrator/prover/src/prepare.rs index 741a7350ac..a485267f39 100644 --- a/arbitrator/bench/src/prepare.rs +++ b/arbitrator/prover/src/prepare.rs @@ -1,13 +1,13 @@ use arbutil::{Bytes32, PreimageType}; -use prover::machine::{argument_data_to_inbox, GlobalState, Machine}; -use prover::utils::CBytes; use std::collections::HashMap; use std::fs::File; use std::io::BufReader; use std::path::{Path, PathBuf}; use std::sync::Arc; +use crate::machine::{argument_data_to_inbox, GlobalState, Machine}; use crate::parse_input::*; +use crate::utils::CBytes; pub fn prepare_machine(preimages: PathBuf, machines: PathBuf) -> eyre::Result { let file = File::open(preimages)?; @@ -40,6 +40,15 @@ pub fn prepare_machine(preimages: PathBuf, machines: PathBuf) -> eyre::Result = std::result::Result>; + +// These are not available as hostios in the sdk, so we import them directly. 
+#[link(wasm_import_module = "vm_hooks")]
+extern "C" {
+    fn math_div(value: *mut u8, divisor: *const u8);
+    fn math_mod(value: *mut u8, modulus: *const u8);
+    fn math_pow(value: *mut u8, exponent: *const u8);
+    fn math_add_mod(value: *mut u8, addend: *const u8, modulus: *const u8);
+    fn math_mul_mod(value: *mut u8, multiplier: *const u8, modulus: *const u8);
+    fn transient_load_bytes32(key: *const u8, dest: *mut u8);
+    fn transient_store_bytes32(key: *const u8, value: *const u8);
+    fn exit_early(status: u32);
+}
+
+#[external]
+impl HostioTest {
+    fn exit_early() -> Result<()> {
+        unsafe {
+            exit_early(0);
+        }
+        Ok(())
+    }
+
+    fn transient_load_bytes32(key: B256) -> Result<B256> {
+        let mut result = B256::ZERO;
+        unsafe {
+            transient_load_bytes32(key.as_ptr(), result.as_mut_ptr());
+        }
+        Ok(result)
+    }
+
+    fn transient_store_bytes32(key: B256, value: B256) {
+        unsafe {
+            transient_store_bytes32(key.as_ptr(), value.as_ptr());
+        }
+    }
+
+    fn return_data_size() -> Result<U256> {
+        unsafe { Ok(hostio::return_data_size().try_into().unwrap()) }
+    }
+
+    fn emit_log(data: Bytes, n: i8, t1: B256, t2: B256, t3: B256, t4: B256) -> Result<()> {
+        let topics = &[t1, t2, t3, t4];
+        evm::raw_log(&topics[0..n as usize], data.as_slice())?;
+        Ok(())
+    }
+
+    fn account_balance(account: Address) -> Result<U256> {
+        Ok(account.balance())
+    }
+
+    fn account_code(account: Address) -> Result<Vec<u8>> {
+        let mut size = 10000;
+        let mut code = vec![0; size];
+        unsafe {
+            size = hostio::account_code(account.as_ptr(), 0, size, code.as_mut_ptr());
+        }
+        code.resize(size, 0);
+        Ok(code)
+    }
+
+    fn account_code_size(account: Address) -> Result<U256> {
+        Ok(account.code_size().try_into().unwrap())
+    }
+
+    fn account_codehash(account: Address) -> Result<B256> {
+        Ok(account.codehash())
+    }
+
+    fn evm_gas_left() -> Result<U256> {
+        Ok(evm::gas_left().try_into().unwrap())
+    }
+
+    fn evm_ink_left() -> Result<U256> {
+        Ok(tx::ink_to_gas(evm::ink_left()).try_into().unwrap())
+    }
+
+    fn block_basefee() -> Result<U256> {
+        Ok(block::basefee())
+    }
+
+    fn chainid() -> Result<U256> {
+        Ok(block::chainid().try_into().unwrap())
+    }
+
+    fn block_coinbase() -> Result<Address> {
+        Ok(block::coinbase())
+    }
+
+    fn block_gas_limit() -> Result<U256> {
+        Ok(block::gas_limit().try_into().unwrap())
+    }
+
+    fn block_number() -> Result<U256> {
+        Ok(block::number().try_into().unwrap())
+    }
+
+    fn block_timestamp() -> Result<U256> {
+        Ok(block::timestamp().try_into().unwrap())
+    }
+
+    fn contract_address() -> Result<Address> {
+        Ok(contract::address())
+    }
+
+    fn math_div(a: U256, b: U256) -> Result<U256> {
+        let mut a_bytes: B256 = a.into();
+        let b_bytes: B256 = b.into();
+        unsafe {
+            math_div(a_bytes.as_mut_ptr(), b_bytes.as_ptr());
+        }
+        Ok(a_bytes.into())
+    }
+
+    fn math_mod(a: U256, b: U256) -> Result<U256> {
+        let mut a_bytes: B256 = a.into();
+        let b_bytes: B256 = b.into();
+        unsafe {
+            math_mod(a_bytes.as_mut_ptr(), b_bytes.as_ptr());
+        }
+        Ok(a_bytes.into())
+    }
+
+    fn math_pow(a: U256, b: U256) -> Result<U256> {
+        let mut a_bytes: B256 = a.into();
+        let b_bytes: B256 = b.into();
+        unsafe {
+            math_pow(a_bytes.as_mut_ptr(), b_bytes.as_ptr());
+        }
+        Ok(a_bytes.into())
+    }
+
+    fn math_add_mod(a: U256, b: U256, c: U256) -> Result<U256> {
+        let mut a_bytes: B256 = a.into();
+        let b_bytes: B256 = b.into();
+        let c_bytes: B256 = c.into();
+        unsafe {
+            math_add_mod(a_bytes.as_mut_ptr(), b_bytes.as_ptr(), c_bytes.as_ptr());
+        }
+        Ok(a_bytes.into())
+    }
+
+    fn math_mul_mod(a: U256, b: U256, c: U256) -> Result<U256> {
+        let mut a_bytes: B256 = a.into();
+        let b_bytes: B256 = b.into();
+        let c_bytes: B256 = c.into();
+        unsafe {
+            math_mul_mod(a_bytes.as_mut_ptr(), b_bytes.as_ptr(), c_bytes.as_ptr());
+        }
+        Ok(a_bytes.into())
+    }
+
+    fn msg_sender() -> Result<Address> {
+        Ok(msg::sender())
+    }
+
+    fn msg_value() -> Result<U256> {
+        Ok(msg::value())
+    }
+
+    fn keccak(preimage: Bytes) -> Result<B256> {
+        let mut result = B256::ZERO;
+        unsafe {
+            hostio::native_keccak256(preimage.as_ptr(), preimage.len(), result.as_mut_ptr());
+        }
+        Ok(result)
+    }
+
+    fn tx_gas_price() -> Result<U256> {
+        Ok(tx::gas_price())
+    }
+
+    fn tx_ink_price() -> Result<U256> {
+        Ok(tx::ink_to_gas(tx::ink_price().into()).try_into().unwrap())
+    }
+
+    fn tx_origin() -> Result<Address>
{ + Ok(tx::origin()) + } +} diff --git a/arbitrator/wasm-libraries/Cargo.lock b/arbitrator/wasm-libraries/Cargo.lock index 7620ff538b..a5a066e5c9 100644 --- a/arbitrator/wasm-libraries/Cargo.lock +++ b/arbitrator/wasm-libraries/Cargo.lock @@ -31,6 +31,21 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -91,6 +106,12 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "bincode" version = "1.3.3" @@ -203,6 +224,15 @@ dependencies = [ "rand_pcg", ] +[[package]] +name = "cc" +version = "1.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9540e661f81799159abee814118cc139a2004b3a3aa3ea37724a1b66530b90e0" +dependencies = [ + "shlex", +] + [[package]] name = "cfg-if" version = "0.1.10" @@ -215,6 +245,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "num-traits", + "serde", + "windows-targets", +] + [[package]] name = "clap" version = "2.34.0" @@ -236,6 +279,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + [[package]] name = "cpufeatures" version = "0.2.12" @@ -261,38 +310,14 @@ dependencies = [ "typenum", ] -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - [[package]] name = "darling" version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core 0.20.10", - "darling_macro 0.20.10", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", + 
"darling_core", + "darling_macro", ] [[package]] @@ -305,29 +330,29 @@ dependencies = [ "ident_case", "proc-macro2", "quote", + "strsim 0.11.1", "syn 2.0.72", ] [[package]] name = "darling_macro" -version = "0.13.4" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core 0.13.4", + "darling_core", "quote", - "syn 1.0.109", + "syn 2.0.72", ] [[package]] -name = "darling_macro" -version = "0.20.10" +name = "deranged" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ - "darling_core 0.20.10", - "quote", - "syn 2.0.72", + "powerfmt", + "serde", ] [[package]] @@ -434,7 +459,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59c3b24c345d8c314966bdc1832f6c2635bfcce8e7cf363bd115987bba2ee242" dependencies = [ - "darling 0.20.10", + "darling", "proc-macro2", "quote", "syn 2.0.72", @@ -548,6 +573,29 @@ dependencies = [ "caller-env", ] +[[package]] +name = "iana-time-zone" +version = "0.1.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -568,6 +616,7 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", + "serde", ] [[package]] @@ -578,6 +627,7 @@ checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" dependencies = [ "equivalent", "hashbrown 0.14.5", + "serde", ] [[package]] @@ -595,6 +645,15 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +[[package]] +name = "js-sys" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +dependencies = [ + "wasm-bindgen", +] + [[package]] name = "keccak" version = "0.1.5" @@ -632,6 +691,12 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + [[package]] name = "lru" version = "0.12.4" @@ -719,6 +784,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-derive" version = "0.4.2" @@ -832,6 +903,12 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" 
+[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -1115,24 +1192,32 @@ dependencies = [ [[package]] name = "serde_with" -version = "1.14.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" +checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" dependencies = [ + "base64", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.3.0", "serde", + "serde_derive", + "serde_json", "serde_with_macros", + "time", ] [[package]] name = "serde_with_macros" -version = "1.5.2" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" +checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.72", ] [[package]] @@ -1181,6 +1266,12 @@ dependencies = [ "keccak", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "simdutf8" version = "0.1.4" @@ -1216,9 +1307,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "structopt" @@ -1307,6 +1398,37 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "time" +version = "0.3.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +dependencies = [ + "num-conv", + "time-core", +] + [[package]] name = "tiny-keccak" version = "2.0.2" @@ -1445,6 +1567,61 @@ dependencies = [ "wee_alloc", ] +[[package]] +name = "wasm-bindgen" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.72", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.93" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.72", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" + [[package]] name = "wasm-encoder" version = "0.215.0" @@ -1535,6 +1712,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-targets" version = "0.52.6" diff --git a/arbnode/api.go b/arbnode/api.go index 228ad51cf8..2dabd41bff 100644 --- a/arbnode/api.go +++ b/arbnode/api.go @@ -7,9 +7,11 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/ethdb" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/validator" + "github.com/offchainlabs/nitro/validator/server_api" ) type BlockValidatorAPI struct { @@ -54,3 +56,8 @@ func (a *BlockValidatorDebugAPI) ValidateMessageNumber( result.Valid = valid return result, err } + +func (a *BlockValidatorDebugAPI) ValidationInputsAt(ctx context.Context, msgNum hexutil.Uint64, target ethdb.WasmTarget, +) (server_api.InputJSON, error) { + return a.val.ValidationInputsAt(ctx, arbutil.MessageIndex(msgNum), target) +} diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index c2c94b8c10..373d247696 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -38,7 +39,6 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter/noop" "github.com/offchainlabs/nitro/arbnode/dataposter/slice" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" @@ -69,7 +69,7 @@ var ( type DataPoster struct { stopwaiter.StopWaiter headerReader *headerreader.HeaderReader - client arbutil.L1Interface + client *ethclient.Client auth *bind.TransactOpts signer signerFn config ConfigFetcher diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index d2c49427be..7bf0f86e6f 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -2,17 +2,18 @@ package dataposter import ( "context" + 
"errors" "fmt" "math/big" "testing" "time" "github.com/Knetic/govaluate" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" @@ -152,46 +153,36 @@ func TestMaxFeeCapFormulaCalculation(t *testing.T) { } } -type stubL1Client struct { +type stubL1ClientInner struct { senderNonce uint64 suggestedGasTipCap *big.Int - - // Define most of the required methods that aren't used by feeAndTipCaps - backends.SimulatedBackend -} - -func (c *stubL1Client) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) { - return c.senderNonce, nil -} - -func (c *stubL1Client) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - return c.suggestedGasTipCap, nil -} - -// Not used but we need to define -func (c *stubL1Client) BlockNumber(ctx context.Context) (uint64, error) { - return 0, nil -} - -func (c *stubL1Client) CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) { - return []byte{}, nil } -func (c *stubL1Client) CodeAtHash(ctx context.Context, address common.Address, blockHash common.Hash) ([]byte, error) { - return []byte{}, nil +func (c *stubL1ClientInner) CallContext(ctx_in context.Context, result interface{}, method string, args ...interface{}) error { + switch method { + case "eth_getTransactionCount": + ptr, ok := result.(*hexutil.Uint64) + if !ok { + return errors.New("result is not a *hexutil.Uint64") + } + *ptr = hexutil.Uint64(c.senderNonce) + case "eth_maxPriorityFeePerGas": + ptr, ok := result.(*hexutil.Big) + if !ok { + return errors.New("result is not a *hexutil.Big") + } + *ptr = hexutil.Big(*c.suggestedGasTipCap) + } + return nil } -func (c *stubL1Client) ChainID(ctx context.Context) (*big.Int, error) { +func (c *stubL1ClientInner) EthSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*rpc.ClientSubscription, error) { return nil, nil } - -func (c *stubL1Client) Client() rpc.ClientInterface { +func (c *stubL1ClientInner) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { return nil } - -func (c *stubL1Client) TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) { - return common.Address{}, nil -} +func (c *stubL1ClientInner) Close() {} func TestFeeAndTipCaps_EnoughBalance_NoBacklog_NoUnconfirmed_BlobTx(t *testing.T) { conf := func() *DataPosterConfig { @@ -223,10 +214,10 @@ func TestFeeAndTipCaps_EnoughBalance_NoBacklog_NoUnconfirmed_BlobTx(t *testing.T extraBacklog: func() uint64 { return 0 }, balance: big.NewInt(0).Mul(big.NewInt(params.Ether), big.NewInt(10)), usingNoOpStorage: false, - client: &stubL1Client{ + client: ethclient.NewClient(&stubL1ClientInner{ senderNonce: 1, suggestedGasTipCap: big.NewInt(2 * params.GWei), - }, + }), auth: &bind.TransactOpts{ From: common.Address{}, }, @@ -354,10 +345,10 @@ func TestFeeAndTipCaps_RBF_RisingBlobFee_FallingBaseFee(t *testing.T) { extraBacklog: func() uint64 { return 0 }, balance: big.NewInt(0).Mul(big.NewInt(params.Ether), big.NewInt(10)), usingNoOpStorage: false, - client: &stubL1Client{ + client: ethclient.NewClient(&stubL1ClientInner{ senderNonce: 1, 
suggestedGasTipCap: big.NewInt(2 * params.GWei), - }, + }), auth: &bind.TransactOpts{ From: common.Address{}, }, diff --git a/arbnode/delayed.go b/arbnode/delayed.go index 082f0ecf9d..354fa671b3 100644 --- a/arbnode/delayed.go +++ b/arbnode/delayed.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" @@ -58,11 +59,11 @@ type DelayedBridge struct { con *bridgegen.IBridge address common.Address fromBlock uint64 - client arbutil.L1Interface + client *ethclient.Client messageProviders map[common.Address]*bridgegen.IDelayedMessageProvider } -func NewDelayedBridge(client arbutil.L1Interface, addr common.Address, fromBlock uint64) (*DelayedBridge, error) { +func NewDelayedBridge(client *ethclient.Client, addr common.Address, fromBlock uint64) (*DelayedBridge, error) { con, err := bridgegen.NewIBridge(addr, client) if err != nil { return nil, err diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index c596cfa9b0..98104b2ea7 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -14,6 +14,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" @@ -93,7 +94,7 @@ type InboxReader struct { delayedBridge *DelayedBridge sequencerInbox *SequencerInbox caughtUpChan chan struct{} - client arbutil.L1Interface + client *ethclient.Client l1Reader *headerreader.HeaderReader // Atomic @@ -101,7 +102,7 @@ type InboxReader struct { lastReadBatchCount atomic.Uint64 } -func NewInboxReader(tracker *InboxTracker, client arbutil.L1Interface, l1Reader *headerreader.HeaderReader, firstMessageBlock *big.Int, delayedBridge *DelayedBridge, sequencerInbox *SequencerInbox, config InboxReaderConfigFetcher) (*InboxReader, error) { +func NewInboxReader(tracker *InboxTracker, client *ethclient.Client, l1Reader *headerreader.HeaderReader, firstMessageBlock *big.Int, delayedBridge *DelayedBridge, sequencerInbox *SequencerInbox, config InboxReaderConfigFetcher) (*InboxReader, error) { err := config().Validate() if err != nil { return nil, err diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index fe4149c80e..7686fe413f 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -13,6 +13,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -599,7 +600,7 @@ type multiplexerBackend struct { positionWithinMessage uint64 ctx context.Context - client arbutil.L1Interface + client *ethclient.Client inbox *InboxTracker } @@ -639,7 +640,7 @@ func (b *multiplexerBackend) ReadDelayedInbox(seqNum uint64) (*arbostypes.L1Inco var delayedMessagesMismatch = errors.New("sequencer batch delayed messages missing or different") -func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L1Interface, batches []*SequencerInboxBatch) error { +func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client *ethclient.Client, batches []*SequencerInboxBatch) error { var nextAcc common.Hash var prevbatchmeta BatchMetadata sequenceNumberToKeep := uint64(0) diff --git a/arbnode/node.go b/arbnode/node.go index a9da4ea24b..c5b3bbe071 100644 --- 
a/arbnode/node.go +++ b/arbnode/node.go @@ -18,6 +18,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -407,7 +408,7 @@ func createNodeImpl( arbDb ethdb.Database, configFetcher ConfigFetcher, l2Config *params.ChainConfig, - l1client arbutil.L1Interface, + l1client *ethclient.Client, deployInfo *chaininfo.RollupAddresses, txOptsValidator *bind.TransactOpts, txOptsBatchPoster *bind.TransactOpts, @@ -781,7 +782,7 @@ func CreateNode( arbDb ethdb.Database, configFetcher ConfigFetcher, l2Config *params.ChainConfig, - l1client arbutil.L1Interface, + l1client *ethclient.Client, deployInfo *chaininfo.RollupAddresses, txOptsValidator *bind.TransactOpts, txOptsBatchPoster *bind.TransactOpts, diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index 73e52ded53..81146ed46e 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" @@ -52,10 +53,10 @@ type SequencerInbox struct { con *bridgegen.SequencerInbox address common.Address fromBlock int64 - client arbutil.L1Interface + client *ethclient.Client } -func NewSequencerInbox(client arbutil.L1Interface, addr common.Address, fromBlock int64) (*SequencerInbox, error) { +func NewSequencerInbox(client *ethclient.Client, addr common.Address, fromBlock int64) (*SequencerInbox, error) { con, err := bridgegen.NewSequencerInbox(addr, client) if err != nil { return nil, err @@ -111,7 +112,7 @@ type SequencerInboxBatch struct { serialized []byte // nil if serialization isn't cached yet } -func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbutil.L1Interface) ([]byte, error) { +func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client *ethclient.Client) ([]byte, error) { switch m.dataLocation { case batchDataTxInput: data, err := arbutil.GetLogEmitterTxData(ctx, client, m.rawLog) @@ -169,7 +170,7 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut } } -func (m *SequencerInboxBatch) Serialize(ctx context.Context, client arbutil.L1Interface) ([]byte, error) { +func (m *SequencerInboxBatch) Serialize(ctx context.Context, client *ethclient.Client) ([]byte, error) { if m.serialized != nil { return m.serialized, nil } diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 24a0564b97..38b1c003db 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -1140,7 +1140,7 @@ func (s *TransactionStreamer) storeResult( // exposed for testing // return value: true if should be called again immediately -func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution.ExecutionSequencer) bool { +func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context) bool { if ctx.Err() != nil { return false } @@ -1212,7 +1212,7 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution } func (s *TransactionStreamer) executeMessages(ctx context.Context, ignored struct{}) time.Duration { - if s.ExecuteNextMsg(ctx, s.exec) { + if 
s.ExecuteNextMsg(ctx) { return 0 } return s.config().ExecuteMessageLoopDelay diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index b08c7c5d30..d6c35339f6 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -532,6 +532,20 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { refund := func(refundFrom common.Address, amount *big.Int) { const errLog = "fee address doesn't have enough funds to give user refund" + logMissingRefund := func(err error) { + if !errors.Is(err, vm.ErrInsufficientBalance) { + log.Error("unexpected error refunding balance", "err", err, "feeAddress", refundFrom) + return + } + logLevel := log.Error + isContract := p.evm.StateDB.GetCodeSize(refundFrom) > 0 + if isContract { + // It's expected that the balance might not still be in this address if it's a contract. + logLevel = log.Debug + } + logLevel(errLog, "err", err, "feeAddress", refundFrom) + } + // Refund funds to the fee refund address without overdrafting the L1 deposit. toRefundAddr := takeFunds(maxRefund, amount) err = util.TransferBalance(&refundFrom, &inner.RefundTo, toRefundAddr, p.evm, scenario, "refund") @@ -539,13 +553,13 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { // Normally the network fee address should be holding any collected fees. // However, in theory, they could've been transferred out during the redeem attempt. // If the network fee address doesn't have the necessary balance, log an error and don't give a refund. - log.Error(errLog, "err", err, "feeAddress", refundFrom) + logMissingRefund(err) } // Any extra refund can't be given to the fee refund address if it didn't come from the L1 deposit. // Instead, give the refund to the retryable from address. err = util.TransferBalance(&refundFrom, &inner.From, arbmath.BigSub(amount, toRefundAddr), p.evm, scenario, "refund") if err != nil { - log.Error(errLog, "err", err, "feeAddress", refundFrom) + logMissingRefund(err) } } diff --git a/arbos/util/storage_cache.go b/arbos/util/storage_cache.go index bf05a5824d..9573d1ffc7 100644 --- a/arbos/util/storage_cache.go +++ b/arbos/util/storage_cache.go @@ -5,6 +5,7 @@ package util import ( "github.com/ethereum/go-ethereum/common" + "slices" ) type storageCacheEntry struct { @@ -67,6 +68,10 @@ func (s *storageCache) Flush() []storageCacheStores { }) } } + sortFunc := func(a, b storageCacheStores) int { + return a.Key.Cmp(b.Key) + } + slices.SortFunc(stores, sortFunc) return stores } diff --git a/arbos/util/storage_cache_test.go b/arbos/util/storage_cache_test.go index 1cc4ea14ec..9fd452851d 100644 --- a/arbos/util/storage_cache_test.go +++ b/arbos/util/storage_cache_test.go @@ -4,7 +4,6 @@ package util import ( - "bytes" "slices" "testing" @@ -76,7 +75,7 @@ func TestStorageCache(t *testing.T) { {Key: keys[2], Value: values[2]}, } sortFunc := func(a, b storageCacheStores) int { - return bytes.Compare(a.Key.Bytes(), b.Key.Bytes()) + return a.Key.Cmp(b.Key) } slices.SortFunc(stores, sortFunc) slices.SortFunc(expected, sortFunc) diff --git a/arbutil/correspondingl1blocknumber.go b/arbutil/correspondingl1blocknumber.go index d654e471e2..c8770e2034 100644 --- a/arbutil/correspondingl1blocknumber.go +++ b/arbutil/correspondingl1blocknumber.go @@ -19,7 +19,11 @@ func ParentHeaderToL1BlockNumber(header *types.Header) uint64 { return header.Number.Uint64() } -func CorrespondingL1BlockNumber(ctx context.Context, client L1Interface, parentBlockNumber uint64) (uint64, error) { +type ParentHeaderFetcher interface { + HeaderByNumber(ctx context.Context, number 
*big.Int) (*types.Header, error) +} + +func CorrespondingL1BlockNumber(ctx context.Context, client ParentHeaderFetcher, parentBlockNumber uint64) (uint64, error) { // #nosec G115 header, err := client.HeaderByNumber(ctx, big.NewInt(int64(parentBlockNumber))) if err != nil { diff --git a/arbutil/transaction_data.go b/arbutil/transaction_data.go index 8270a628bd..c5728967c7 100644 --- a/arbutil/transaction_data.go +++ b/arbutil/transaction_data.go @@ -8,9 +8,10 @@ import ( "fmt" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" ) -func GetLogTransaction(ctx context.Context, client L1Interface, log types.Log) (*types.Transaction, error) { +func GetLogTransaction(ctx context.Context, client *ethclient.Client, log types.Log) (*types.Transaction, error) { tx, err := client.TransactionInBlock(ctx, log.BlockHash, log.TxIndex) if err != nil { return nil, err @@ -22,7 +23,7 @@ func GetLogTransaction(ctx context.Context, client L1Interface, log types.Log) ( } // GetLogEmitterTxData requires that the tx's data is at least 4 bytes long -func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) ([]byte, error) { +func GetLogEmitterTxData(ctx context.Context, client *ethclient.Client, log types.Log) ([]byte, error) { tx, err := GetLogTransaction(ctx, client, log) if err != nil { return nil, err diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index 4b4819156d..80dd356b24 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -10,27 +10,13 @@ import ( "math/big" "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/ethclient" ) -type L1Interface interface { - bind.ContractBackend - bind.BlockHashContractCaller - ethereum.ChainReader - ethereum.ChainStateReader - ethereum.TransactionReader - TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) - BlockNumber(ctx context.Context) (uint64, error) - PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) - ChainID(ctx context.Context) (*big.Int, error) - Client() rpc.ClientInterface -} - -func SendTxAsCall(ctx context.Context, client L1Interface, tx *types.Transaction, from common.Address, blockNum *big.Int, unlimitedGas bool) ([]byte, error) { +func SendTxAsCall(ctx context.Context, client *ethclient.Client, tx *types.Transaction, from common.Address, blockNum *big.Int, unlimitedGas bool) ([]byte, error) { var gas uint64 if unlimitedGas { gas = 0 @@ -50,7 +36,7 @@ func SendTxAsCall(ctx context.Context, client L1Interface, tx *types.Transaction return client.CallContract(ctx, callMsg, blockNum) } -func GetPendingCallBlockNumber(ctx context.Context, client L1Interface) (*big.Int, error) { +func GetPendingCallBlockNumber(ctx context.Context, client *ethclient.Client) (*big.Int, error) { msg := ethereum.CallMsg{ // Pretend to be a contract deployment to execute EVM code without calling a contract. 
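+		// Leaving To nil makes the node treat the call as a contract deployment, so the payload executes directly as init code.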
To: nil, @@ -70,7 +56,7 @@ func GetPendingCallBlockNumber(ctx context.Context, client L1Interface) (*big.In return new(big.Int).SetBytes(callRes), nil } -func DetailTxError(ctx context.Context, client L1Interface, tx *types.Transaction, txRes *types.Receipt) error { +func DetailTxError(ctx context.Context, client *ethclient.Client, tx *types.Transaction, txRes *types.Receipt) error { // Re-execute the transaction as a call to get a better error if ctx.Err() != nil { return ctx.Err() @@ -96,7 +82,7 @@ func DetailTxError(ctx context.Context, client L1Interface, tx *types.Transactio return fmt.Errorf("SendTxAsCall got: %w for tx hash %v", err, tx.Hash()) } -func DetailTxErrorUsingCallMsg(ctx context.Context, client L1Interface, txHash common.Hash, txRes *types.Receipt, callMsg ethereum.CallMsg) error { +func DetailTxErrorUsingCallMsg(ctx context.Context, client *ethclient.Client, txHash common.Hash, txRes *types.Receipt, callMsg ethereum.CallMsg) error { // Re-execute the transaction as a call to get a better error if ctx.Err() != nil { return ctx.Err() diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index 7d27c57fe9..4e97ca8cd0 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -280,6 +280,18 @@ func (bc *BroadcastClient) connect(ctx context.Context, nextSeqNum arbutil.Messa MinVersion: tls.VersionTLS12, }, Extensions: extensions, + NetDial: func(ctx context.Context, network, addr string) (net.Conn, error) { + var netDialer net.Dialer + // For tcp connections, prefer IPv4 over IPv6 to avoid rate limiting issues + if network == "tcp" { + conn, err := netDialer.DialContext(ctx, "tcp4", addr) + if err == nil { + return conn, nil + } + return netDialer.DialContext(ctx, "tcp6", addr) + } + return netDialer.DialContext(ctx, network, addr) + }, } if bc.isShuttingDown() { diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 07c74cb802..9e3ecec747 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -37,7 +38,6 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/ipfshelper" @@ -560,7 +560,7 @@ func rebuildLocalWasm(ctx context.Context, config *gethexec.Config, l2BlockChain return chainDb, l2BlockChain, nil } -func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, targetConfig *gethexec.StylusTargetConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { +func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, targetConfig *gethexec.StylusTargetConfig, persistentConfig *conf.PersistentConfig, l1Client *ethclient.Client, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { if !config.Init.Force { if readOnlyDb, err := 
stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", 0, 0, config.Persistent.Ancient, "l2chaindata/", true, persistentConfig.Pebble.ExtraOptions("l2chaindata")); err == nil { if chainConfig := gethexec.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil { diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index 6fc7741478..0755f5ff9e 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state/pruner" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -80,7 +81,7 @@ func (r *importantRoots) addHeader(header *types.Header, overwrite bool) error { var hashListRegex = regexp.MustCompile("^(0x)?[0-9a-fA-F]{64}(,(0x)?[0-9a-fA-F]{64})*$") // Finds important roots to retain while proving -func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) ([]common.Hash, error) { +func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client *ethclient.Client, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) ([]common.Hash, error) { chainConfig := gethexec.TryReadStoredChainConfig(chainDb) if chainConfig == nil { return nil, errors.New("database doesn't have a chain config (was this node initialized?)") @@ -233,7 +234,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return roots.roots, nil } -func PruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) error { +func PruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client *ethclient.Client, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) error { if cacheConfig.StateScheme == rawdb.PathScheme { return nil } diff --git a/contracts b/contracts index 7396313311..b140ed63ac 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 7396313311ab17cb30e2eef27cccf96f0a9e8f7f +Subproject commit b140ed63acdb53cb906ffd1fa3c36fdbd474364e diff --git a/das/aggregator.go b/das/aggregator.go index e7460fa371..372e448e76 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -15,11 +15,11 @@ import ( flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/arbstate/daprovider" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -114,7 +114,7 @@ func NewAggregator(ctx context.Context, config DataAvailabilityConfig, services func NewAggregatorWithL1Info( config DataAvailabilityConfig, services []ServiceDetails, 
- l1client arbutil.L1Interface, + l1client *ethclient.Client, seqInboxAddress common.Address, ) (*Aggregator, error) { seqInboxCaller, err := bridgegen.NewSequencerInboxCaller(seqInboxAddress, l1client) diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go index 465b54f400..4de6c981cf 100644 --- a/das/chain_fetch_das.go +++ b/das/chain_fetch_das.go @@ -12,8 +12,8 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" ) @@ -42,7 +42,7 @@ type KeysetFetcher struct { keysetCache syncedKeysetCache } -func NewKeysetFetcher(l1client arbutil.L1Interface, seqInboxAddr common.Address) (*KeysetFetcher, error) { +func NewKeysetFetcher(l1client *ethclient.Client, seqInboxAddr common.Address) (*KeysetFetcher, error) { seqInbox, err := bridgegen.NewSequencerInbox(seqInboxAddr, l1client) if err != nil { return nil, err diff --git a/das/das.go b/das/das.go index 6bd02fbc75..0b03c05ad6 100644 --- a/das/das.go +++ b/das/das.go @@ -41,9 +41,10 @@ type DataAvailabilityConfig struct { LocalCache CacheConfig `koanf:"local-cache"` RedisCache RedisConfig `koanf:"redis-cache"` - LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` - LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` - S3Storage S3StorageServiceConfig `koanf:"s3-storage"` + LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` + LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` + S3Storage S3StorageServiceConfig `koanf:"s3-storage"` + GoogleCloudStorage GoogleCloudStorageServiceConfig `koanf:"google-cloud-storage"` MigrateLocalDBToFileStorage bool `koanf:"migrate-local-db-to-file-storage"` @@ -114,6 +115,7 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { LocalDBStorageConfigAddOptions(prefix+".local-db-storage", f) LocalFileStorageConfigAddOptions(prefix+".local-file-storage", f) S3ConfigAddOptions(prefix+".s3-storage", f) + GoogleCloudConfigAddOptions(prefix+".google-cloud-storage", f) f.Bool(prefix+".migrate-local-db-to-file-storage", DefaultDataAvailabilityConfig.MigrateLocalDBToFileStorage, "daserver will migrate all data on startup from local-db-storage to local-file-storage, then mark local-db-storage as unusable") // Key config for storage diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go index 7d48ed796d..241f2196b1 100644 --- a/das/dasRpcClient.go +++ b/das/dasRpcClient.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "golang.org/x/sync/errgroup" "github.com/ethereum/go-ethereum/rpc" @@ -21,6 +22,17 @@ import ( "github.com/offchainlabs/nitro/util/signature" ) +var ( + rpcClientStoreRequestGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/requests", nil) + rpcClientStoreSuccessGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/success", nil) + rpcClientStoreFailureGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/failure", nil) + rpcClientStoreStoredBytesGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/bytes", nil) + rpcClientStoreDurationHistogram = metrics.NewRegisteredHistogram("arb/das/rpcclient/store/duration", nil, metrics.NewBoundedHistogramSample()) + + 
rpcClientSendChunkSuccessGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/sendchunk/success", nil)
+	rpcClientSendChunkFailureGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/sendchunk/failure", nil)
+)
+
 type DASRPCClient struct { // implements DataAvailabilityService
 	clnt *rpc.Client
 	url  string
@@ -58,8 +70,20 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu
 }
 
 func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
+	rpcClientStoreRequestGauge.Inc(1)
+	start := time.Now()
+	success := false
+	defer func() {
+		if success {
+			rpcClientStoreSuccessGauge.Inc(1)
+		} else {
+			rpcClientStoreFailureGauge.Inc(1)
+		}
+		rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds())
+	}()
+
 	// #nosec G115
-	timestamp := uint64(time.Now().Unix())
+	timestamp := uint64(start.Unix())
 	nChunks := uint64(len(message)) / c.chunkSize
 	lastChunkSize := uint64(len(message)) % c.chunkSize
 	if lastChunkSize > 0 {
@@ -116,6 +140,9 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
 		return nil, err
 	}
 
+	rpcClientStoreStoredBytesGauge.Inc(int64(len(message)))
+	success = true
+
 	return &daprovider.DataAvailabilityCertificate{
 		DataHash:    common.BytesToHash(storeResult.DataHash),
 		Timeout:     uint64(storeResult.Timeout),
@@ -133,8 +160,10 @@ func (c *DASRPCClient) sendChunk(ctx context.Context, batchId, i uint64, chunk [
 	}
 
 	if err := c.clnt.CallContext(ctx, nil, "das_sendChunk", hexutil.Uint64(batchId), hexutil.Uint64(i), hexutil.Bytes(chunk), hexutil.Bytes(chunkReqSig)); err != nil {
+		rpcClientSendChunkFailureGauge.Inc(1)
 		return err
 	}
+	rpcClientSendChunkSuccessGauge.Inc(1)
 	return nil
 }
diff --git a/das/factory.go b/das/factory.go
index 7f696912b3..3e9771f932 100644
--- a/das/factory.go
+++ b/das/factory.go
@@ -9,8 +9,8 @@ import (
 	"fmt"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethclient"
 
-	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/solgen/go/bridgegen"
 	"github.com/offchainlabs/nitro/util/headerreader"
 	"github.com/offchainlabs/nitro/util/signature"
@@ -65,6 +65,15 @@ func CreatePersistentStorageService(
 		storageServices = append(storageServices, s)
 	}
 
+	if config.GoogleCloudStorage.Enable {
+		s, err := NewGoogleCloudStorageService(config.GoogleCloudStorage)
+		if err != nil {
+			return nil, nil, err
+		}
+		lifecycleManager.Register(s)
+		storageServices = append(storageServices, s)
+	}
+
 	if len(storageServices) > 1 {
 		s, err := NewRedundantStorageService(ctx, storageServices)
 		if err != nil {
@@ -112,7 +121,7 @@ func CreateBatchPosterDAS(
 	ctx context.Context,
 	config *DataAvailabilityConfig,
 	dataSigner signature.DataSignerFunc,
-	l1Reader arbutil.L1Interface,
+	l1Reader *ethclient.Client,
 	sequencerInboxAddr common.Address,
 ) (DataAvailabilityServiceWriter, DataAvailabilityServiceReader, *KeysetFetcher, *LifecycleManager, error) {
 	if !config.Enable {
diff --git a/das/google_cloud_storage_service.go b/das/google_cloud_storage_service.go
new file mode 100644
index 0000000000..2c490f346c
--- /dev/null
+++ b/das/google_cloud_storage_service.go
@@ -0,0 +1,202 @@
+package das
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"time"
+
+	googlestorage "cloud.google.com/go/storage"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/google/go-cmp/cmp"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
+	"github.com/offchainlabs/nitro/das/dastree"
+	"github.com/offchainlabs/nitro/util/pretty"
+	flag "github.com/spf13/pflag"
+	"google.golang.org/api/option"
+)
+
+type GoogleCloudStorageOperator interface {
+	Bucket(name string) *googlestorage.BucketHandle
+	Upload(ctx context.Context, bucket, objectPrefix string, value []byte) error
+	Download(ctx context.Context, bucket, objectPrefix string, key common.Hash) ([]byte, error)
+	Close(ctx context.Context) error
+}
+
+type GoogleCloudStorageClient struct {
+	client *googlestorage.Client
+}
+
+func (g *GoogleCloudStorageClient) Bucket(name string) *googlestorage.BucketHandle {
+	return g.client.Bucket(name)
+}
+
+func (g *GoogleCloudStorageClient) Upload(ctx context.Context, bucket, objectPrefix string, value []byte) error {
+	obj := g.client.Bucket(bucket).Object(objectPrefix + EncodeStorageServiceKey(dastree.Hash(value)))
+	w := obj.NewWriter(ctx)
+	// Write the raw batch bytes; fmt.Fprintln would store value's Go string representation instead.
+	if _, err := w.Write(value); err != nil {
+		return err
+	}
+	return w.Close()
+}
+
+func (g *GoogleCloudStorageClient) Download(ctx context.Context, bucket, objectPrefix string, key common.Hash) ([]byte, error) {
+	obj := g.client.Bucket(bucket).Object(objectPrefix + EncodeStorageServiceKey(key))
+	reader, err := obj.NewReader(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return io.ReadAll(reader)
+}
+
+func (g *GoogleCloudStorageClient) Close(ctx context.Context) error {
+	return g.client.Close()
+}
+
+type GoogleCloudStorageServiceConfig struct {
+	Enable       bool          `koanf:"enable"`
+	AccessToken  string        `koanf:"access-token"`
+	Bucket       string        `koanf:"bucket"`
+	ObjectPrefix string        `koanf:"object-prefix"`
+	EnableExpiry bool          `koanf:"enable-expiry"`
+	MaxRetention time.Duration `koanf:"max-retention"`
+}
+
+var DefaultGoogleCloudStorageServiceConfig = GoogleCloudStorageServiceConfig{}
+
+func GoogleCloudConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".enable", DefaultGoogleCloudStorageServiceConfig.Enable, "enable storage/retrieval of sequencer batch data from a Google Cloud Storage bucket")
+	f.String(prefix+".access-token", DefaultGoogleCloudStorageServiceConfig.AccessToken, "Google Cloud Storage access token")
+	f.String(prefix+".bucket", DefaultGoogleCloudStorageServiceConfig.Bucket, "Google Cloud Storage bucket")
+	f.String(prefix+".object-prefix", DefaultGoogleCloudStorageServiceConfig.ObjectPrefix, "prefix to add to Google Cloud Storage objects")
+	f.Bool(prefix+".enable-expiry", DefaultGoogleCloudStorageServiceConfig.EnableExpiry, "enable expiry of batches")
+	f.Duration(prefix+".max-retention", DefaultGoogleCloudStorageServiceConfig.MaxRetention, "store requests with expiry times farther in the future than max-retention will be rejected")
+}
+
+type GoogleCloudStorageService struct {
+	operator     GoogleCloudStorageOperator
+	bucket       string
+	objectPrefix string
+	enableExpiry bool
+	maxRetention time.Duration
+}
+
+func NewGoogleCloudStorageService(config GoogleCloudStorageServiceConfig) (StorageService, error) {
+	var client *googlestorage.Client
+	var err error
+	// Note that if credentials are not specified, the client library finds credentials using ADC (Application Default Credentials):
+	// https://cloud.google.com/docs/authentication/provide-credentials-adc.
+	if config.AccessToken == "" {
+		client, err = googlestorage.NewClient(context.Background())
+	} else {
+		client, err = googlestorage.NewClient(context.Background(), option.WithCredentialsJSON([]byte(config.AccessToken)))
+	}
+	if err != nil {
+		return nil, fmt.Errorf("error creating Google Cloud Storage client: %w", err)
+	}
+	service := &GoogleCloudStorageService{
+		operator:     &GoogleCloudStorageClient{client: client},
+		bucket:       config.Bucket,
+		objectPrefix: config.ObjectPrefix,
+		enableExpiry: config.EnableExpiry,
+		maxRetention: config.MaxRetention,
+	}
+	if config.EnableExpiry {
+		lifecycleRule := googlestorage.LifecycleRule{
+			Action:    googlestorage.LifecycleAction{Type: "Delete"},
+			Condition: googlestorage.LifecycleCondition{AgeInDays: int64(config.MaxRetention.Hours() / 24)}, // delete objects older than the configured max retention
+		}
+		ctx := context.Background()
+		bucket := service.operator.Bucket(service.bucket)
+		// Fetching the attributes also verifies that the bucket exists before the expiry rule is appended to its lifecycle policy.
+		attrs, err := bucket.Attrs(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("error getting bucket attributes: %w", err)
+		}
+		attrs.Lifecycle.Rules = append(attrs.Lifecycle.Rules, lifecycleRule)
+
+		bucketAttrsToUpdate := googlestorage.BucketAttrsToUpdate{
+			Lifecycle: &attrs.Lifecycle,
+		}
+		if _, err := bucket.Update(ctx, bucketAttrsToUpdate); err != nil {
+			return nil, fmt.Errorf("failed to update bucket lifecycle: %w", err)
+		}
+	}
+	return service, nil
+}
+
+func (gcs *GoogleCloudStorageService) Put(ctx context.Context, data []byte, expiry uint64) error {
+	logPut("das.GoogleCloudStorageService.Store", data, expiry, gcs)
+	if expiry > math.MaxInt64 {
+		return fmt.Errorf("request expiry time (%v) exceeds max int64", expiry)
+	}
+	// #nosec G115
+	expiryTime := time.Unix(int64(expiry), 0)
+	currentTimePlusRetention := time.Now().Add(gcs.maxRetention)
+	if expiryTime.After(currentTimePlusRetention) {
+		return fmt.Errorf("requested expiry time (%v) exceeds current time plus maximum allowed retention period (%v)", expiryTime, currentTimePlusRetention)
+	}
+	if err := gcs.operator.Upload(ctx, gcs.bucket, gcs.objectPrefix, data); err != nil {
+		log.Error("das.GoogleCloudStorageService.Store", "err", err)
+		return err
+	}
+	return nil
+}
+
+func (gcs *GoogleCloudStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) {
+	log.Trace("das.GoogleCloudStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", gcs)
+	buf, err := gcs.operator.Download(ctx, gcs.bucket, gcs.objectPrefix, key)
+	if err != nil {
+		log.Error("das.GoogleCloudStorageService.GetByHash", "err", err)
+		return nil, err
+	}
+	return buf, nil
+}
+
+func (gcs *GoogleCloudStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) {
+	// With bucket-level expiry enabled, stored batches are discarded after the data timeout; otherwise they are kept forever.
+	if gcs.enableExpiry {
+		return daprovider.DiscardAfterDataTimeout, nil
+	}
+	return daprovider.KeepForever, nil
+}
+
+func (gcs *GoogleCloudStorageService) Sync(ctx context.Context) error {
+	return nil
+}
+
+func (gcs *GoogleCloudStorageService) Close(ctx context.Context) error {
+	return gcs.operator.Close(ctx)
+}
+
+func (gcs *GoogleCloudStorageService) String() string {
+	return fmt.Sprintf("GoogleCloudStorageService(:%s)", gcs.bucket)
+}
+
+func (gcs *GoogleCloudStorageService) HealthCheck(ctx context.Context) error {
+	bucket := gcs.operator.Bucket(gcs.bucket)
+	// check that the service account holds the bucket permissions needed for reads, writes, and expiry
+	permissions := []string{
+		"storage.buckets.get",
+		"storage.buckets.list",
+		"storage.objects.create",
+		"storage.objects.delete",
+		"storage.objects.list",
+
"storage.objects.get", + } + perms, err := bucket.IAM().TestPermissions(ctx, permissions) + if err != nil { + return fmt.Errorf("could not check permissions: %w", err) + } + sort.Strings(permissions) + sort.Strings(perms) + if !cmp.Equal(perms, permissions) { + return fmt.Errorf("permissions mismatch (-want +got):\n%s", cmp.Diff(permissions, perms)) + } + + return nil +} diff --git a/das/google_cloud_storage_service_test.go b/das/google_cloud_storage_service_test.go new file mode 100644 index 0000000000..799d999bad --- /dev/null +++ b/das/google_cloud_storage_service_test.go @@ -0,0 +1,84 @@ +package das + +import ( + "bytes" + googlestorage "cloud.google.com/go/storage" + "context" + "errors" + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/das/dastree" + "testing" + "time" +) + +type mockGCSClient struct { + storage map[string][]byte +} + +func (c *mockGCSClient) Bucket(name string) *googlestorage.BucketHandle { + return nil +} + +func (c *mockGCSClient) Download(ctx context.Context, bucket, objectPrefix string, key common.Hash) ([]byte, error) { + value, ok := c.storage[objectPrefix+EncodeStorageServiceKey(key)] + if !ok { + return nil, ErrNotFound + } + return value, nil +} + +func (c *mockGCSClient) Close(ctx context.Context) error { + return nil +} + +func (c *mockGCSClient) Upload(ctx context.Context, bucket, objectPrefix string, value []byte) error { + key := objectPrefix + EncodeStorageServiceKey(dastree.Hash(value)) + c.storage[key] = value + return nil +} + +func NewTestGoogleCloudStorageService(ctx context.Context, googleCloudStorageConfig GoogleCloudStorageServiceConfig) (StorageService, error) { + return &GoogleCloudStorageService{ + bucket: googleCloudStorageConfig.Bucket, + objectPrefix: googleCloudStorageConfig.ObjectPrefix, + operator: &mockGCSClient{ + storage: make(map[string][]byte), + }, + maxRetention: googleCloudStorageConfig.MaxRetention, + }, nil +} + +func TestNewGoogleCloudStorageService(t *testing.T) { + ctx := context.Background() + // #nosec G115 + expiry := uint64(time.Now().Add(time.Hour).Unix()) + googleCloudStorageServiceConfig := DefaultGoogleCloudStorageServiceConfig + googleCloudStorageServiceConfig.Enable = true + googleCloudStorageServiceConfig.MaxRetention = time.Hour * 24 + googleCloudService, err := NewTestGoogleCloudStorageService(ctx, googleCloudStorageServiceConfig) + Require(t, err) + + val1 := []byte("The first value") + val1CorrectKey := dastree.Hash(val1) + val2IncorrectKey := dastree.Hash(append(val1, 0)) + + _, err = googleCloudService.GetByHash(ctx, val1CorrectKey) + if !errors.Is(err, ErrNotFound) { + t.Fatal(err) + } + + err = googleCloudService.Put(ctx, val1, expiry) + Require(t, err) + + _, err = googleCloudService.GetByHash(ctx, val2IncorrectKey) + if !errors.Is(err, ErrNotFound) { + t.Fatal(err) + } + + val, err := googleCloudService.GetByHash(ctx, val1CorrectKey) + Require(t, err) + if !bytes.Equal(val, val1) { + t.Fatal(val, val1) + } + +} diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index 1b3e2b8f44..9cf481e015 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -21,7 +21,7 @@ import ( "github.com/offchainlabs/nitro/util/signature" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbutil" + "github.com/ethereum/go-ethereum/ethclient" ) type BackendConfig struct { @@ -83,7 +83,7 @@ func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig, signer return NewAggregator(ctx, config, services) } -func NewRPCAggregatorWithL1Info(config 
DataAvailabilityConfig, l1client arbutil.L1Interface, seqInboxAddress common.Address, signer signature.DataSignerFunc) (*Aggregator, error) { +func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client *ethclient.Client, seqInboxAddress common.Address, signer signature.DataSignerFunc) (*Aggregator, error) { services, err := ParseServices(config.RPCAggregator, signer) if err != nil { return nil, err diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go index 40b03847d8..b09ed091cc 100644 --- a/das/sign_after_store_das_writer.go +++ b/das/sign_after_store_das_writer.go @@ -81,6 +81,7 @@ func NewSignAfterStoreDASWriter(ctx context.Context, config DataAvailabilityConf if err != nil { return nil, err } + log.Info("DAS public key used for signing", "key", hexutil.Encode(blsSignatures.PublicKeyToBytes(publicKey))) keyset := &daprovider.DataAvailabilityKeyset{ AssumedHonest: 1, diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 43ae6160d7..0670a29c73 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" @@ -243,7 +244,7 @@ func FindDASDataFromLog( inboxContract *bridgegen.SequencerInbox, deliveredEvent *bridgegen.SequencerInboxSequencerBatchDelivered, inboxAddr common.Address, - l1Client arbutil.L1Interface, + l1Client *ethclient.Client, batchDeliveredLog types.Log) ([]byte, error) { data := []byte{} if deliveredEvent.DataLocation == uint8(batchDataSeparateEvent) { diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 5a1efc6d08..cb06a58e74 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/filters" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -179,7 +180,7 @@ func CreateExecutionNode( stack *node.Node, chainDB ethdb.Database, l2BlockChain *core.BlockChain, - l1client arbutil.L1Interface, + l1client *ethclient.Client, configFetcher ConfigFetcher, ) (*ExecutionNode, error) { config := configFetcher() diff --git a/go-ethereum b/go-ethereum index 17cd001675..b068464bf5 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 17cd00167543a5a2b0b083e32820051100154c2f +Subproject commit b068464bf59ab5414f72c2d4aba855b8af5edc17 diff --git a/go.mod b/go.mod index 3826016948..488d455f44 100644 --- a/go.mod +++ b/go.mod @@ -1,21 +1,22 @@ module github.com/offchainlabs/nitro -go 1.21 +go 1.23 replace github.com/VictoriaMetrics/fastcache => ./fastcache replace github.com/ethereum/go-ethereum => ./go-ethereum require ( + cloud.google.com/go/storage v1.43.0 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible github.com/Shopify/toxiproxy v2.1.4+incompatible github.com/alicebob/miniredis/v2 v2.32.1 github.com/andybalholm/brotli v1.0.4 - github.com/aws/aws-sdk-go-v2 v1.21.2 - github.com/aws/aws-sdk-go-v2/config v1.18.45 - github.com/aws/aws-sdk-go-v2/credentials v1.13.43 - github.com/aws/aws-sdk-go-v2/feature/s3/manager 
v1.11.10 - github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 + github.com/aws/aws-sdk-go-v2 v1.31.0 + github.com/aws/aws-sdk-go-v2/config v1.27.40 + github.com/aws/aws-sdk-go-v2/credentials v1.17.38 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.27 + github.com/aws/aws-sdk-go-v2/service/s3 v1.64.1 github.com/cavaliergopher/grab/v3 v3.0.1 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/codeclysm/extract/v3 v3.0.2 @@ -29,7 +30,7 @@ require ( github.com/gobwas/ws-examples v0.0.0-20190625122829-a9e8908d9484 github.com/google/btree v1.1.2 github.com/google/go-cmp v0.6.0 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/holiman/uint256 v1.2.4 github.com/knadh/koanf v1.4.0 @@ -42,18 +43,37 @@ require ( github.com/spf13/pflag v1.0.5 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/wealdtech/go-merkletree v1.0.0 - golang.org/x/crypto v0.21.0 + golang.org/x/crypto v0.24.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa - golang.org/x/sys v0.18.0 - golang.org/x/term v0.18.0 - golang.org/x/tools v0.16.0 + golang.org/x/sys v0.21.0 + golang.org/x/term v0.21.0 + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d + google.golang.org/api v0.187.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) require ( + cloud.google.com/go v0.115.0 // indirect + cloud.google.com/go/auth v0.6.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/iam v1.1.8 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/onsi/gomega v1.18.1 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.5 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect + google.golang.org/grpc v1.64.0 // indirect ) require ( @@ -62,24 +82,24 @@ require ( github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 
// indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect - github.com/aws/smithy-go v1.15.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.23.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.31.4 // indirect + github.com/aws/smithy-go v1.22.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f // indirect github.com/cockroachdb/redact v1.1.3 // indirect @@ -98,7 +118,6 @@ require ( github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect - github.com/fjl/memsize v0.0.2 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gammazero/deque v0.2.1 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect @@ -111,9 +130,9 @@ require ( github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.0.0 // indirect - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/glog v1.2.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/flatbuffers v1.12.1 // indirect github.com/google/go-github/v62 v62.0.0 @@ -126,7 +145,6 @@ require ( github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 // indirect github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect github.com/klauspost/compress v1.17.2 // indirect @@ -163,13 +181,13 @@ require ( github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect - go.opencensus.io v0.22.5 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.23.0 // indirect + 
go.opencensus.io v0.24.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.26.0 // indirect golang.org/x/oauth2 v0.22.0 - golang.org/x/sync v0.5.0 - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + golang.org/x/sync v0.7.0 + golang.org/x/text v0.16.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index 2fceedb715..d11610724e 100644 --- a/go.sum +++ b/go.sum @@ -13,14 +13,26 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go/auth v0.6.1 h1:T0Zw1XM5c1GlpN2HYr2s+m3vr1p2wy+8VN+Z1FKxW38= +cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -30,6 +42,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= 
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -70,64 +84,53 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.16.3/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= -github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA= -github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= +github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U= +github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 h1:xDAuZTn4IMm8o1LnBZvmrL8JA1io4o3YWNXgohbf20g= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5/go.mod h1:wYSv6iDS621sEFLfKvpPE2ugjTuGlAG7iROg0hLOkfc= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= -github.com/aws/aws-sdk-go-v2/config v1.15.5/go.mod h1:ZijHHh0xd/A+ZY53az0qzC5tT46kt4JVCePf2NX9Lk4= -github.com/aws/aws-sdk-go-v2/config v1.18.45 h1:Aka9bI7n8ysuwPeFdm77nfbyHCAKQ3z9ghB3S/38zes= -github.com/aws/aws-sdk-go-v2/config v1.18.45/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE= +github.com/aws/aws-sdk-go-v2/config v1.27.40 h1:sie4mPBGFOO+Z27+yHzvyN31G20h/bf2xb5mCbpLv2Q= +github.com/aws/aws-sdk-go-v2/config v1.27.40/go.mod h1:4KW7Aa5tNo+0VHnuLnnE1vPHtwMurlNZNS65IdcewHA= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= -github.com/aws/aws-sdk-go-v2/credentials v1.12.0/go.mod h1:9YWk7VW+eyKsoIL6/CljkTrNVWBSK9pkqOPUuijid4A= -github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8= -github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.38 h1:iM90eRhCeZtlkzCNCG1JysOzJXGYf5rx80aD1lUgNDU= +github.com/aws/aws-sdk-go-v2/credentials v1.17.38/go.mod h1:TCVYPZeQuLaYNEkf/TVn6k5k/zdVZZ7xH9po548VNNg= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4/go.mod h1:u/s5/Z+ohUQOPXl00m2yJVyioWDECsbpXTQlaqSlufc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 h1:JL7cY85hyjlgfA29MMyAlItX+JYIH9XsxgMBS7jtlqA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10/go.mod h1:p+ul5bLZSDRRXCZ/vePvfmZBH9akozXBJA5oMshWa5U= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10/go.mod h1:F+EZtuIwjlv35kRJPyBGcsA4f7bnSoz15zOQ2lJq1Z4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4/go.mod h1:8glyUqVIM4AmeenIsPo0oVh3+NUwnsQml2OFupfQW+0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.27 h1:1oLpQSTuqbizOUEYdxAwH+Eveg+FOCOkg84Yijba6Kc= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.27/go.mod h1:afo0vF9P3pjy1ny+cb45lzBjtKeEb5t5MPRxeTXpujw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 h1:kYQ3H1u0ANr9KEKlGs/jTLrBFPo8P8NaH/w7A01NeeM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18/go.mod h1:r506HmK5JDUh9+Mw4CfGJGSSoqIiLCndAuqXuhbv67Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 h1:Z7IdFUONvTcvS7YuhtVxN99v2cCoHRXOS4mTr0B/pUc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18/go.mod h1:DkKMmksZVVyat+Y+r1dEOgJEfUeA7UngIHWeKsi0yNc= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11/go.mod h1:0MR+sS1b/yxsfAPvAESrw8NfwUoxMinDyw6EYR9BS2U= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1 h1:C21IDZCm9Yu5xqjb3fKmxDoYvJXtw1DNlOmLZEIlY1M= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1/go.mod h1:l/BbcfqDCT3hePawhy4ZRtewjtdkl6GWtd9/U+1penQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18 h1:OWYvKL53l1rbsUmW7bQyJVsYU/Ii3bbAAQIIFNbM0Tk= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18/go.mod h1:CUx0G1v3wG6l01tUB+j7Y8kclA8NSqK4ef0YG79a4cg= github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 h1:T4pFel53bkHjL2mMo+4DKE6r6AuoZnM0fg7k1/ratr4= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5 h1:9LSZqt4v1JiehyZTrQnRFf2mY/awmyYNNY/b7zqtduU= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5/go.mod h1:S8TVP66AAkMMdYYCNZGvrdEq9YRm+qLXjio4FqRnrEE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20 h1:rTWjG6AvWekO2B1LHeM3ktU7MqyX9rzWQ7hgzneZW7E= 
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20/go.mod h1:RGW2DDpVc8hu6Y6yG8G5CHVmVOAn1oV8rNKOHRJyswg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4/go.mod h1:uKkN7qmSIsNJVyMtxNQoCEYMvFEXbOg9fwCJPdfp2u8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4 h1:RE/DlZLYrz1OOmq8F28IXHLksuuvlpzUbvJ+SESCZBI= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4/go.mod h1:oudbsSdDtazNj47z1ut1n37re9hDsKpk2ZI3v7KSxq0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 h1:LCQKnopq2t4oQS3VKivlYTzAHCTJZZoQICM9fny7KHY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9/go.mod h1:iMYipLPXlWpBJ0KFX7QJHZ84rBydHBY8as2aQICTPWk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18 h1:eb+tFOIl9ZsUe2259/BKPeniKuz4/02zZFH/i4Nf8Rg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18/go.mod h1:GVCC2IJNJTmdlyEsSmofEy7EfJncP7DNnXDzRjJ5Keg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.64.1 h1:jjHf+M6vCp/WzbyFEroY4/Nx8dJac520A0EPwlYk0Do= +github.com/aws/aws-sdk-go-v2/service/s3 v1.64.1/go.mod h1:NLTqRLe3pUNu3nTEHI6XlHLKYmc8fbHUdMxAB6+s41Q= github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.4/go.mod h1:cPDwJwsP4Kff9mldCXAmddjJL6JGQqtA3Mzer2zyr88= -github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k= -github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg= +github.com/aws/aws-sdk-go-v2/service/sso v1.23.4 h1:ck/Y8XWNR1gHa4BFkwE3oSu7XDJGwl+8TI7E/RB2EcQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.23.4/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.4 h1:4f2/JKYZHAZbQ7koBpZ012bKi32NHPY0m7TDuJgsbug= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.4/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E= github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.4/go.mod h1:lfSYenAXtavyX2A1LsViglqlG9eEFYxNryTZS5rn3QE= -github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU= -github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.31.4 h1:uK6dUUdJtqutK1XO/tmNaQMJiPLCJY/eAeOOmqQ6ygY= +github.com/aws/aws-sdk-go-v2/service/sts v1.31.4/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= 
-github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8= -github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= +github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -237,8 +240,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= -github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -273,6 +276,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= @@ -280,7 +288,6 @@ github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/httphead v0.1.0 
h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= @@ -306,12 +313,13 @@ github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzq github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -335,8 +343,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= @@ -356,11 +364,10 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp 
v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4= @@ -371,8 +378,11 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -380,17 +390,22 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 
+github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -437,7 +452,6 @@ github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -448,9 +462,7 @@ github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0Gqw github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= @@ -572,25 +584,19 @@ github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 
+github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -680,14 +686,19 @@ github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobt github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -739,8 +750,20 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20180214000028-650f4a345ab4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -753,8 +776,8 @@ golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -791,8 +814,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -829,17 +852,17 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -862,8 +885,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -913,7 +936,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -940,14 +962,14 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -959,14 +981,15 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1011,16 +1034,16 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -1038,6 +1061,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.187.0 h1:Mxs7VATVC2v7CY+7Xwm4ndkX71hpElcvx0D1Ji/p1eo= +google.golang.org/api v0.187.0/go.mod h1:KIHlTc4x7N7gKKuVsdmfBXN13yEEWXWFURWY6SBp2gk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1076,6 +1101,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls= +google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M= +google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= +google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1091,7 +1122,10 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1104,8 +1138,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pubsub/producer.go b/pubsub/producer.go index 5c87f4f722..dacaeba7d0 100644 --- a/pubsub/producer.go +++ b/pubsub/producer.go @@ -201,6 +201,7 @@ func (p *Producer[Request, Response]) clearMessages(ctx context.Context) time.Du } if _, err := p.client.XDel(ctx, p.redisStream, pelData.Lower).Result(); err != nil { log.Error("error deleting PEL's lower message that's past its TTL", "msgID", pelData.Lower, "err", err) + return 0 } } } diff --git a/staker/block_validator.go b/staker/block_validator.go index e1b2c75b84..5a1f123693 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -29,6 +29,8 @@ import ( "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/client/redis" + "github.com/offchainlabs/nitro/validator/inputs" + "github.com/offchainlabs/nitro/validator/server_api" "github.com/spf13/pflag" ) @@ -94,6 +96,9 @@ type BlockValidator struct { // for testing only testingProgressMadeChan chan struct{} + // For troubleshooting failed validations + validationInputsWriter *inputs.Writer + fatalErr chan<- error MemoryFreeLimitChecker resourcemanager.LimitChecker @@ -115,6 +120,9 @@ type BlockValidatorConfig struct { Dangerous BlockValidatorDangerousConfig `koanf:"dangerous"` MemoryFreeLimit string `koanf:"memory-free-limit" reload:"hot"` ValidationServerConfigsList string `koanf:"validation-server-configs-list"` + // The directory to which the BlockValidator will write the + // block_inputs_.json files when WriteToFile() is called. + BlockInputsFilePath string `koanf:"block-inputs-file-path"` memoryFreeLimit int } @@ -182,6 +190,7 @@ func BlockValidatorConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".failure-is-fatal", DefaultBlockValidatorConfig.FailureIsFatal, "failing a validation is treated as a fatal error") BlockValidatorDangerousConfigAddOptions(prefix+".dangerous", f) f.String(prefix+".memory-free-limit", DefaultBlockValidatorConfig.MemoryFreeLimit, "minimum free-memory limit below which the block validator pauses validation.
Enabled by default at 1GB; to disable, provide an empty string") + f.String(prefix+".block-inputs-file-path", DefaultBlockValidatorConfig.BlockInputsFilePath, "directory to write block validation input files") } func BlockValidatorDangerousConfigAddOptions(prefix string, f *pflag.FlagSet) { @@ -201,6 +210,7 @@ var DefaultBlockValidatorConfig = BlockValidatorConfig{ PendingUpgradeModuleRoot: "latest", FailureIsFatal: true, Dangerous: DefaultBlockValidatorDangerousConfig, + BlockInputsFilePath: "./target/validation_inputs", MemoryFreeLimit: "default", RecordingIterLimit: 20, } @@ -219,6 +229,7 @@ var TestBlockValidatorConfig = BlockValidatorConfig{ PendingUpgradeModuleRoot: "latest", FailureIsFatal: true, Dangerous: DefaultBlockValidatorDangerousConfig, + BlockInputsFilePath: "./target/validation_inputs", MemoryFreeLimit: "default", } @@ -277,6 +288,13 @@ func NewBlockValidator( fatalErr: fatalErr, prevBatchCache: make(map[uint64][]byte), } + valInputsWriter, err := inputs.NewWriter( + inputs.WithBaseDir(ret.stack.InstanceDir()), + inputs.WithSlug("BlockValidator")) + if err != nil { + return nil, err + } + ret.validationInputsWriter = valInputsWriter if !config().Dangerous.ResetBlockValidation { validated, err := ret.ReadLastValidatedInfo() if err != nil { @@ -508,18 +526,16 @@ func (v *BlockValidator) sendRecord(s *validationStatus) error { } //nolint:gosec -func (v *BlockValidator) writeToFile(validationEntry *validationEntry, moduleRoot common.Hash) error { +func (v *BlockValidator) writeToFile(validationEntry *validationEntry) error { input, err := validationEntry.ToInput([]ethdb.WasmTarget{rawdb.TargetWavm}) if err != nil { return err } - for _, spawner := range v.execSpawners { - if validator.SpawnerSupportsModule(spawner, moduleRoot) { - _, err = spawner.WriteToFile(input, validationEntry.End, moduleRoot).Await(v.GetContext()) - return err - } + inputJson := server_api.ValidationInputToJson(input) + if err := v.validationInputsWriter.Write(inputJson); err != nil { + return err } - return errors.New("did not find exec spawner for wasmModuleRoot") + return nil } func (v *BlockValidator) SetCurrentWasmModuleRoot(hash common.Hash) error { @@ -823,7 +839,7 @@ validationsLoop: runEnd, err := run.Current() if err == nil && runEnd != validationStatus.Entry.End { err = fmt.Errorf("validation failed: expected %v got %v", validationStatus.Entry.End, runEnd) - writeErr := v.writeToFile(validationStatus.Entry, run.WasmModuleRoot()) + writeErr := v.writeToFile(validationStatus.Entry) if writeErr != nil { log.Warn("failed to write debug results file", "err", writeErr) } diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 6ea9fd8ded..5b0c211324 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" @@ -45,7 +46,7 @@ type L1Validator struct { rollup *RollupWatcher rollupAddress common.Address validatorUtils *rollupgen.ValidatorUtils - client arbutil.L1Interface + client *ethclient.Client builder *txbuilder.Builder wallet ValidatorWalletInterface callOpts bind.CallOpts @@ -57,7 +58,7 @@ type L1Validator struct { } func NewL1Validator( - client arbutil.L1Interface, + client *ethclient.Client, wallet ValidatorWalletInterface, validatorUtilsAddress
common.Address, callOpts bind.CallOpts, diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 5ef28a49dc..4d7db52322 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -4,16 +4,19 @@ package staker import ( + "bytes" "context" "encoding/binary" "errors" "fmt" "math/big" + "strings" "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/util/headerreader" @@ -48,12 +51,19 @@ type RollupWatcher struct { *rollupgen.RollupUserLogic address common.Address fromBlock *big.Int - client arbutil.L1Interface + client RollupWatcherL1Interface baseCallOpts bind.CallOpts unSupportedL3Method atomic.Bool + supportedL3Method atomic.Bool } -func NewRollupWatcher(address common.Address, client arbutil.L1Interface, callOpts bind.CallOpts) (*RollupWatcher, error) { +type RollupWatcherL1Interface interface { + bind.ContractBackend + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) + FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) +} + +func NewRollupWatcher(address common.Address, client RollupWatcherL1Interface, callOpts bind.CallOpts) (*RollupWatcher, error) { con, err := rollupgen.NewRollupUserLogic(address, client) if err != nil { return nil, err @@ -73,15 +83,41 @@ func (r *RollupWatcher) getCallOpts(ctx context.Context) *bind.CallOpts { return &opts } +const noNodeErr string = "NO_NODE" + +func looksLikeNoNodeError(err error) bool { + if err == nil { + return false + } + if strings.Contains(err.Error(), noNodeErr) { + return true + } + var errWithData rpc.DataError + ok := errors.As(err, &errWithData) + if !ok { + return false + } + dataString, ok := errWithData.ErrorData().(string) + if !ok { + return false + } + data := common.FromHex(dataString) + return bytes.Contains(data, []byte(noNodeErr)) +} + func (r *RollupWatcher) getNodeCreationBlock(ctx context.Context, nodeNum uint64) (*big.Int, error) { callOpts := r.getCallOpts(ctx) if !r.unSupportedL3Method.Load() { createdAtBlock, err := r.GetNodeCreationBlockForLogLookup(callOpts, nodeNum) if err == nil { + r.supportedL3Method.Store(true) return createdAtBlock, nil } - log.Trace("failed to call getNodeCreationBlockForLogLookup, falling back on node CreatedAtBlock field", "err", err) - if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) { + if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) && !looksLikeNoNodeError(err) { + if r.supportedL3Method.Load() { + return nil, fmt.Errorf("getNodeCreationBlockForLogLookup failed despite previously succeeding: %w", err) + } + log.Info("getNodeCreationBlockForLogLookup does not seem to exist, falling back on node CreatedAtBlock field", "err", err) r.unSupportedL3Method.Store(true) } else { return nil, err diff --git a/staker/staker.go b/staker/staker.go index 77ca93e02c..45e6f6f551 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rpc" @@ -280,7 +281,7 @@ type ValidatorWalletInterface interface { TxSenderAddress() *common.Address 
RollupAddress() common.Address ChallengeManagerAddress() common.Address - L1Client() arbutil.L1Interface + L1Client() *ethclient.Client TestTransactions(context.Context, []*types.Transaction) error ExecuteTransactions(context.Context, *txbuilder.Builder, common.Address) (*types.Transaction, error) TimeoutChallenges(context.Context, []uint64) (*types.Transaction, error) @@ -304,7 +305,6 @@ func NewStaker( validatorUtilsAddress common.Address, fatalErr chan<- error, ) (*Staker, error) { - if err := config().Validate(); err != nil { return nil, err } @@ -511,7 +511,9 @@ func (s *Staker) Start(ctxIn context.Context) { } s.StopWaiter.Start(ctxIn, s) backoff := time.Second - ephemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, "is ahead of on-chain nonce", 0) + isAheadOfOnChainNonceEphemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, "is ahead of on-chain nonce", 0) + exceedsMaxMempoolSizeEphemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, dataposter.ErrExceedsMaxMempoolSize.Error(), 0) + blockValidationPendingEphemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, "block validation is still pending", 0) s.CallIteratively(func(ctx context.Context) (returningWait time.Duration) { defer func() { panicErr := recover() @@ -545,7 +547,9 @@ func (s *Staker) Start(ctxIn context.Context) { } } if err == nil { - ephemeralErrorHandler.Reset() + isAheadOfOnChainNonceEphemeralErrorHandler.Reset() + exceedsMaxMempoolSizeEphemeralErrorHandler.Reset() + blockValidationPendingEphemeralErrorHandler.Reset() backoff = time.Second stakerLastSuccessfulActionGauge.Update(time.Now().Unix()) stakerActionSuccessCounter.Inc(1) @@ -563,7 +567,9 @@ func (s *Staker) Start(ctxIn context.Context) { } else { logLevel = log.Warn } - logLevel = ephemeralErrorHandler.LogLevel(err, logLevel) + logLevel = isAheadOfOnChainNonceEphemeralErrorHandler.LogLevel(err, logLevel) + logLevel = exceedsMaxMempoolSizeEphemeralErrorHandler.LogLevel(err, logLevel) + logLevel = blockValidationPendingEphemeralErrorHandler.LogLevel(err, logLevel) logLevel("error acting as staker", "err", err) return backoff }) @@ -1220,7 +1226,7 @@ func (s *Staker) updateStakerBalanceMetric(ctx context.Context) { } balance, err := s.client.BalanceAt(ctx, *txSenderAddress, nil) if err != nil { - log.Error("error getting staker balance", "txSenderAddress", *txSenderAddress, "err", err) + log.Warn("error getting staker balance", "txSenderAddress", *txSenderAddress, "err", err) return } stakerBalanceGauge.Update(arbmath.BalancePerEther(balance)) diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 60306d712f..9257c5582a 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -23,6 +23,7 @@ import ( "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/client/redis" + "github.com/offchainlabs/nitro/validator/server_api" validatorclient "github.com/offchainlabs/nitro/validator/client" ) @@ -40,6 +41,7 @@ type StatelessBlockValidator struct { streamer TransactionStreamerInterface db ethdb.Database dapReaders []daprovider.Reader + stack *node.Node } type BlockValidatorRegistrer interface { @@ -264,6 +266,7 @@ func NewStatelessBlockValidator( db: arbdb, dapReaders: dapReaders, execSpawners: executionSpawners, + stack: stack, }, nil } @@ -508,6 +511,18 @@ func (v *StatelessBlockValidator) ValidateResult( return true, &entry.End, nil } +func (v 
*StatelessBlockValidator) ValidationInputsAt(ctx context.Context, pos arbutil.MessageIndex, target ethdb.WasmTarget) (server_api.InputJSON, error) { + entry, err := v.CreateReadyValidationEntry(ctx, pos) + if err != nil { + return server_api.InputJSON{}, err + } + input, err := entry.ToInput([]ethdb.WasmTarget{target}) + if err != nil { + return server_api.InputJSON{}, err + } + return *server_api.ValidationInputToJson(input), nil +} + func (v *StatelessBlockValidator) OverrideRecorder(t *testing.T, recorder execution.ExecutionRecorder) { v.recorder = recorder } diff --git a/staker/txbuilder/builder.go b/staker/txbuilder/builder.go index 9a5e9df2b5..f52b03a781 100644 --- a/staker/txbuilder/builder.go +++ b/staker/txbuilder/builder.go @@ -12,13 +12,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/arbutil" + "github.com/ethereum/go-ethereum/ethclient" ) type ValidatorWalletInterface interface { // Address must be able to be called concurrently with other functions Address() *common.Address - L1Client() arbutil.L1Interface + L1Client() *ethclient.Client TestTransactions(context.Context, []*types.Transaction) error ExecuteTransactions(context.Context, *Builder, common.Address) (*types.Transaction, error) AuthIfEoa() *bind.TransactOpts @@ -27,10 +27,10 @@ type ValidatorWalletInterface interface { // Builder combines any transactions sent to it via SendTransaction into one batch, // which is then sent to the validator wallet. // This lets the validator make multiple atomic transactions. -// This inherits from an eth client so it can be used as an L1Interface, -// where it transparently intercepts calls to SendTransaction and queues them for the next batch. +// This inherits from an ethclient.Client so it can be used to transparently +// intercept calls to SendTransaction and queue them for the next batch. 
type Builder struct { - arbutil.L1Interface + *ethclient.Client transactions []*types.Transaction builderAuth *bind.TransactOpts isAuthFake bool @@ -55,7 +55,7 @@ func NewBuilder(wallet ValidatorWalletInterface) (*Builder, error) { return &Builder{ builderAuth: builderAuth, wallet: wallet, - L1Interface: wallet.L1Client(), + Client: wallet.L1Client(), isAuthFake: isAuthFake, }, nil } @@ -70,7 +70,7 @@ func (b *Builder) ClearTransactions() { func (b *Builder) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) { if len(b.transactions) == 0 && !b.isAuthFake { - return b.L1Interface.EstimateGas(ctx, call) + return b.Client.EstimateGas(ctx, call) } return 0, nil } diff --git a/staker/validatorwallet/contract.go b/staker/validatorwallet/contract.go index 6346029c3a..3202d58569 100644 --- a/staker/validatorwallet/contract.go +++ b/staker/validatorwallet/contract.go @@ -16,10 +16,10 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode/dataposter" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" @@ -384,7 +384,7 @@ func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) ( return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value) } -func (v *Contract) L1Client() arbutil.L1Interface { +func (v *Contract) L1Client() *ethclient.Client { return v.l1Reader.Client() } diff --git a/staker/validatorwallet/eoa.go b/staker/validatorwallet/eoa.go index 3ae305b36c..7c7f472579 100644 --- a/staker/validatorwallet/eoa.go +++ b/staker/validatorwallet/eoa.go @@ -10,8 +10,8 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/offchainlabs/nitro/arbnode/dataposter" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker/txbuilder" @@ -19,7 +19,7 @@ import ( type EOA struct { auth *bind.TransactOpts - client arbutil.L1Interface + client *ethclient.Client rollupAddress common.Address challengeManager *challengegen.ChallengeManager challengeManagerAddress common.Address @@ -27,7 +27,7 @@ type EOA struct { getExtraGas func() uint64 } -func NewEOA(dataPoster *dataposter.DataPoster, rollupAddress common.Address, l1Client arbutil.L1Interface, getExtraGas func() uint64) (*EOA, error) { +func NewEOA(dataPoster *dataposter.DataPoster, rollupAddress common.Address, l1Client *ethclient.Client, getExtraGas func() uint64) (*EOA, error) { return &EOA{ auth: dataPoster.Auth(), client: l1Client, @@ -63,7 +63,7 @@ func (w *EOA) TxSenderAddress() *common.Address { return &w.auth.From } -func (w *EOA) L1Client() arbutil.L1Interface { +func (w *EOA) L1Client() *ethclient.Client { return w.client } diff --git a/staker/validatorwallet/noop.go b/staker/validatorwallet/noop.go index b050ebe861..fec39ac2b1 100644 --- a/staker/validatorwallet/noop.go +++ b/staker/validatorwallet/noop.go @@ -10,18 +10,18 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/offchainlabs/nitro/arbnode/dataposter" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/staker/txbuilder" ) // NoOp validator wallet is used for watchtower mode. type NoOp struct { - l1Client arbutil.L1Interface + l1Client *ethclient.Client rollupAddress common.Address } -func NewNoOp(l1Client arbutil.L1Interface, rollupAddress common.Address) *NoOp { +func NewNoOp(l1Client *ethclient.Client, rollupAddress common.Address) *NoOp { return &NoOp{ l1Client: l1Client, rollupAddress: rollupAddress, @@ -46,7 +46,7 @@ func (*NoOp) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types return nil, errors.New("no op validator wallet cannot timeout challenges") } -func (n *NoOp) L1Client() arbutil.L1Interface { return n.l1Client } +func (n *NoOp) L1Client() *ethclient.Client { return n.l1Client } func (n *NoOp) RollupAddress() common.Address { return n.rollupAddress } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 7304e8c2e0..0a0bc895a6 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -24,6 +24,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbstate/daprovider" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/conf" @@ -35,6 +36,7 @@ import ( "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/signature" + "github.com/offchainlabs/nitro/validator/inputs" "github.com/offchainlabs/nitro/validator/server_api" "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" @@ -69,7 +71,6 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbutil" _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" @@ -83,7 +84,6 @@ import ( ) type info = *BlockchainTestInfo -type client = arbutil.L1Interface type SecondNodeParams struct { nodeConfig *arbnode.Config @@ -138,8 +138,8 @@ func (tc *TestClient) GetBaseFeeAt(t *testing.T, blockNum *big.Int) *big.Int { return GetBaseFeeAt(t, tc.Client, tc.ctx, blockNum) } -func (tc *TestClient) SendWaitTestTransactions(t *testing.T, txs []*types.Transaction) { - SendWaitTestTransactions(t, tc.ctx, tc.Client, txs) +func (tc *TestClient) SendWaitTestTransactions(t *testing.T, txs []*types.Transaction) []*types.Receipt { + return SendWaitTestTransactions(t, tc.ctx, tc.Client, txs) } func (tc *TestClient) DeploySimple(t *testing.T, auth bind.TransactOpts) (common.Address, *mocksgen.Simple) { @@ -763,26 +763,29 @@ func (b *NodeBuilder) BridgeBalance(t *testing.T, account string, amount *big.In return BridgeBalance(t, account, amount, b.L1Info, b.L2Info, b.L1.Client, b.L2.Client, b.ctx) } -func SendWaitTestTransactions(t *testing.T, ctx context.Context, client client, txs []*types.Transaction) { +func SendWaitTestTransactions(t *testing.T, ctx context.Context, client *ethclient.Client, txs []*types.Transaction) []*types.Receipt { t.Helper() + receipts := make([]*types.Receipt, len(txs)) for _, tx := range txs { Require(t, 
client.SendTransaction(ctx, tx)) } - for _, tx := range txs { - _, err := EnsureTxSucceeded(ctx, client, tx) + for i, tx := range txs { + var err error + receipts[i], err = EnsureTxSucceeded(ctx, client, tx) Require(t, err) } + return receipts } func TransferBalance( - t *testing.T, from, to string, amount *big.Int, l2info info, client client, ctx context.Context, + t *testing.T, from, to string, amount *big.Int, l2info info, client *ethclient.Client, ctx context.Context, ) (*types.Transaction, *types.Receipt) { t.Helper() return TransferBalanceTo(t, from, l2info.GetAddress(to), amount, l2info, client, ctx) } func TransferBalanceTo( - t *testing.T, from string, to common.Address, amount *big.Int, l2info info, client client, ctx context.Context, + t *testing.T, from string, to common.Address, amount *big.Int, l2info info, client *ethclient.Client, ctx context.Context, ) (*types.Transaction, *types.Receipt) { t.Helper() tx := l2info.PrepareTxTo(from, &to, l2info.TransferGas, amount, nil) @@ -795,7 +798,7 @@ func TransferBalanceTo( // if l2client is not nil - will wait until balance appears in l2 func BridgeBalance( - t *testing.T, account string, amount *big.Int, l1info info, l2info info, l1client client, l2client client, ctx context.Context, + t *testing.T, account string, amount *big.Int, l1info info, l2info info, l1client *ethclient.Client, l2client *ethclient.Client, ctx context.Context, ) (*types.Transaction, *types.Receipt) { t.Helper() @@ -855,8 +858,8 @@ func SendSignedTxesInBatchViaL1( t *testing.T, ctx context.Context, l1info *BlockchainTestInfo, - l1client arbutil.L1Interface, - l2client arbutil.L1Interface, + l1client *ethclient.Client, + l2client *ethclient.Client, delayedTxes types.Transactions, ) types.Receipts { delayedInboxContract, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client) @@ -906,8 +909,8 @@ func SendSignedTxViaL1( t *testing.T, ctx context.Context, l1info *BlockchainTestInfo, - l1client arbutil.L1Interface, - l2client arbutil.L1Interface, + l1client *ethclient.Client, + l2client *ethclient.Client, delayedTx *types.Transaction, ) *types.Receipt { delayedInboxContract, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client) @@ -937,8 +940,8 @@ func SendUnsignedTxViaL1( t *testing.T, ctx context.Context, l1info *BlockchainTestInfo, - l1client arbutil.L1Interface, - l2client arbutil.L1Interface, + l1client *ethclient.Client, + l2client *ethclient.Client, templateTx *types.Transaction, ) *types.Receipt { delayedInboxContract, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client) @@ -984,13 +987,13 @@ func SendUnsignedTxViaL1( return receipt } -func GetBaseFee(t *testing.T, client client, ctx context.Context) *big.Int { +func GetBaseFee(t *testing.T, client *ethclient.Client, ctx context.Context) *big.Int { header, err := client.HeaderByNumber(ctx, nil) Require(t, err) return header.BaseFee } -func GetBaseFeeAt(t *testing.T, client client, ctx context.Context, blockNum *big.Int) *big.Int { +func GetBaseFeeAt(t *testing.T, client *ethclient.Client, ctx context.Context, blockNum *big.Int) *big.Int { header, err := client.HeaderByNumber(ctx, blockNum) Require(t, err) return header.BaseFee @@ -1212,7 +1215,7 @@ func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client, return l1info, l1Client, l1backend, stack } -func getInitMessage(ctx context.Context, t *testing.T, parentChainClient client, addresses *chaininfo.RollupAddresses) *arbostypes.ParsedInitMessage { +func getInitMessage(ctx context.Context, t *testing.T, 
parentChainClient *ethclient.Client, addresses *chaininfo.RollupAddresses) *arbostypes.ParsedInitMessage { bridge, err := arbnode.NewDelayedBridge(parentChainClient, addresses.Bridge, addresses.DeployedAt) Require(t, err) deployedAtBig := arbmath.UintToBig(addresses.DeployedAt) @@ -1231,7 +1234,7 @@ func deployOnParentChain( t *testing.T, ctx context.Context, parentChainInfo info, - parentChainClient client, + parentChainClient *ethclient.Client, parentChainReaderConfig *headerreader.Config, chainConfig *params.ChainConfig, wasmModuleRoot common.Hash, @@ -1454,7 +1457,7 @@ func authorizeDASKeyset( ctx context.Context, dasSignerKey *blsSignatures.PublicKey, l1info info, - l1client arbutil.L1Interface, + l1client *ethclient.Client, ) { if dasSignerKey == nil { return @@ -1694,6 +1697,34 @@ func logParser[T any](t *testing.T, source string, name string) func(*types.Log) } } +// recordBlock writes a json file with all of the data needed to validate a block. +// +// This can be used as an input to the arbitrator prover to validate a block. +func recordBlock(t *testing.T, block uint64, builder *NodeBuilder) { + t.Helper() + ctx := builder.ctx + inboxPos := arbutil.MessageIndex(block) + for { + time.Sleep(250 * time.Millisecond) + batches, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() + Require(t, err) + haveMessages, err := builder.L2.ConsensusNode.InboxTracker.GetBatchMessageCount(batches - 1) + Require(t, err) + if haveMessages >= inboxPos { + break + } + } + validationInputsWriter, err := inputs.NewWriter(inputs.WithSlug(t.Name())) + Require(t, err) + inputJson, err := builder.L2.ConsensusNode.StatelessBlockValidator.ValidationInputsAt(ctx, inboxPos, rawdb.TargetWavm) + if err != nil { + Fatal(t, "failed to get validation inputs", block, err) + } + if err := validationInputsWriter.Write(&inputJson); err != nil { + Fatal(t, "failed to write validation inputs", block, err) + } +} + func populateMachineDir(t *testing.T, cr *github.ConsensusRelease) string { baseDir := t.TempDir() machineDir := baseDir + "/machines" diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 9f4d153b6f..ed3844d528 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -6,6 +6,7 @@ package arbtest import ( "context" "encoding/base64" + "errors" "io" "math/big" "net" @@ -22,7 +23,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" @@ -37,25 +37,20 @@ func startLocalDASServer( t *testing.T, ctx context.Context, dataDir string, - l1client arbutil.L1Interface, + l1client *ethclient.Client, seqInboxAddress common.Address, ) (*http.Server, *blsSignatures.PublicKey, das.BackendConfig, *das.RestfulDasServer, string) { keyDir := t.TempDir() pubkey, _, err := das.GenerateAndStoreKeys(keyDir) Require(t, err) - config := das.DataAvailabilityConfig{ - Enable: true, - Key: das.KeyConfig{ - KeyDir: keyDir, - }, - LocalFileStorage: das.LocalFileStorageConfig{ - Enable: true, - DataDir: dataDir, - }, - ParentChainNodeURL: "none", - RequestTimeout: 5 * time.Second, - } + config := das.DefaultDataAvailabilityConfig + config.Enable = true + config.Key = das.KeyConfig{KeyDir: keyDir} + config.ParentChainNodeURL = "none" + config.LocalFileStorage = das.DefaultLocalFileStorageConfig + config.LocalFileStorage.Enable = true + config.LocalFileStorage.DataDir = dataDir storageService, 
lifecycleManager, err := das.CreatePersistentStorageService(ctx, &config)
 	defer lifecycleManager.StopAndWaitUntil(time.Second)
@@ -327,3 +322,80 @@ func initTest(t *testing.T) {
 		enableLogging(logLvl)
 	}
 }
+
+func TestDASBatchPosterFallback(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Setup L1
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	builder.chainConfig = params.ArbitrumDevTestDASChainConfig()
+	builder.BuildL1(t)
+	l1client := builder.L1.Client
+	l1info := builder.L1Info
+
+	// Setup DAS server
+	dasDataDir := t.TempDir()
+	dasRpcServer, pubkey, backendConfig, _, restServerUrl := startLocalDASServer(
+		t, ctx, dasDataDir, l1client, builder.addresses.SequencerInbox)
+	authorizeDASKeyset(t, ctx, pubkey, l1info, l1client)
+
+	// Setup sequencer/batch-poster L2 node
+	builder.nodeConfig.DataAvailability.Enable = true
+	builder.nodeConfig.DataAvailability.RPCAggregator = aggConfigForBackend(backendConfig)
+	builder.nodeConfig.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
+	builder.nodeConfig.DataAvailability.RestAggregator.Enable = true
+	builder.nodeConfig.DataAvailability.RestAggregator.Urls = []string{restServerUrl}
+	builder.nodeConfig.DataAvailability.ParentChainNodeURL = "none"
+	builder.nodeConfig.BatchPoster.DisableDapFallbackStoreDataOnChain = true // Disable DAS fallback
+	builder.nodeConfig.BatchPoster.ErrorDelay = time.Millisecond * 250       // Increase error delay because we expect errors
+	builder.L2Info = NewArbTestInfo(t, builder.chainConfig.ChainID)
+	builder.L2Info.GenerateAccount("User2")
+	cleanup := builder.BuildL2OnL1(t)
+	defer cleanup()
+	l2client := builder.L2.Client
+	l2info := builder.L2Info
+
+	// Setup secondary L2 node
+	nodeConfigB := arbnode.ConfigDefaultL1NonSequencerTest()
+	nodeConfigB.BlockValidator.Enable = false
+	nodeConfigB.DataAvailability.Enable = true
+	nodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
+	nodeConfigB.DataAvailability.RestAggregator.Enable = true
+	nodeConfigB.DataAvailability.RestAggregator.Urls = []string{restServerUrl}
+	nodeConfigB.DataAvailability.ParentChainNodeURL = "none"
+	nodeBParams := SecondNodeParams{
+		nodeConfig: nodeConfigB,
+		initData:   &l2info.ArbInitData,
+	}
+	l2B, cleanupB := builder.Build2ndNode(t, &nodeBParams)
+	defer cleanupB()
+
+	// Check batch posting using the DAS
+	checkBatchPosting(t, ctx, l1client, l2client, l1info, l2info, big.NewInt(1e12), l2B.Client)
+
+	// Shut down the DAS
+	err := dasRpcServer.Shutdown(ctx)
+	Require(t, err)
+
+	// Send a second transaction and check that it doesn't arrive on the second node
+	tx, _ := TransferBalanceTo(t, "Owner", l2info.GetAddress("User2"), big.NewInt(1e12), l2info, l2client, ctx)
+	_, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*3)
+	if err == nil || !errors.Is(err, context.DeadlineExceeded) {
+		Fatal(t, "expected a context deadline exceeded error, but got:", err)
+	}
+
+	// Enable the DAP fallback and check the transaction on the second node.
+	// (We don't need to restart the node because of the hot-reload.)
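+	// (Editor's note: "hot-reload" here means the batch poster re-reads this
+	// config value at runtime, so the assignment below is assumed to take
+	// effect without a node restart.)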
+ builder.nodeConfig.BatchPoster.DisableDapFallbackStoreDataOnChain = false + _, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*3) + Require(t, err) + l2balance, err := l2B.Client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + Require(t, err) + if l2balance.Cmp(big.NewInt(2e12)) != 0 { + Fatal(t, "Unexpected balance:", l2balance) + } + + // Send another transaction with fallback on + checkBatchPosting(t, ctx, l1client, l2client, l1info, l2info, big.NewInt(3e12), l2B.Client) +} diff --git a/system_tests/eth_sync_test.go b/system_tests/eth_sync_test.go index 1f07f7c45f..ce9994fb1e 100644 --- a/system_tests/eth_sync_test.go +++ b/system_tests/eth_sync_test.go @@ -71,7 +71,7 @@ func TestEthSyncing(t *testing.T) { if progress == nil { Fatal(t, "eth_syncing returned nil but shouldn't have") } - for testClientB.ConsensusNode.TxStreamer.ExecuteNextMsg(ctx, testClientB.ExecNode) { + for testClientB.ConsensusNode.TxStreamer.ExecuteNextMsg(ctx) { } progress, err = testClientB.Client.SyncProgress(ctx) Require(t, err) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index ddc229074c..bf30c928d8 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -27,7 +27,6 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/ospgen" @@ -178,7 +177,7 @@ func makeBatch(t *testing.T, l2Node *arbnode.Node, l2Info *BlockchainTestInfo, b Require(t, err, "failed to get batch metadata after adding batch:") } -func confirmLatestBlock(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, backend arbutil.L1Interface) { +func confirmLatestBlock(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, backend *ethclient.Client) { t.Helper() // With SimulatedBeacon running in on-demand block production mode, the // finalized block is considered to be be the nearest multiple of 32 less @@ -190,7 +189,7 @@ func confirmLatestBlock(ctx context.Context, t *testing.T, l1Info *BlockchainTes } } -func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, l1Client arbutil.L1Interface, chainConfig *params.ChainConfig) (common.Address, *mocksgen.SequencerInboxStub, common.Address) { +func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, l1Client *ethclient.Client, chainConfig *params.ChainConfig) (common.Address, *mocksgen.SequencerInboxStub, common.Address) { txOpts := l1Info.GetDefaultTransactOpts("deployer", ctx) bridgeAddr, tx, bridge, err := mocksgen.DeployBridgeUnproxied(&txOpts, l1Client) Require(t, err) diff --git a/system_tests/program_gas_test.go b/system_tests/program_gas_test.go new file mode 100644 index 0000000000..119897cbfe --- /dev/null +++ b/system_tests/program_gas_test.go @@ -0,0 +1,458 @@ +package arbtest + +import ( + "context" + "fmt" + "math" + "math/big" + "regexp" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers/logger" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/arbos/util" + 
"github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +func TestProgramSimpleCost(t *testing.T) { + builder := setupGasCostTest(t) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx) + stylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test")) + evmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData) + otherProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("storage")) + matchSnake := regexp.MustCompile("_[a-z]") + + for _, tc := range []struct { + hostio string + opcode vm.OpCode + params []any + maxDiff float64 + }{ + {hostio: "exit_early", opcode: vm.STOP}, + {hostio: "transient_load_bytes32", opcode: vm.TLOAD, params: []any{common.HexToHash("dead")}}, + {hostio: "transient_store_bytes32", opcode: vm.TSTORE, params: []any{common.HexToHash("dead"), common.HexToHash("beef")}}, + {hostio: "return_data_size", opcode: vm.RETURNDATASIZE, maxDiff: 1.5}, + {hostio: "account_balance", opcode: vm.BALANCE, params: []any{builder.L2Info.GetAddress("Owner")}}, + {hostio: "account_code", opcode: vm.EXTCODECOPY, params: []any{otherProgram}}, + {hostio: "account_code_size", opcode: vm.EXTCODESIZE, params: []any{otherProgram}, maxDiff: 0.3}, + {hostio: "account_codehash", opcode: vm.EXTCODEHASH, params: []any{otherProgram}}, + {hostio: "evm_gas_left", opcode: vm.GAS, maxDiff: 1.5}, + {hostio: "evm_ink_left", opcode: vm.GAS, maxDiff: 1.5}, + {hostio: "block_basefee", opcode: vm.BASEFEE, maxDiff: 0.5}, + {hostio: "chainid", opcode: vm.CHAINID, maxDiff: 1.5}, + {hostio: "block_coinbase", opcode: vm.COINBASE, maxDiff: 0.5}, + {hostio: "block_gas_limit", opcode: vm.GASLIMIT, maxDiff: 1.5}, + {hostio: "block_number", opcode: vm.NUMBER, maxDiff: 1.5}, + {hostio: "block_timestamp", opcode: vm.TIMESTAMP, maxDiff: 1.5}, + {hostio: "contract_address", opcode: vm.ADDRESS, maxDiff: 0.5}, + {hostio: "math_div", opcode: vm.DIV, params: []any{big.NewInt(1), big.NewInt(3)}}, + {hostio: "math_mod", opcode: vm.MOD, params: []any{big.NewInt(1), big.NewInt(3)}}, + {hostio: "math_add_mod", opcode: vm.ADDMOD, params: []any{big.NewInt(1), big.NewInt(3), big.NewInt(5)}, maxDiff: 0.7}, + {hostio: "math_mul_mod", opcode: vm.MULMOD, params: []any{big.NewInt(1), big.NewInt(3), big.NewInt(5)}, maxDiff: 0.7}, + {hostio: "msg_sender", opcode: vm.CALLER, maxDiff: 0.5}, + {hostio: "msg_value", opcode: vm.CALLVALUE, maxDiff: 0.5}, + {hostio: "tx_gas_price", opcode: vm.GASPRICE, maxDiff: 0.5}, + {hostio: "tx_ink_price", opcode: vm.GASPRICE, maxDiff: 1.5}, + {hostio: "tx_origin", opcode: vm.ORIGIN, maxDiff: 0.5}, + } { + t.Run(tc.hostio, func(t *testing.T) { + solFunc := matchSnake.ReplaceAllStringFunc(tc.hostio, func(s string) string { + return strings.ToUpper(strings.TrimPrefix(s, "_")) + }) + packer, _ := util.NewCallParser(mocksgen.HostioTestABI, solFunc) + data, err := packer(tc.params...) 
+			Require(t, err)
+			compareGasUsage(t, builder, evmProgram, stylusProgram, data, nil, compareGasForEach, tc.maxDiff, compareGasPair{tc.opcode, tc.hostio})
+		})
+	}
+}
+
+func TestProgramPowCost(t *testing.T) {
+	builder := setupGasCostTest(t)
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+	stylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test"))
+	evmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData)
+	packer, _ := util.NewCallParser(mocksgen.HostioTestABI, "mathPow")
+
+	for _, exponentNumBytes := range []uint{1, 2, 10, 32} {
+		name := fmt.Sprintf("exponentNumBytes%v", exponentNumBytes)
+		t.Run(name, func(t *testing.T) {
+			exponent := new(big.Int).Lsh(big.NewInt(1), exponentNumBytes*8-1)
+			params := []any{big.NewInt(1), exponent}
+			data, err := packer(params...)
+			Require(t, err)
+			evmGasUsage, stylusGasUsage := measureGasUsage(t, builder, evmProgram, stylusProgram, data, nil)
+			expectedGas := 2.652 + 1.75*float64(exponentNumBytes+1)
+			t.Logf("evm EXP usage: %v - stylus math_pow usage: %v - expected math_pow usage: %v",
+				evmGasUsage[vm.EXP][0], stylusGasUsage["math_pow"][0], expectedGas)
+			// The math_pow HostIO uses significantly less gas than the EXP opcode. So,
+			// instead of comparing it to EVM, we compare it to the expected gas usage
+			// for each test case.
+			checkPercentDiff(t, stylusGasUsage["math_pow"][0], expectedGas, 0.001)
+		})
+	}
+}
+
+func TestProgramStorageCost(t *testing.T) {
+	builder := setupGasCostTest(t)
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+	stylusMulticall := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("multicall"))
+	evmMulticall := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.MultiCallTestMetaData)
+
+	const numSlots = 42
+	rander := testhelpers.NewPseudoRandomDataSource(t, 0)
+	readData := multicallEmptyArgs()
+	writeRandAData := multicallEmptyArgs()
+	writeRandBData := multicallEmptyArgs()
+	writeZeroData := multicallEmptyArgs()
+	for i := 0; i < numSlots; i++ {
+		slot := rander.GetHash()
+		readData = multicallAppendLoad(readData, slot, false)
+		writeRandAData = multicallAppendStore(writeRandAData, slot, rander.GetHash(), false)
+		writeRandBData = multicallAppendStore(writeRandBData, slot, rander.GetHash(), false)
+		writeZeroData = multicallAppendStore(writeZeroData, slot, common.Hash{}, false)
+	}
+
+	for _, tc := range []struct {
+		name string
+		data []byte
+	}{
+		{"initialWrite", writeRandAData},
+		{"read", readData},
+		{"writeAgain", writeRandBData},
+		{"delete", writeZeroData},
+		{"readZeros", readData},
+		{"writeAgainAgain", writeRandAData},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			compareGasUsage(t, builder, evmMulticall, stylusMulticall, tc.data, nil, compareGasSum, 0,
+				compareGasPair{vm.SSTORE, "storage_flush_cache"}, compareGasPair{vm.SLOAD, "storage_load_bytes32"})
+		})
+	}
+}
+
+func TestProgramLogCost(t *testing.T) {
+	builder := setupGasCostTest(t)
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+	stylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test"))
+	evmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData)
+	packer, _ := util.NewCallParser(mocksgen.HostioTestABI, "emitLog")
+
+	for ntopics := int8(0); ntopics < 5; ntopics++ {
+		for _, dataSize := range []uint64{10, 100, 1000} {
+			name := fmt.Sprintf("emitLog%dData%d", ntopics, dataSize)
+			t.Run(name, func(t *testing.T) {
+				args := []any{
+					testhelpers.RandomSlice(dataSize),
+					ntopics,
+				}
+				for i := 0; i < 4; i++ {
+					args = append(args, testhelpers.RandomHash())
+				}
+				data, err := packer(args...)
+				Require(t, err)
+				opcode := vm.LOG0 + vm.OpCode(ntopics)
+				compareGasUsage(t, builder, evmProgram, stylusProgram, data, nil, compareGasForEach, 0, compareGasPair{opcode, "emit_log"})
+			})
+		}
+	}
+}
+
+func TestProgramCallCost(t *testing.T) {
+	builder := setupGasCostTest(t)
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+	stylusMulticall := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("multicall"))
+	evmMulticall := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.MultiCallTestMetaData)
+	otherStylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test"))
+	otherEvmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData)
+	packer, _ := util.NewCallParser(mocksgen.HostioTestABI, "msgValue")
+	otherData, err := packer()
+	Require(t, err)
+
+	for _, pair := range []compareGasPair{
+		{vm.CALL, "call_contract"},
+		{vm.DELEGATECALL, "delegate_call_contract"},
+		{vm.STATICCALL, "static_call_contract"},
+	} {
+		t.Run(pair.hostio+"/burnGas", func(t *testing.T) {
+			arbTest := common.HexToAddress("0x0000000000000000000000000000000000000069")
+			burnArbGas, _ := util.NewCallParser(precompilesgen.ArbosTestABI, "burnArbGas")
+			burnData, err := burnArbGas(big.NewInt(0))
+			Require(t, err)
+			data := argsForMulticall(pair.opcode, arbTest, nil, burnData)
+			compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, nil, compareGasForEach, 0, pair)
+		})
+
+		t.Run(pair.hostio+"/evmContract", func(t *testing.T) {
+			data := argsForMulticall(pair.opcode, otherEvmProgram, nil, otherData)
+			compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, nil, compareGasForEach, 0, pair,
+				compareGasPair{vm.RETURNDATACOPY, "read_return_data"}) // also test read_return_data
+		})
+
+		t.Run(pair.hostio+"/stylusContract", func(t *testing.T) {
+			data := argsForMulticall(pair.opcode, otherStylusProgram, nil, otherData)
+			compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, nil, compareGasForEach, 0, pair,
+				compareGasPair{vm.RETURNDATACOPY, "read_return_data"}) // also test read_return_data
+		})
+
+		t.Run(pair.hostio+"/multipleTimes", func(t *testing.T) {
+			data := multicallEmptyArgs()
+			for i := 0; i < 9; i++ {
+				data = multicallAppend(data, pair.opcode, otherEvmProgram, otherData)
+			}
+			compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, nil, compareGasForEach, 0, pair)
+		})
+	}
+
+	t.Run("call_contract/evmContractWithValue", func(t *testing.T) {
+		value := big.NewInt(1000)
+		data := argsForMulticall(vm.CALL, otherEvmProgram, value, otherData)
+		compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, value, compareGasForEach, 0, compareGasPair{vm.CALL, "call_contract"})
+	})
+}
+
+func TestProgramCreateCost(t *testing.T) {
+	builder := setupGasCostTest(t)
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+	stylusCreate := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("create"))
+	evmCreate := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.CreateTestMetaData)
+	deployCode := common.FromHex(mocksgen.ProgramTestMetaData.Bin)
+
+	t.Run("create1", func(t *testing.T) {
+		data := []byte{0x01}
+		data = append(data, (common.Hash{}).Bytes()...) // endowment
+		data = append(data, deployCode...)
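+		// Editor's note: the leading 0x01 byte is assumed to select the create1
+		// path in the Stylus "create" test program; the 32-byte endowment and
+		// the init code appended above follow it.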
+		compareGasUsage(t, builder, evmCreate, stylusCreate, data, nil, compareGasForEach, 0, compareGasPair{vm.CREATE, "create1"})
+	})
+
+	t.Run("create2", func(t *testing.T) {
+		data := []byte{0x02}
+		data = append(data, (common.Hash{}).Bytes()...)            // endowment
+		data = append(data, (common.HexToHash("beef")).Bytes()...) // salt
+		data = append(data, deployCode...)
+		compareGasUsage(t, builder, evmCreate, stylusCreate, data, nil, compareGasForEach, 0, compareGasPair{vm.CREATE2, "create2"})
+	})
+}
+
+func TestProgramKeccakCost(t *testing.T) {
+	builder := setupGasCostTest(t)
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+	stylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test"))
+	evmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData)
+	packer, _ := util.NewCallParser(mocksgen.HostioTestABI, "keccak")
+
+	for i := 1; i < 5; i++ {
+		size := uint64(math.Pow10(i))
+		name := fmt.Sprintf("keccak%d", size)
+		t.Run(name, func(t *testing.T) {
+			preImage := testhelpers.RandomSlice(size)
+			preImage[len(preImage)-1] = 0
+			data, err := packer(preImage)
+			Require(t, err)
+			const maxDiff = 2.5 // stylus keccak charges significantly less gas
+			compareGasUsage(t, builder, evmProgram, stylusProgram, data, nil, compareGasForEach, maxDiff, compareGasPair{vm.KECCAK256, "native_keccak256"})
+		})
+	}
+}
+
+func setupGasCostTest(t *testing.T) *NodeBuilder {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	cleanup := builder.Build(t)
+	t.Cleanup(cleanup)
+	return builder
+}
+
+// deployEvmContract deploys an EVM contract and returns its address.
+func deployEvmContract(t *testing.T, ctx context.Context, auth bind.TransactOpts, client *ethclient.Client, metadata *bind.MetaData) common.Address {
+	t.Helper()
+	parsed, err := metadata.GetAbi()
+	Require(t, err)
+	address, tx, _, err := bind.DeployContract(&auth, *parsed, common.FromHex(metadata.Bin), client)
+	Require(t, err)
+	_, err = EnsureTxSucceeded(ctx, client, tx)
+	Require(t, err)
+	return address
+}
+
+// measureGasUsage calls an EVM and a Wasm contract, passing the same data and the same value to each.
+func measureGasUsage(
+	t *testing.T,
+	builder *NodeBuilder,
+	evmContract common.Address,
+	stylusContract common.Address,
+	txData []byte,
+	txValue *big.Int,
+) (map[vm.OpCode][]uint64, map[string][]float64) {
+	const txGas uint64 = 32_000_000
+	txs := []*types.Transaction{
+		builder.L2Info.PrepareTxTo("Owner", &evmContract, txGas, txValue, txData),
+		builder.L2Info.PrepareTxTo("Owner", &stylusContract, txGas, txValue, txData),
+	}
+	receipts := builder.L2.SendWaitTestTransactions(t, txs)
+
+	evmGas := receipts[0].GasUsedForL2()
+	evmGasUsage, err := evmOpcodesGasUsage(builder.ctx, builder.L2.Client.Client(), txs[0])
+	Require(t, err)
+
+	stylusGas := receipts[1].GasUsedForL2()
+	stylusGasUsage, err := stylusHostiosGasUsage(builder.ctx, builder.L2.Client.Client(), txs[1])
+	Require(t, err)
+
+	t.Logf("evm total usage: %v - stylus total usage: %v", evmGas, stylusGas)
+
+	return evmGasUsage, stylusGasUsage
+}
+
+type compareGasPair struct {
+	opcode vm.OpCode
+	hostio string
+}
+
+type compareGasMode int
+
+const (
+	compareGasForEach compareGasMode = iota
+	compareGasSum
+)
+
+// compareGasUsage calls measureGasUsage and then ensures that the given
+// opcodes and HostIOs cost roughly the same amount of gas.
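+//
+// A usage sketch (editor's illustration, using names defined in this file):
+//
+//	compareGasUsage(t, builder, evmContract, stylusContract, data, nil,
+//		compareGasForEach, 0, compareGasPair{vm.SLOAD, "storage_load_bytes32"})
+//
+// asserts that each SLOAD in the EVM trace costs roughly the same gas as the
+// matching storage_load_bytes32 HostIO in the Stylus trace.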
+func compareGasUsage(
+	t *testing.T,
+	builder *NodeBuilder,
+	evmContract common.Address,
+	stylusContract common.Address,
+	txData []byte,
+	txValue *big.Int,
+	mode compareGasMode,
+	maxAllowedDifference float64,
+	pairs ...compareGasPair,
+) {
+	if evmContract == stylusContract {
+		Fatal(t, "evm and stylus contract are the same")
+	}
+	evmGasUsage, stylusGasUsage := measureGasUsage(t, builder, evmContract, stylusContract, txData, txValue)
+	for i := range pairs {
+		opcode := pairs[i].opcode
+		hostio := pairs[i].hostio
+		switch mode {
+		case compareGasForEach:
+			if len(evmGasUsage[opcode]) != len(stylusGasUsage[hostio]) {
+				Fatal(t, "mismatch between hostios and opcodes", evmGasUsage, stylusGasUsage)
+			}
+			for i := range evmGasUsage[opcode] {
+				opcodeGas := evmGasUsage[opcode][i]
+				hostioGas := stylusGasUsage[hostio][i]
+				t.Logf("evm %v usage: %v - stylus %v usage: %v", opcode, opcodeGas, hostio, hostioGas)
+				checkPercentDiff(t, float64(opcodeGas), hostioGas, maxAllowedDifference)
+			}
+		case compareGasSum:
+			evmSum := float64(0)
+			stylusSum := float64(0)
+			for i := range evmGasUsage[opcode] {
+				evmSum += float64(evmGasUsage[opcode][i])
+				stylusSum += stylusGasUsage[hostio][i]
+			}
+			t.Logf("evm %v usage: %v - stylus %v usage: %v", opcode, evmSum, hostio, stylusSum)
+			checkPercentDiff(t, evmSum, stylusSum, maxAllowedDifference)
+		}
+	}
+}
+
+func evmOpcodesGasUsage(ctx context.Context, rpcClient rpc.ClientInterface, tx *types.Transaction) (
+	map[vm.OpCode][]uint64, error) {
+
+	var result logger.ExecutionResult
+	err := rpcClient.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to trace evm call: %w", err)
+	}
+
+	gasUsage := map[vm.OpCode][]uint64{}
+	for i := range result.StructLogs {
+		op := vm.StringToOp(result.StructLogs[i].Op)
+		gasUsed := uint64(0)
+		if op == vm.CALL || op == vm.STATICCALL || op == vm.DELEGATECALL || op == vm.CREATE || op == vm.CREATE2 {
+			// For the CALL* opcodes, the GasCost in the tracer represents the gas sent
+			// to the callee contract, which is 63/64 of the remaining gas. This happens
+			// because the tracer is evaluated before the call is executed, so the EVM
+			// doesn't know how much gas will be used.
+			//
+			// In the case of the Stylus tracer, the trace is emitted after the
+			// execution, so the EndInk field is set to the ink after the call returned.
+			// Hence, it also includes the ink spent by the callee contract.
+			//
+			// To make a precise comparison between the EVM and Stylus, we modify the
+			// EVM measurement to include the gas spent by the callee contract. To do
+			// so, we go through the opcodes after CALL until we find the first opcode
+			// in the caller's depth. Then, we subtract the gas remaining after the
+			// call returned from the gas before the call.
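+			//
+			// Worked example (editor's illustration): if StructLogs[i-1].Gas is
+			// 100000, and the first entry back at the caller's depth shows
+			// Gas 60000 with GasCost 3, then gasAfterCall is 60003 and the CALL
+			// is recorded as using 100000 - 60003 = 39997 gas, callee included.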
+			var gasAfterCall uint64
+			for j := i + 1; j < len(result.StructLogs); j++ {
+				if result.StructLogs[j].Depth == result.StructLogs[i].Depth {
+					// back to the original call
+					gasAfterCall = result.StructLogs[j].Gas + result.StructLogs[j].GasCost
+					break
+				}
+			}
+			if gasAfterCall == 0 {
+				return nil, fmt.Errorf("malformed log: didn't get back to the original call depth")
+			}
+			if i == 0 {
+				return nil, fmt.Errorf("malformed log: call is first opcode")
+			}
+			gasUsed = result.StructLogs[i-1].Gas - gasAfterCall
+		} else {
+			gasUsed = result.StructLogs[i].GasCost
+		}
+		gasUsage[op] = append(gasUsage[op], gasUsed)
+	}
+	return gasUsage, nil
+}
+
+func stylusHostiosGasUsage(ctx context.Context, rpcClient rpc.ClientInterface, tx *types.Transaction) (
+	map[string][]float64, error) {
+
+	traceOpts := struct {
+		Tracer string `json:"tracer"`
+	}{
+		Tracer: "stylusTracer",
+	}
+	var result []gethexec.HostioTraceInfo
+	err := rpcClient.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), traceOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to trace stylus call: %w", err)
+	}
+
+	const InkPerGas = 10000
+	gasUsage := map[string][]float64{}
+	for _, hostioLog := range result {
+		gasCost := float64(hostioLog.StartInk-hostioLog.EndInk) / InkPerGas
+		gasUsage[hostioLog.Name] = append(gasUsage[hostioLog.Name], gasCost)
+	}
+	return gasUsage, nil
+}
+
+// checkPercentDiff checks whether the two values are close enough.
+func checkPercentDiff(t *testing.T, a, b float64, maxAllowedDifference float64) {
+	t.Helper()
+	if maxAllowedDifference == 0 {
+		maxAllowedDifference = 0.25
+	}
+	percentageDifference := (max(a, b) / min(a, b)) - 1
+	if percentageDifference > maxAllowedDifference {
+		Fatal(t, fmt.Sprintf("gas usages are too different; got %v, max allowed is %v", percentageDifference, maxAllowedDifference))
+	}
+}
diff --git a/system_tests/program_test.go b/system_tests/program_test.go
index 1cbbf268f1..cf8cd72559 100644
--- a/system_tests/program_test.go
+++ b/system_tests/program_test.go
@@ -417,10 +417,15 @@ func storageTest(t *testing.T, jit bool) {
 	key := testhelpers.RandomHash()
 	value := testhelpers.RandomHash()
 	tx := l2info.PrepareTxTo("Owner", &programAddress, l2info.TransferGas, nil, argsForStorageWrite(key, value))
-	ensure(tx, l2client.SendTransaction(ctx, tx))
+	receipt := ensure(tx, l2client.SendTransaction(ctx, tx))
+
 	assertStorageAt(t, ctx, l2client, programAddress, key, value)
 
 	validateBlocks(t, 2, jit, builder)
+
+	// Captures a block_inputs_<id>.json file for the block that included the
+	// storage write transaction.
+	recordBlock(t, receipt.BlockNumber.Uint64(), builder)
 }
 
 func TestProgramTransientStorage(t *testing.T) {
diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go
index 2739c7545e..912b48ea6a 100644
--- a/system_tests/validation_mock_test.go
+++ b/system_tests/validation_mock_test.go
@@ -96,10 +96,6 @@ func (s *mockSpawner) LatestWasmModuleRoot() containers.PromiseInterface[common.
return containers.NewReadyPromise[common.Hash](mockWasmModuleRoots[0], nil) } -func (s *mockSpawner) WriteToFile(input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}] { - return containers.NewReadyPromise[struct{}](struct{}{}, nil) -} - type mockValRun struct { containers.Promise[validator.GoGlobalState] root common.Hash diff --git a/system_tests/wrap_transaction_test.go b/system_tests/wrap_transaction_test.go index bd561ad5e5..36052fb2db 100644 --- a/system_tests/wrap_transaction_test.go +++ b/system_tests/wrap_transaction_test.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbutil" @@ -22,7 +23,7 @@ import ( "github.com/offchainlabs/nitro/util/headerreader" ) -func GetPendingBlockNumber(ctx context.Context, client arbutil.L1Interface) (*big.Int, error) { +func GetPendingBlockNumber(ctx context.Context, client *ethclient.Client) (*big.Int, error) { // Attempt to get the block number from ArbSys, if it exists arbSys, err := precompilesgen.NewArbSys(common.BigToAddress(big.NewInt(100)), client) if err != nil { @@ -37,7 +38,7 @@ func GetPendingBlockNumber(ctx context.Context, client arbutil.L1Interface) (*bi } // Will wait until txhash is in the blockchain and return its receipt -func WaitForTx(ctxinput context.Context, client arbutil.L1Interface, txhash common.Hash, timeout time.Duration) (*types.Receipt, error) { +func WaitForTx(ctxinput context.Context, client *ethclient.Client, txhash common.Hash, timeout time.Duration) (*types.Receipt, error) { ctx, cancel := context.WithTimeout(ctxinput, timeout) defer cancel() @@ -75,11 +76,11 @@ func WaitForTx(ctxinput context.Context, client arbutil.L1Interface, txhash comm } } -func EnsureTxSucceeded(ctx context.Context, client arbutil.L1Interface, tx *types.Transaction) (*types.Receipt, error) { +func EnsureTxSucceeded(ctx context.Context, client *ethclient.Client, tx *types.Transaction) (*types.Receipt, error) { return EnsureTxSucceededWithTimeout(ctx, client, tx, time.Second*5) } -func EnsureTxSucceededWithTimeout(ctx context.Context, client arbutil.L1Interface, tx *types.Transaction, timeout time.Duration) (*types.Receipt, error) { +func EnsureTxSucceededWithTimeout(ctx context.Context, client *ethclient.Client, tx *types.Transaction, timeout time.Duration) (*types.Receipt, error) { receipt, err := WaitForTx(ctx, client, tx.Hash(), timeout) if err != nil { return nil, fmt.Errorf("waitFoxTx (tx=%s) got: %w", tx.Hash().Hex(), err) @@ -103,12 +104,12 @@ func EnsureTxSucceededWithTimeout(ctx context.Context, client arbutil.L1Interfac return receipt, arbutil.DetailTxError(ctx, client, tx, receipt) } -func EnsureTxFailed(t *testing.T, ctx context.Context, client arbutil.L1Interface, tx *types.Transaction) *types.Receipt { +func EnsureTxFailed(t *testing.T, ctx context.Context, client *ethclient.Client, tx *types.Transaction) *types.Receipt { t.Helper() return EnsureTxFailedWithTimeout(t, ctx, client, tx, time.Second*5) } -func EnsureTxFailedWithTimeout(t *testing.T, ctx context.Context, client arbutil.L1Interface, tx *types.Transaction, timeout time.Duration) *types.Receipt { +func EnsureTxFailedWithTimeout(t *testing.T, ctx context.Context, client *ethclient.Client, tx *types.Transaction, timeout 
time.Duration) *types.Receipt { t.Helper() receipt, err := WaitForTx(ctx, client, tx.Hash(), timeout) Require(t, err) diff --git a/util/headerreader/blob_client.go b/util/headerreader/blob_client.go index 160323cf60..4831994bba 100644 --- a/util/headerreader/blob_client.go +++ b/util/headerreader/blob_client.go @@ -18,8 +18,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/jsonapi" "github.com/offchainlabs/nitro/util/pretty" @@ -28,7 +28,7 @@ import ( ) type BlobClient struct { - ec arbutil.L1Interface + ec *ethclient.Client beaconUrl *url.URL secondaryBeaconUrl *url.URL httpClient *http.Client @@ -63,7 +63,7 @@ func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".authorization", DefaultBlobClientConfig.Authorization, "Value to send with the HTTP Authorization: header for Beacon REST requests, must include both scheme and scheme parameters") } -func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient, error) { +func NewBlobClient(config BlobClientConfig, ec *ethclient.Client) (*BlobClient, error) { beaconUrl, err := url.Parse(config.BeaconUrl) if err != nil { return nil, fmt.Errorf("failed to parse beacon chain URL: %w", err) diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index c8041dc871..98f778dee8 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbutil" @@ -33,7 +34,7 @@ type ArbSysInterface interface { type HeaderReader struct { stopwaiter.StopWaiter config ConfigFetcher - client arbutil.L1Interface + client *ethclient.Client isParentChainArbitrum bool arbSys ArbSysInterface @@ -120,7 +121,7 @@ var TestConfig = Config{ }, } -func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSysPrecompile ArbSysInterface) (*HeaderReader, error) { +func New(ctx context.Context, client *ethclient.Client, config ConfigFetcher, arbSysPrecompile ArbSysInterface) (*HeaderReader, error) { isParentChainArbitrum := false var arbSys ArbSysInterface if arbSysPrecompile != nil { @@ -522,7 +523,7 @@ func (s *HeaderReader) LatestFinalizedBlockNr(ctx context.Context) (uint64, erro return header.Number.Uint64(), nil } -func (s *HeaderReader) Client() arbutil.L1Interface { +func (s *HeaderReader) Client() *ethclient.Client { return s.client } diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go index 3b18ad1851..934362f00a 100644 --- a/validator/client/validation_client.go +++ b/validator/client/validation_client.go @@ -188,19 +188,6 @@ func (c *ExecutionClient) LatestWasmModuleRoot() containers.PromiseInterface[com }) } -func (c *ExecutionClient) WriteToFile(input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}] { - jsonInput := server_api.ValidationInputToJson(input) - if err := jsonInput.WriteToFile(); err != nil { - return 
stopwaiter.LaunchPromiseThread[struct{}](c, func(ctx context.Context) (struct{}, error) {
-			return struct{}{}, err
-		})
-	}
-	return stopwaiter.LaunchPromiseThread[struct{}](c, func(ctx context.Context) (struct{}, error) {
-		err := c.client.CallContext(ctx, nil, server_api.Namespace+"_writeToFile", jsonInput, expOut, moduleRoot)
-		return struct{}{}, err
-	})
-}
-
 func (r *ExecutionClientRun) SendKeepAlive(ctx context.Context) time.Duration {
 	err := r.client.client.CallContext(ctx, nil, server_api.Namespace+"_execKeepAlive", r.id)
 	if err != nil {
diff --git a/validator/inputs/writer.go b/validator/inputs/writer.go
new file mode 100644
index 0000000000..a45e584f52
--- /dev/null
+++ b/validator/inputs/writer.go
@@ -0,0 +1,141 @@
+package inputs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/offchainlabs/nitro/validator/server_api"
+)
+
+// Writer is a configurable writer of InputJSON files.
+//
+// The default Writer will write to a path like:
+//
+//	$HOME/.arbitrum/validation-inputs/<timestamp>/block_inputs_<id>.json
+//
+// The path can be nested under a slug directory so callers can provide a
+// recognizable name to differentiate various contexts in which the InputJSON
+// is being written. If the Writer is configured with the WithSlug option, then
+// the path will be like:
+//
+//	$HOME/.arbitrum/validation-inputs/<slug>/<timestamp>/block_inputs_<id>.json
+//
+// The inclusion of a timestamp directory is on by default to avoid conflicts which
+// would result in files being overwritten. However, the Writer can be configured
+// to not use a timestamp directory. If the Writer is configured with
+// WithTimestampDirEnabled(false), then the path will be like:
+//
+//	$HOME/.arbitrum/validation-inputs/<slug>/block_inputs_<id>.json
+//
+// Finally, to give complete control to the clients, the base directory can be
+// set directly with WithBaseDir, in which case the path will be like:
+//
+//	<base-dir>/block_inputs_<id>.json
+//	or
+//	<base-dir>/<slug>/block_inputs_<id>.json
+//	or
+//	<base-dir>/<slug>/<timestamp>/block_inputs_<id>.json
+type Writer struct {
+	clock           Clock
+	baseDir         string
+	slug            string
+	useTimestampDir bool
+}
+
+// WriterOption is a function that configures a Writer.
+type WriterOption func(*Writer)
+
+// Clock is an interface for getting the current time.
+type Clock interface {
+	Now() time.Time
+}
+
+type realClock struct{}
+
+func (realClock) Now() time.Time {
+	return time.Now()
+}
+
+// NewWriter creates a new Writer with default settings.
+func NewWriter(options ...WriterOption) (*Writer, error) {
+	homeDir, err := os.UserHomeDir()
+	if err != nil {
+		return nil, err
+	}
+	baseDir := filepath.Join(homeDir, ".arbitrum", "validation-inputs")
+	w := &Writer{
+		clock:           realClock{},
+		baseDir:         baseDir,
+		slug:            "",
+		useTimestampDir: true,
+	}
+	for _, o := range options {
+		o(w)
+	}
+	return w, nil
+}
+
+// withTestClock configures the Writer to use the given clock.
+//
+// This is only intended for testing.
+func withTestClock(clock Clock) WriterOption {
+	return func(w *Writer) {
+		w.clock = clock
+	}
+}
+
+// WithSlug configures the Writer to use the given slug as a directory name.
+func WithSlug(slug string) WriterOption {
+	return func(w *Writer) {
+		w.slug = slug
+	}
+}
+
+// WithoutSlug clears the slug configuration.
+//
+// This is equivalent to the WithSlug("") option but is more readable.
+func WithoutSlug() WriterOption {
+	return WithSlug("")
+}
+
+// WithBaseDir configures the Writer to use the given base directory.
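+//
+// A minimal usage sketch (editor's illustration):
+//
+//	w, err := NewWriter(WithBaseDir("/tmp/validation-inputs"), WithSlug("mytest"))
+//	if err != nil {
+//		// handle the error
+//	}
+//	err = w.Write(&server_api.InputJSON{Id: 1})
+//	// -> /tmp/validation-inputs/mytest/<timestamp>/block_inputs_1.json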
+func WithBaseDir(baseDir string) WriterOption { + return func(w *Writer) { + w.baseDir = baseDir + } +} + +// WithTimestampDirEnabled controls the addition of a timestamp directory. +func WithTimestampDirEnabled(useTimestampDir bool) WriterOption { + return func(w *Writer) { + w.useTimestampDir = useTimestampDir + } +} + +// Write writes the given InputJSON to a file in JSON format. +func (w *Writer) Write(json *server_api.InputJSON) error { + dir := w.baseDir + if w.slug != "" { + dir = filepath.Join(dir, w.slug) + } + if w.useTimestampDir { + t := w.clock.Now() + tStr := t.Format("20060102_150405") + dir = filepath.Join(dir, tStr) + } + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + contents, err := json.Marshal() + if err != nil { + return err + } + if err = os.WriteFile( + filepath.Join(dir, fmt.Sprintf("block_inputs_%d.json", json.Id)), + contents, 0600); err != nil { + return err + } + return nil +} diff --git a/validator/inputs/writer_test.go b/validator/inputs/writer_test.go new file mode 100644 index 0000000000..59cb63dae7 --- /dev/null +++ b/validator/inputs/writer_test.go @@ -0,0 +1,92 @@ +package inputs + +import ( + "os" + "testing" + "time" + + "github.com/offchainlabs/nitro/validator/server_api" +) + +func TestDefaultBaseDir(t *testing.T) { + // Simply testing that the default baseDir is set relative to the user's home directory. + // This way, the other tests can all override the baseDir to a temporary directory. + w, err := NewWriter() + if err != nil { + t.Fatal(err) + } + homeDir, err := os.UserHomeDir() + if err != nil { + t.Fatal(err) + } + if w.baseDir != homeDir+"/.arbitrum/validation-inputs" { + t.Errorf("unexpected baseDir: %v", w.baseDir) + } +} + +type fakeClock struct { + now time.Time +} + +func (c fakeClock) Now() time.Time { + return c.now +} + +func TestWriting(t *testing.T) { + dir := t.TempDir() + w, err := NewWriter( + withTestClock(fakeClock{now: time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC)}), + WithBaseDir(dir), + ) + if err != nil { + t.Fatal(err) + } + err = w.Write(&server_api.InputJSON{Id: 24601}) + if err != nil { + t.Fatal(err) + } + // The file should exist. + if _, err := os.Stat(dir + "/20210102_030405/block_inputs_24601.json"); err != nil { + t.Error(err) + } +} + +func TestWritingWithSlug(t *testing.T) { + dir := t.TempDir() + w, err := NewWriter( + withTestClock(fakeClock{now: time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC)}), + WithBaseDir(dir), + WithSlug("foo"), + ) + if err != nil { + t.Fatal(err) + } + err = w.Write(&server_api.InputJSON{Id: 24601}) + if err != nil { + t.Fatal(err) + } + // The file should exist. + if _, err := os.Stat(dir + "/foo/20210102_030405/block_inputs_24601.json"); err != nil { + t.Error(err) + } +} + +func TestWritingWithoutTimestampDir(t *testing.T) { + dir := t.TempDir() + w, err := NewWriter( + withTestClock(fakeClock{now: time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC)}), + WithBaseDir(dir), + WithTimestampDirEnabled(false), + ) + if err != nil { + t.Fatal(err) + } + err = w.Write(&server_api.InputJSON{Id: 24601}) + if err != nil { + t.Fatal(err) + } + // The file should exist. 
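+	// (Editor's note: with the timestamp dir disabled, the file lands directly
+	// under the base dir as block_inputs_<id>.json.)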
+	if _, err := os.Stat(dir + "/block_inputs_24601.json"); err != nil {
+		t.Error(err)
+	}
+}
diff --git a/validator/interface.go b/validator/interface.go
index af08629137..9fb831ca0d 100644
--- a/validator/interface.go
+++ b/validator/interface.go
@@ -27,7 +27,6 @@ type ExecutionSpawner interface {
 	ValidationSpawner
 	CreateExecutionRun(wasmModuleRoot common.Hash, input *ValidationInput) containers.PromiseInterface[ExecutionRun]
 	LatestWasmModuleRoot() containers.PromiseInterface[common.Hash]
-	WriteToFile(input *ValidationInput, expOut GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}]
 }
 
 type ExecutionRun interface {
diff --git a/validator/server_api/json.go b/validator/server_api/json.go
index 6fe936e17d..8dfbc8446a 100644
--- a/validator/server_api/json.go
+++ b/validator/server_api/json.go
@@ -8,7 +8,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"os"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
@@ -68,15 +67,9 @@ type InputJSON struct {
 	DebugChain bool
 }
 
-func (i *InputJSON) WriteToFile() error {
-	contents, err := json.MarshalIndent(i, "", " ")
-	if err != nil {
-		return err
-	}
-	if err = os.WriteFile(fmt.Sprintf("block_inputs_%d.json", i.Id), contents, 0600); err != nil {
-		return err
-	}
-	return nil
+// Marshal returns the JSON encoding of the InputJSON.
+func (i *InputJSON) Marshal() ([]byte, error) {
+	return json.MarshalIndent(i, "", " ")
 }
 
 type BatchInfoJson struct {
diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go
index 6f0d0cee1d..07971e2ba5 100644
--- a/validator/server_arb/validator_spawner.go
+++ b/validator/server_arb/validator_spawner.go
@@ -2,11 +2,8 @@ package server_arb
 
 import (
 	"context"
-	"encoding/binary"
 	"errors"
 	"fmt"
-	"os"
-	"path/filepath"
 	"runtime"
 	"sync/atomic"
 	"time"
@@ -98,7 +95,7 @@ func (s *ArbitratorSpawner) Name() string {
 	return "arbitrator"
 }
 
-func (v *ArbitratorSpawner) loadEntryToMachine(ctx context.Context, entry *validator.ValidationInput, mach *ArbitratorMachine) error {
+func (v *ArbitratorSpawner) loadEntryToMachine(_ context.Context, entry *validator.ValidationInput, mach *ArbitratorMachine) error {
 	resolver := func(ty arbutil.PreimageType, hash common.Hash) ([]byte, error) {
 		// Check if it's a known preimage
 		if preimage, ok := entry.Preimages[ty][hash]; ok {
@@ -208,139 +206,6 @@ func (v *ArbitratorSpawner) Room() int {
 	return avail
 }
 
-var launchTime = time.Now().Format("2006_01_02__15_04")
-
-//nolint:gosec
-func (v *ArbitratorSpawner) writeToFile(ctx context.Context, input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) error {
-	outDirPath := filepath.Join(v.locator.RootPath(), v.config().OutputPath, launchTime, fmt.Sprintf("block_%d", input.Id))
-	err := os.MkdirAll(outDirPath, 0755)
-	if err != nil {
-		return err
-	}
-	if ctx.Err() != nil {
-		return ctx.Err()
-	}
-
-	rootPathAssign := ""
-	if executable, err := os.Executable(); err == nil {
-		rootPathAssign = "ROOTPATH=\"" + filepath.Dir(executable) + "\"\n"
-	}
-	cmdFile, err := os.OpenFile(filepath.Join(outDirPath, "run-prover.sh"),
os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755) - if err != nil { - return err - } - defer cmdFile.Close() - _, err = cmdFile.WriteString("#!/bin/bash\n" + - fmt.Sprintf("# expected output: batch %d, postion %d, hash %s\n", expOut.Batch, expOut.PosInBatch, expOut.BlockHash) + - "MACHPATH=\"" + v.locator.GetMachinePath(moduleRoot) + "\"\n" + - rootPathAssign + - "if (( $# > 1 )); then\n" + - " if [[ $1 == \"-m\" ]]; then\n" + - " MACHPATH=$2\n" + - " shift\n" + - " shift\n" + - " fi\n" + - "fi\n" + - "${ROOTPATH}/bin/prover ${MACHPATH}/replay.wasm") - if err != nil { - return err - } - if ctx.Err() != nil { - return ctx.Err() - } - - libraries := []string{"soft-float.wasm", "wasi_stub.wasm", "go_stub.wasm", "host_io.wasm", "brotli.wasm"} - for _, module := range libraries { - _, err = cmdFile.WriteString(" -l " + "${MACHPATH}/" + module) - if err != nil { - return err - } - } - _, err = cmdFile.WriteString(fmt.Sprintf(" --inbox-position %d --position-within-message %d --last-block-hash %s", input.StartState.Batch, input.StartState.PosInBatch, input.StartState.BlockHash)) - if err != nil { - return err - } - - for _, msg := range input.BatchInfo { - if ctx.Err() != nil { - return ctx.Err() - } - sequencerFileName := fmt.Sprintf("sequencer_%d.bin", msg.Number) - err = os.WriteFile(filepath.Join(outDirPath, sequencerFileName), msg.Data, 0644) - if err != nil { - return err - } - _, err = cmdFile.WriteString(" --inbox " + sequencerFileName) - if err != nil { - return err - } - } - - preimageFile, err := os.Create(filepath.Join(outDirPath, "preimages.bin")) - if err != nil { - return err - } - defer preimageFile.Close() - for ty, preimages := range input.Preimages { - _, err = preimageFile.Write([]byte{byte(ty)}) - if err != nil { - return err - } - for _, data := range preimages { - if ctx.Err() != nil { - return ctx.Err() - } - lenbytes := make([]byte, 8) - binary.LittleEndian.PutUint64(lenbytes, uint64(len(data))) - _, err := preimageFile.Write(lenbytes) - if err != nil { - return err - } - _, err = preimageFile.Write(data) - if err != nil { - return err - } - } - } - - _, err = cmdFile.WriteString(" --preimages preimages.bin") - if err != nil { - return err - } - - if input.HasDelayedMsg { - if ctx.Err() != nil { - return ctx.Err() - } - _, err = cmdFile.WriteString(fmt.Sprintf(" --delayed-inbox-position %d", input.DelayedMsgNr)) - if err != nil { - return err - } - filename := fmt.Sprintf("delayed_%d.bin", input.DelayedMsgNr) - err = os.WriteFile(filepath.Join(outDirPath, filename), input.DelayedMsg, 0644) - if err != nil { - return err - } - _, err = cmdFile.WriteString(fmt.Sprintf(" --delayed-inbox %s", filename)) - if err != nil { - return err - } - } - - _, err = cmdFile.WriteString(" \"$@\"\n") - if err != nil { - return err - } - return nil -} - -func (v *ArbitratorSpawner) WriteToFile(input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}] { - return stopwaiter.LaunchPromiseThread[struct{}](v, func(ctx context.Context) (struct{}, error) { - err := v.writeToFile(ctx, input, expOut, moduleRoot) - return struct{}{}, err - }) -} - func (v *ArbitratorSpawner) CreateExecutionRun(wasmModuleRoot common.Hash, input *validator.ValidationInput) containers.PromiseInterface[validator.ExecutionRun] { getMachine := func(ctx context.Context) (MachineInterface, error) { initialFrozenMachine, err := v.machineLoader.GetZeroStepMachine(ctx, wasmModuleRoot) diff --git a/validator/server_jit/jit_machine.go 
index 2bea75fbe9..0748101277 100644
--- a/validator/server_jit/jit_machine.go
+++ b/validator/server_jit/jit_machine.go
@@ -33,7 +33,7 @@ type JitMachine struct {
 	maxExecutionTime time.Duration
 }
 
-func createJitMachine(jitBinary string, binaryPath string, cranelift bool, wasmMemoryUsageLimit int, maxExecutionTime time.Duration, moduleRoot common.Hash, fatalErrChan chan error) (*JitMachine, error) {
+func createJitMachine(jitBinary string, binaryPath string, cranelift bool, wasmMemoryUsageLimit int, maxExecutionTime time.Duration, _ common.Hash, fatalErrChan chan error) (*JitMachine, error) {
 	invocation := []string{"--binary", binaryPath, "--forks"}
 	if cranelift {
 		invocation = append(invocation, "--cranelift")
diff --git a/validator/valnode/validation_api.go b/validator/valnode/validation_api.go
index a10d931dfc..ef3e1b2c49 100644
--- a/validator/valnode/validation_api.go
+++ b/validator/valnode/validation_api.go
@@ -118,15 +118,6 @@ func (a *ExecServerAPI) Start(ctx_in context.Context) {
 	a.CallIteratively(a.removeOldRuns)
 }
 
-func (a *ExecServerAPI) WriteToFile(ctx context.Context, jsonInput *server_api.InputJSON, expOut validator.GoGlobalState, moduleRoot common.Hash) error {
-	input, err := server_api.ValidationInputFromJson(jsonInput)
-	if err != nil {
-		return err
-	}
-	_, err = a.execSpawner.WriteToFile(input, expOut, moduleRoot).Await(ctx)
-	return err
-}
-
 var errRunNotFound error = errors.New("run not found")
 
 func (a *ExecServerAPI) getRun(id uint64) (validator.ExecutionRun, error) {

From 67e09f35682ce40332c9983f4f7cd3b4a016652a Mon Sep 17 00:00:00 2001
From: Maciej Kulawik <10907694+magicxyyz@users.noreply.github.com>
Date: Tue, 8 Oct 2024 14:56:22 +0200
Subject: [PATCH 35/41] system_tests: use wasmCacheTag when calling WrapDatabaseWithWasm

Co-authored-by: Diego Ximenes Mendes
---
 system_tests/common_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index b5e71e3385..807d39d082 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -1420,7 +1420,7 @@ func Create2ndNodeWithConfig(
 	Require(t, err)
 	wasmData, err := chainStack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm"))
 	Require(t, err)
-	chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 1, execConfig.StylusTarget.WasmTargets())
+	chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, wasmCacheTag, execConfig.StylusTarget.WasmTargets())
 	arbDb, err := chainStack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata"))
 	Require(t, err)
 

From b178d70275e2bc0fe1d4701fb8715cddfc03c8e1 Mon Sep 17 00:00:00 2001
From: Maciej Kulawik
Date: Tue, 8 Oct 2024 15:06:44 +0200
Subject: [PATCH 36/41] fix names of stylus cache metrics

---
 arbos/programs/native.go | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/arbos/programs/native.go b/arbos/programs/native.go
index 5baacea381..f1fde5c556 100644
--- a/arbos/programs/native.go
+++ b/arbos/programs/native.go
@@ -47,16 +47,16 @@ type rustBytes = C.RustBytes
 type rustSlice = C.RustSlice
 
 var (
-	stylusLRUCacheSizeBytesGauge        = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/lru/size_bytes", nil)
-	stylusLRUCacheSizeCountGauge        = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/lru/count", nil)
-	stylusLRUCacheSizeHitsCounter       = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/hits", nil)
-	stylusLRUCacheSizeMissesCounter     = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/misses", nil)
-	stylusLRUCacheSizeDoesNotFitCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/does_not_fit", nil)
-
-	stylusLongTermCacheSizeBytesGauge    = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/long_term/size_bytes", nil)
-	stylusLongTermCacheSizeCountGauge    = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/long_term/count", nil)
-	stylusLongTermCacheSizeHitsCounter   = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/long_term/hits", nil)
-	stylusLongTermCacheSizeMissesCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/long_term/misses", nil)
+	stylusLRUCacheSizeBytesGauge    = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/lru/size_bytes", nil)
+	stylusLRUCacheCountGauge        = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/lru/count", nil)
+	stylusLRUCacheHitsCounter       = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/hits", nil)
+	stylusLRUCacheMissesCounter     = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/misses", nil)
+	stylusLRUCacheDoesNotFitCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/does_not_fit", nil)
+
+	stylusLongTermCacheSizeBytesGauge = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/long_term/size_bytes", nil)
+	stylusLongTermCacheCountGauge     = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/long_term/count", nil)
+	stylusLongTermCacheHitsCounter    = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/long_term/hits", nil)
+	stylusLongTermCacheMissesCounter  = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/long_term/misses", nil)
 )
 
 func activateProgram(
@@ -342,15 +342,15 @@ func UpdateWasmCacheMetrics() {
 	metrics := C.stylus_get_cache_metrics()
 
 	stylusLRUCacheSizeBytesGauge.Update(int64(metrics.lru.size_bytes))
-	stylusLRUCacheSizeCountGauge.Update(int64(metrics.lru.count))
-	stylusLRUCacheSizeHitsCounter.Inc(int64(metrics.lru.hits))
-	stylusLRUCacheSizeMissesCounter.Inc(int64(metrics.lru.misses))
-	stylusLRUCacheSizeDoesNotFitCounter.Inc(int64(metrics.lru.does_not_fit))
+	stylusLRUCacheCountGauge.Update(int64(metrics.lru.count))
+	stylusLRUCacheHitsCounter.Inc(int64(metrics.lru.hits))
+	stylusLRUCacheMissesCounter.Inc(int64(metrics.lru.misses))
+	stylusLRUCacheDoesNotFitCounter.Inc(int64(metrics.lru.does_not_fit))
 
 	stylusLongTermCacheSizeBytesGauge.Update(int64(metrics.long_term.size_bytes))
-	stylusLongTermCacheSizeCountGauge.Update(int64(metrics.long_term.count))
-	stylusLongTermCacheSizeHitsCounter.Inc(int64(metrics.long_term.hits))
-	stylusLongTermCacheSizeMissesCounter.Inc(int64(metrics.long_term.misses))
+	stylusLongTermCacheCountGauge.Update(int64(metrics.long_term.count))
+	stylusLongTermCacheHitsCounter.Inc(int64(metrics.long_term.hits))
+	stylusLongTermCacheMissesCounter.Inc(int64(metrics.long_term.misses))
 }
 
 // Used for testing

From c6619a5d3335433a2294c05f627f38d1b897d9c7 Mon Sep 17 00:00:00 2001
From: Maciej Kulawik
Date: Tue, 8 Oct 2024 15:12:19 +0200
Subject: [PATCH 37/41] fix build2ndNode

---
 system_tests/common_test.go | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index 807d39d082..1cde8fd7bc 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -86,12 +86,13 @@ import (
 type info = *BlockchainTestInfo
 
 type SecondNodeParams struct {
-	nodeConfig  *arbnode.Config
-	execConfig  *gethexec.Config
-	stackConfig *node.Config
-	dasConfig   *das.DataAvailabilityConfig
-	initData    *statetransfer.ArbosInitializationInfo
-	addresses   *chaininfo.RollupAddresses
+	nodeConfig   *arbnode.Config
+	execConfig   *gethexec.Config
+	stackConfig  *node.Config
+	dasConfig    *das.DataAvailabilityConfig
+	initData     *statetransfer.ArbosInitializationInfo
+	addresses    *chaininfo.RollupAddresses
+	wasmCacheTag uint32
 }
 
 type TestClient struct {
@@ -717,7 +718,7 @@ func build2ndNode(
 
 	testClient := NewTestClient(ctx)
 	testClient.Client, testClient.ConsensusNode =
-		Create2ndNodeWithConfig(t, ctx, firstNodeTestClient.ConsensusNode, parentChainTestClient.Stack, parentChainInfo, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, valnodeConfig, params.addresses, initMessage)
+		Create2ndNodeWithConfig(t, ctx, firstNodeTestClient.ConsensusNode, parentChainTestClient.Stack, parentChainInfo, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, valnodeConfig, params.addresses, initMessage, params.wasmCacheTag)
 	testClient.ExecNode = getExecNode(t, testClient.ConsensusNode)
 	testClient.cleanup = func() { testClient.ConsensusNode.StopAndWait() }
 	return testClient, func() { testClient.cleanup() }
@@ -1399,6 +1400,7 @@ func Create2ndNodeWithConfig(
 	valnodeConfig *valnode.Config,
 	addresses *chaininfo.RollupAddresses,
 	initMessage *arbostypes.ParsedInitMessage,
+	wasmCacheTag uint32,
 ) (*ethclient.Client, *arbnode.Node) {
 	if nodeConfig == nil {
 		nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest()
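With wasmCacheTag threaded through SecondNodeParams, a test can opt its second node into the shared Stylus long term cache. A usage sketch (illustrative values, assuming the NodeBuilder test helpers defined elsewhere in common_test.go):

	// Tag 1 is the ArbOS tag used by the long term cache; leaving the
	// field zero keeps the node out of the long term cache entirely.
	testClient, cleanup := builder.Build2ndNode(t, &SecondNodeParams{wasmCacheTag: 1})
	defer cleanup()
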

From 495ade6413f6984391dd7b099f11e1309f467160 Mon Sep 17 00:00:00 2001
From: Maciej Kulawik
Date: Tue, 8 Oct 2024 15:20:05 +0200
Subject: [PATCH 38/41] use fixed arbos tag in stylus_clear_long_term_cache as it's only for testing

---
 arbitrator/stylus/src/lib.rs | 6 +++---
 arbos/programs/native.go     | 4 ++--
 system_tests/program_test.go | 6 +++---
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs
index feac828989..a6be21f7bf 100644
--- a/arbitrator/stylus/src/lib.rs
+++ b/arbitrator/stylus/src/lib.rs
@@ -377,11 +377,11 @@ pub extern "C" fn stylus_clear_lru_cache() {
     InitCache::clear_lru_cache()
 }
 
-/// Clears long term cache.
+/// Clears long term cache (for arbos_tag = 1).
 /// Only used for testing purposes.
 #[no_mangle]
-pub extern "C" fn stylus_clear_long_term_cache(arbos_tag: u32) {
-    InitCache::clear_long_term(arbos_tag);
+pub extern "C" fn stylus_clear_long_term_cache() {
+    InitCache::clear_long_term(1);
 }
 
 /// Gets entry size in bytes.
diff --git a/arbos/programs/native.go b/arbos/programs/native.go
index f1fde5c556..e5c2632667 100644
--- a/arbos/programs/native.go
+++ b/arbos/programs/native.go
@@ -393,8 +393,8 @@ func ClearWasmLruCache() {
 }
 
 // Used for testing
-func ClearWasmLongTermCache(arbos_tag uint32) {
-	C.stylus_clear_long_term_cache(u32(arbos_tag))
+func ClearWasmLongTermCache() {
+	C.stylus_clear_long_term_cache()
 }
 
 // Used for testing
diff --git a/system_tests/program_test.go b/system_tests/program_test.go
index aab207e0f6..fac459118b 100644
--- a/system_tests/program_test.go
+++ b/system_tests/program_test.go
@@ -2183,7 +2183,7 @@ func TestWasmLongTermCache(t *testing.T) {
 
 	ownerAuth.Value = common.Big0
 
-	programs.ClearWasmLongTermCache(1)
+	programs.ClearWasmLongTermCache()
 	checkLongTermCacheMetrics(t, programs.WasmLongTermCacheMetrics{
 		Count:     0,
 		SizeBytes: 0,
@@ -2312,7 +2312,7 @@ func TestRepopulateWasmLongTermCacheFromLru(t *testing.T) {
 
 	ownerAuth.Value = common.Big0
 
-	programs.ClearWasmLongTermCache(1)
+	programs.ClearWasmLongTermCache()
 	programs.ClearWasmLruCache()
 	// only 2 out of 3 programs should fit lru
 	programs.SetWasmLruCacheCapacity(
@@ -2339,7 +2339,7 @@ func TestRepopulateWasmLongTermCacheFromLru(t *testing.T) {
 	})
 
 	// clear long term cache to emulate restart
-	programs.ClearWasmLongTermCache(1)
+	programs.ClearWasmLongTermCache()
 	programs.ClearWasmLruCache()
 
 	checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{
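After this change the Go test helpers need no tag plumbing at all. A minimal sketch of the resulting test-only reset sequence, assuming the arbos/programs package as patched above (resetStylusCachesForTest is a name invented here):

	// resetStylusCachesForTest clears both Stylus caches between test
	// phases. ClearWasmLongTermCache now always clears entries cached
	// under the fixed ArbOS tag (1).
	func resetStylusCachesForTest() {
		programs.ClearWasmLongTermCache()
		programs.ClearWasmLruCache()
	}
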
ownerAuth, "keccak") mathProgramAddress, mathEntrySize := deployWasmAndGetEntrySizeEstimateBytes(t, builder, ownerAuth, "math") + if fallibleEntrySize == keccakEntrySize || fallibleEntrySize == mathEntrySize || keccakEntrySize == mathEntrySize { + Fatal(t, "at least two programs have the same entry size") + } ownerAuth.Value = common.Big0 From 737ffb1fe659a9d4e618d46a22d24de466338a0c Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 8 Oct 2024 15:54:47 +0200 Subject: [PATCH 40/41] program_test: add comment --- system_tests/program_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/system_tests/program_test.go b/system_tests/program_test.go index fa07e3b5f1..4755096b26 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -2366,6 +2366,7 @@ func TestRepopulateWasmLongTermCacheFromLru(t *testing.T) { Require(t, err) // restore nonce in L2Info builder.L2Info.GetInfoWithPrivKey("Owner").Nonce.Store(nonce) + // fallibleProgram should be added only to lru cache as the api call should be processed with wasm cache tag = 0 checkLruCacheMetrics(t, programs.WasmLruCacheMetrics{ Count: 1, SizeBytes: fallibleEntrySize, From 6396a775ef12a9f0ff517b0f39a4f13ee02f0a0c Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Wed, 9 Oct 2024 14:03:06 +0200 Subject: [PATCH 41/41] count long term cache misses only when cache tag is 1 --- arbitrator/stylus/src/cache.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 6192a30eff..5df4c62f7a 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -177,7 +177,10 @@ impl InitCache { cache.long_term_counters.hits += 1; return Some(data); } - cache.long_term_counters.misses += 1; + if long_term_tag == Self::ARBOS_TAG { + // only count misses only when we can expect to find the item in long term cache + cache.long_term_counters.misses += 1; + } // See if the item is in the LRU cache, promoting if so if let Some(item) = cache.lru.peek(&key).cloned() {