diff --git a/LICENSE b/LICENSE.md similarity index 78% rename from LICENSE rename to LICENSE.md index 0739c2d8bf..ea9a53da75 100644 --- a/LICENSE +++ b/LICENSE.md @@ -10,27 +10,28 @@ Parameters Licensor: Offchain Labs Licensed Work: Arbitrum Nitro - The Licensed Work is (c) 2021-2023 Offchain Labs + The Licensed Work is (c) 2021-2024 Offchain Labs Additional Use Grant: You may use the Licensed Work in a production environment solely to provide a point of interface to permit end users or applications utilizing the Covered Arbitrum Chains to interact and query the state of a Covered Arbitrum Chain, including without limitation - validating the correctness of the posted chain state. For purposes - of this Additional Use Grant, the "Covered Arbitrum Chains" are - means (a) Arbitrum One (chainid:42161), Arbitrum Nova (chainid:42170), - Arbitrum Rinkeby testnet/Rinkarby (chainid:421611), and - Arbitrum Nitro Goerli testnet (chainid:421613) (b) any future - blockchains authorized to be designated as Covered Arbitrum Chains - by the decentralized autonomous organization governing the Arbitrum - network; and (c) any “Layer 3” Arbitrum-based blockchain that is built - on and settles to another Covered Arbitrum Chain. - - - - - -Change Date: Dec 31, 2027 + validating the correctness of the posted chain state, or to deploy + and operate (x) a blockchain that settles to a Covered Arbitrum Chain + or (y) a blockchain in accordance with, and subject to, the [Arbitrum + Expansion Program Term of Use](https://docs.arbitrum.foundation/assets/files/Arbitrum%20Expansion%20Program%20Jan182024-4f08b0c2cb476a55dc153380fa3e64b0.pdf). For purposes of this + Additional Use Grant, the "Covered Arbitrum Chains" are + (a) Arbitrum One (chainid:42161), Arbitrum Nova (chainid:42170), + Arbitrum Rinkeby testnet/Rinkarby (chainid:421611), Arbitrum Nitro + Goerli testnet (chainid:421613), and Arbitrum Sepolia Testnet + (chainid:421614); (b) any future blockchains authorized to be + designated as Covered Arbitrum Chains by the decentralized autonomous + organization governing the Arbitrum network; and (c) any “Layer 3” + Arbitrum-based blockchain that is built on and settles to another + Covered Arbitrum Chain. + + +Change Date: Dec 31, 2028 Change License: Apache License Version 2.0 diff --git a/Makefile b/Makefile index f03f6d5860..003732551e 100644 --- a/Makefile +++ b/Makefile @@ -310,38 +310,6 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make - go run ./linter/recursivelock ./... - go run ./linter/comparesame ./... - - # Disabled since we have a lot of use of math/rand package. - # We should probably move to crypto/rand at some point even though most of - # our uses doesn't seem to be security sensitive. - # TODO fix this and enable. - # go run ./linter/cryptorand ./... - - # This yields lot of legitimate warnings, most of which in practice would - # probably never happen. - # # TODO fix this and enable. - # go run ./linter/errcheck ./... - - go run ./linter/featureconfig ./... - - # Disabled since we have high cognitive complexity several places. - # TODO fix this and enable. - # go run ./linter/gocognit ./... - - go run ./linter/ineffassign ./... - go run ./linter/interfacechecker ./... - go run ./linter/logruswitherror ./... - - go run ./linter/shadowpredecl ./... - go run ./linter/slicedirect ./...
- - # Disabled since it fails many places, although ones I looked into seem - # to be false positives logically. - # TODO fix this and enable and mark false positives with lint ignore. - # go run ./linter/uintcast ./... - go run ./linter/koanf ./... go run ./linter/pointercheck ./... golangci-lint run --fix diff --git a/README.md b/README.md index 67a182ec30..4a522be82f 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,11 @@ Arbitrum One successfully migrated from the Classic Arbitrum stack onto Nitro on ## License -We currently have Nitro [licensed](./LICENSE) under a Business Source License, similar to our friends at Uniswap and Aave, with an "Additional Use Grant" to ensure that everyone can have full comfort using and running nodes on all public Arbitrum chains. +Nitro is currently licensed under a [Business Source License](./LICENSE), similar to our friends at Uniswap and Aave, with an "Additional Use Grant" to ensure that everyone can have full comfort using and running nodes on all public Arbitrum chains. + +The Additional Use Grant also permits the deployment of the Nitro software, in a permissionless fashion and without cost, as a new blockchain provided that the chain settles to either Arbitrum One or Arbitrum Nova. + +For those that prefer to deploy the Nitro software either directly on Ethereum (i.e. an L2) or have it settle to another Layer-2 on top of Ethereum, the [Arbitrum Expansion Program (the "AEP")](https://docs.arbitrum.foundation/assets/files/Arbitrum%20Expansion%20Program%20Jan182024-4f08b0c2cb476a55dc153380fa3e64b0.pdf) was recently established. The AEP allows for the permissionless deployment in the aforementioned fashion provided that 10% of net revenue (as more fully described in the AEP) is contributed back to the Arbitrum community in accordance with the requirements of the AEP. 
## Contact diff --git a/arbitrator/jit/src/machine.rs b/arbitrator/jit/src/machine.rs index db25a0dee7..cd3420e27e 100644 --- a/arbitrator/jit/src/machine.rs +++ b/arbitrator/jit/src/machine.rs @@ -284,7 +284,7 @@ impl WasmEnv { Ok(env) } - pub fn send_results(&mut self, error: Option) { + pub fn send_results(&mut self, error: Option, memory_used: u64) { let writer = match &mut self.process.socket { Some((writer, _)) => writer, None => return, @@ -311,6 +311,7 @@ impl WasmEnv { check!(socket::write_u64(writer, self.small_globals[1])); check!(socket::write_bytes32(writer, &self.large_globals[0])); check!(socket::write_bytes32(writer, &self.large_globals[1])); + check!(socket::write_u64(writer, memory_used)); check!(writer.flush()); } } diff --git a/arbitrator/jit/src/main.rs b/arbitrator/jit/src/main.rs index 513cd067c4..968da2a978 100644 --- a/arbitrator/jit/src/main.rs +++ b/arbitrator/jit/src/main.rs @@ -114,8 +114,9 @@ fn main() { true => None, false => Some(message), }; + let memory_used = memory.size().0 as u64 * 65_536; - env.send_results(error); + env.send_results(error, memory_used); } // require a usize be at least 32 bits wide diff --git a/arbitrator/jit/src/syscall.rs b/arbitrator/jit/src/syscall.rs index c81641a7f8..4f657eeefa 100644 --- a/arbitrator/jit/src/syscall.rs +++ b/arbitrator/jit/src/syscall.rs @@ -337,7 +337,7 @@ pub fn js_value_call(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { let value = match (object, method_name.as_slice()) { (Ref(GO_ID), b"_makeFuncWrapper") => { - let arg = match args.get(0) { + let arg = match args.first() { Some(arg) => arg, None => fail!( "Go trying to call Go._makeFuncWrapper with bad args {:?}", @@ -415,7 +415,7 @@ pub fn js_value_call(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { (Ref(CRYPTO_ID), b"getRandomValues") => { let name = "crypto.getRandomValues"; - let id = match args.get(0) { + let id = match args.first() { Some(Ref(x)) => x, _ => fail!("Go trying to call {name} with bad args {:?}", args), }; @@ -456,7 +456,7 @@ pub fn js_value_new(mut env: WasmEnvMut, sp: u32) { let args_len = sp.read_u64(2); let args = sp.read_value_slice(args_ptr, args_len); match class { - UINT8_ARRAY_ID => match args.get(0) { + UINT8_ARRAY_ID => match args.first() { Some(JsValue::Number(size)) => { let id = pool.insert(DynamicObject::Uint8Array(vec![0; *size as usize])); sp.write_u64(4, GoValue::Object(id).encode()); diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs index 0849312f3d..6ca552d83c 100644 --- a/arbitrator/prover/src/machine.rs +++ b/arbitrator/prover/src/machine.rs @@ -362,7 +362,7 @@ impl Module { bin.memories.len() <= 1, "Multiple memories are not supported" ); - if let Some(limits) = bin.memories.get(0) { + if let Some(limits) = bin.memories.first() { let page_size = Memory::PAGE_SIZE; let initial = limits.initial; // validate() checks this is less than max::u32 let allowed = u32::MAX as u64 / Memory::PAGE_SIZE - 1; // we require the size remain *below* 2^32 diff --git a/arbitrator/wasm-libraries/go-stub/src/lib.rs b/arbitrator/wasm-libraries/go-stub/src/lib.rs index df77893fcb..1a5d1963c7 100644 --- a/arbitrator/wasm-libraries/go-stub/src/lib.rs +++ b/arbitrator/wasm-libraries/go-stub/src/lib.rs @@ -218,7 +218,7 @@ pub unsafe extern "C" fn go__syscall_js_valueNew(sp: GoStack) { let args_len = sp.read_u64(2); let args = read_value_slice(args_ptr, args_len); if class == UINT8_ARRAY_ID { - if let Some(InterpValue::Number(size)) = args.get(0) { + if let Some(InterpValue::Number(size)) = args.first() { let id 
= DynamicObjectPool::singleton() .insert(DynamicObject::Uint8Array(vec![0; *size as usize])); sp.write_u64(4, GoValue::Object(id).encode()); @@ -321,7 +321,7 @@ unsafe fn value_call_impl(sp: &mut GoStack) -> Result { let args_len = sp.read_u64(4); let args = read_value_slice(args_ptr, args_len); if object == InterpValue::Ref(GO_ID) && &method_name == b"_makeFuncWrapper" { - let id = args.get(0).ok_or_else(|| { + let id = args.first().ok_or_else(|| { format!( "Go attempting to call Go._makeFuncWrapper with bad args {:?}", args, @@ -405,7 +405,7 @@ unsafe fn value_call_impl(sp: &mut GoStack) -> Result { )) } } else if object == InterpValue::Ref(CRYPTO_ID) && &method_name == b"getRandomValues" { - let id = match args.get(0) { + let id = match args.first() { Some(InterpValue::Ref(x)) => *x, _ => { return Err(format!( diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index a54336fd5a..c4fc500d76 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" @@ -37,6 +38,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/solgen/go/bridgegen" + "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" @@ -57,23 +59,22 @@ type batchPosterPosition struct { type BatchPoster struct { stopwaiter.StopWaiter - l1Reader *headerreader.HeaderReader - inbox *InboxTracker - streamer *TransactionStreamer - config BatchPosterConfigFetcher - seqInbox *bridgegen.SequencerInbox - bridge *bridgegen.Bridge - syncMonitor *SyncMonitor - seqInboxABI *abi.ABI - seqInboxAddr common.Address - bridgeAddr common.Address - gasRefunderAddr common.Address - building *buildingBatch - daWriter das.DataAvailabilityServiceWriter - dataPoster *dataposter.DataPoster - redisLock *redislock.Simple - firstEphemeralError time.Time // first time a continuous error suspected to be ephemeral occurred - messagesPerBatch *arbmath.MovingAverage[uint64] + l1Reader *headerreader.HeaderReader + inbox *InboxTracker + streamer *TransactionStreamer + config BatchPosterConfigFetcher + seqInbox *bridgegen.SequencerInbox + bridge *bridgegen.Bridge + syncMonitor *SyncMonitor + seqInboxABI *abi.ABI + seqInboxAddr common.Address + bridgeAddr common.Address + gasRefunderAddr common.Address + building *buildingBatch + daWriter das.DataAvailabilityServiceWriter + dataPoster *dataposter.DataPoster + redisLock *redislock.Simple + messagesPerBatch *arbmath.MovingAverage[uint64] // This is an atomic variable that should only be accessed atomically. // An estimate of the number of batches we want to post but haven't yet. // This doesn't include batches which we don't want to post yet due to the L1 bounds. 
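The struct comment just above refers to an atomically accessed backlog counter whose declaration is cut off by the diff context. A minimal sketch of the convention it describes, assuming an illustrative field name (`backlogEstimate` is not taken from this diff; only the `GetBacklogEstimate` method is, via the `ExtraBacklog: b.GetBacklogEstimate` wiring that appears below):

```go
package arbnode

import "sync/atomic"

type BatchPoster struct {
	backlogEstimate uint64 // assumed name; read and written only via sync/atomic
}

// GetBacklogEstimate is the method this diff hands to the data poster as its
// ExtraBacklog callback; a plausible implementation is a plain atomic load.
func (b *BatchPoster) GetBacklogEstimate() uint64 {
	return atomic.LoadUint64(&b.backlogEstimate)
}

func (b *BatchPoster) setBacklogEstimate(n uint64) {
	atomic.StoreUint64(&b.backlogEstimate, n)
}
```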
@@ -121,6 +122,7 @@ type BatchPosterConfig struct { ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` + UseAccessLists bool `koanf:"use-access-lists" reload:"hot"` gasRefunder common.Address l1BlockBound l1BlockBound @@ -167,6 +169,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, "if non-empty, the Redis URL to store queued transactions in") f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") + f.Bool(prefix+".use-access-lists", DefaultBatchPosterConfig.UseAccessLists, "post batches with access lists to reduce gas usage (disabled for L3s)") redislock.AddConfigOptions(prefix+".redis-lock", f) dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfig) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname) @@ -189,6 +192,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", L1BlockBoundBypass: time.Hour, + UseAccessLists: true, RedisLock: redislock.DefaultCfg, } @@ -215,18 +219,20 @@ var TestBatchPosterConfig = BatchPosterConfig{ ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", L1BlockBoundBypass: time.Hour, + UseAccessLists: true, } type BatchPosterOpts struct { - DataPosterDB ethdb.Database - L1Reader *headerreader.HeaderReader - Inbox *InboxTracker - Streamer *TransactionStreamer - SyncMonitor *SyncMonitor - Config BatchPosterConfigFetcher - DeployInfo *chaininfo.RollupAddresses - TransactOpts *bind.TransactOpts - DAWriter das.DataAvailabilityServiceWriter + DataPosterDB ethdb.Database + L1Reader *headerreader.HeaderReader + Inbox *InboxTracker + Streamer *TransactionStreamer + SyncMonitor *SyncMonitor + Config BatchPosterConfigFetcher + DeployInfo *chaininfo.RollupAddresses + TransactOpts *bind.TransactOpts + DAWriter das.DataAvailabilityServiceWriter + ParentChainID *big.Int } func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, error) { @@ -286,11 +292,11 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e HeaderReader: opts.L1Reader, Auth: opts.TransactOpts, RedisClient: redisClient, - RedisLock: redisLock, Config: dataPosterConfigFetcher, MetadataRetriever: b.getBatchPosterPosition, ExtraBacklog: b.GetBacklogEstimate, RedisKey: "data-poster.queue", + ParentChainID: opts.ParentChainID, }) if err != nil { return nil, err @@ -298,7 +304,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e // Dataposter sender may be external signer address, so we should initialize // access list after initializing dataposter. 
b.accessList = func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList { - if opts.L1Reader.IsParentChainArbitrum() { + if !b.config().UseAccessLists || opts.L1Reader.IsParentChainArbitrum() { // Access lists cost gas instead of saving gas when posting to L2s, // because data is expensive in comparison to computation. return nil @@ -770,6 +776,8 @@ func (b *BatchPoster) encodeAddBatch(seqNum *big.Int, prevMsgNum arbutil.Message return fullData, nil } +var ErrNormalGasEstimationFailed = errors.New("normal gas estimation failed") + func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realNonce uint64, realAccessList types.AccessList) (uint64, error) { config := b.config() useNormalEstimation := b.dataPoster.MaxMempoolTransactions() == 1 @@ -790,7 +798,7 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, AccessList: realAccessList, }) if err != nil { - return 0, err + return 0, fmt.Errorf("%w: %w", ErrNormalGasEstimationFailed, err) } return gas + config.ExtraBatchGas, nil } @@ -830,6 +838,8 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, const ethPosBlockTime = 12 * time.Second +var errAttemptLockFailed = errors.New("failed to acquire lock; either another batch poster posted a batch or this node fell behind") + func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) { if b.batchReverted.Load() { return false, fmt.Errorf("batch was reverted, not posting any more batches") @@ -1006,6 +1016,18 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } if b.daWriter != nil { + if !b.redisLock.AttemptLock(ctx) { + return false, errAttemptLockFailed + } + + gotNonce, gotMeta, err := b.dataPoster.GetNextNonceAndMeta(ctx) + if err != nil { + return false, err + } + if nonce != gotNonce || !bytes.Equal(batchPositionBytes, gotMeta) { + return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce) + } + cert, err := b.daWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}) // b.daWriter will append signature if enabled if errors.Is(err, das.BatchToDasFailed) { if config.DisableDasFallbackStoreDataOnChain { @@ -1129,6 +1151,18 @@ func (b *BatchPoster) Start(ctxIn context.Context) { b.redisLock.Start(ctxIn) b.StopWaiter.Start(ctxIn, b) b.LaunchThread(b.pollForReverts) + commonEphemeralErrorHandler := util.NewEphemeralErrorHandler(time.Minute, "", 0) + exceedMaxMempoolSizeEphemeralErrorHandler := util.NewEphemeralErrorHandler(5*time.Minute, dataposter.ErrExceedsMaxMempoolSize.Error(), time.Minute) + storageRaceEphemeralErrorHandler := util.NewEphemeralErrorHandler(5*time.Minute, storage.ErrStorageRace.Error(), time.Minute) + normalGasEstimationFailedEphemeralErrorHandler := util.NewEphemeralErrorHandler(5*time.Minute, ErrNormalGasEstimationFailed.Error(), time.Minute) + accumulatorNotFoundEphemeralErrorHandler := util.NewEphemeralErrorHandler(5*time.Minute, AccumulatorNotFoundErr.Error(), time.Minute) + resetAllEphemeralErrs := func() { + commonEphemeralErrorHandler.Reset() + exceedMaxMempoolSizeEphemeralErrorHandler.Reset() + storageRaceEphemeralErrorHandler.Reset() + normalGasEstimationFailedEphemeralErrorHandler.Reset() + accumulatorNotFoundEphemeralErrorHandler.Reset() + } b.CallIteratively(func(ctx context.Context) time.Duration { var err error if common.HexToAddress(b.config().GasRefunderAddress) 
!= (common.Address{}) { @@ -1147,27 +1181,39 @@ func (b *BatchPoster) Start(ctxIn context.Context) { batchPosterWalletBalance.Update(arbmath.BalancePerEther(walletBalance)) } } - if !b.redisLock.AttemptLock(ctx) { + couldLock, err := b.redisLock.CouldAcquireLock(ctx) + if err != nil { + log.Warn("Error checking if we could acquire redis lock", "err", err) + // Might as well try, worst case we fail to lock + couldLock = true + } + if !couldLock { + log.Debug("Not posting batches right now because another batch poster has the lock or this node is behind") b.building = nil + resetAllEphemeralErrs() return b.config().PollInterval } posted, err := b.maybePostSequencerBatch(ctx) if err == nil { - b.firstEphemeralError = time.Time{} + resetAllEphemeralErrs() } if err != nil { + if ctx.Err() != nil { + // Shutting down. No need to print the context canceled error. + return 0 + } b.building = nil logLevel := log.Error // Likely the inbox tracker just isn't caught up. // Let's see if this error disappears naturally. - if b.firstEphemeralError == (time.Time{}) { - b.firstEphemeralError = time.Now() - logLevel = log.Warn - } else if time.Since(b.firstEphemeralError) < time.Minute { - logLevel = log.Warn - } else if time.Since(b.firstEphemeralError) < time.Minute*5 && strings.Contains(err.Error(), "will exceed max mempool size") { - logLevel = log.Warn - } + logLevel = commonEphemeralErrorHandler.LogLevel(err, logLevel) + // If the error matches one of these, it's only logged at debug for the first minute, + // then at warn for the next 4 minutes, then at error. If the error isn't one of these, + // it'll be logged at warn for the first minute, then at error. + logLevel = exceedMaxMempoolSizeEphemeralErrorHandler.LogLevel(err, logLevel) + logLevel = storageRaceEphemeralErrorHandler.LogLevel(err, logLevel) + logLevel = normalGasEstimationFailedEphemeralErrorHandler.LogLevel(err, logLevel) + logLevel = accumulatorNotFoundEphemeralErrorHandler.LogLevel(err, logLevel) logLevel("error posting batch", "err", err) return b.config().ErrorDelay } else if posted { diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 059e080eea..09f3e218b1 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" "github.com/offchainlabs/nitro/arbnode/dataposter/noop" @@ -57,12 +58,12 @@ type DataPoster struct { client arbutil.L1Interface auth *bind.TransactOpts signer signerFn - redisLock AttemptLocker config ConfigFetcher usingNoOpStorage bool replacementTimes []time.Duration metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) extraBacklog func() uint64 + parentChainID *big.Int // These fields are protected by the mutex. // TODO: factor out these fields into separate structure, since now one @@ -84,10 +85,6 @@ type DataPoster struct { // This can be local or external, hence the context parameter. 
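The `util.NewEphemeralErrorHandler` helper that `BatchPoster.Start` now leans on is not part of this diff. A minimal sketch consistent with its call sites above, where the arguments read as `(duration, errorString, ignoreDuration)`, `LogLevel` downgrades the level while a matching error is young, and `Reset` clears the timer (this is an assumption about the util package, not its actual source):

```go
package util

import (
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/log"
)

// EphemeralErrorHandler suppresses the log level of matching errors until
// they have persisted for longer than Duration.
type EphemeralErrorHandler struct {
	Duration       time.Duration // after this, return the caller's level unchanged
	ErrorString    string        // substring to match; "" matches every error
	IgnoreDuration time.Duration // within this window, log at Debug instead of Warn

	firstOccurrence time.Time
}

func NewEphemeralErrorHandler(duration time.Duration, errorString string, ignoreDuration time.Duration) *EphemeralErrorHandler {
	return &EphemeralErrorHandler{Duration: duration, ErrorString: errorString, IgnoreDuration: ignoreDuration}
}

// LogLevel returns Debug or Warn while err is still considered ephemeral,
// and currentLogLevel (typically log.Error) once Duration has elapsed or
// when err doesn't match ErrorString.
func (h *EphemeralErrorHandler) LogLevel(err error, currentLogLevel func(msg string, ctx ...interface{})) func(msg string, ctx ...interface{}) {
	if h.ErrorString != "" && !strings.Contains(err.Error(), h.ErrorString) {
		return currentLogLevel
	}
	if h.firstOccurrence.IsZero() {
		h.firstOccurrence = time.Now()
	}
	if time.Since(h.firstOccurrence) < h.IgnoreDuration {
		return log.Debug
	}
	if time.Since(h.firstOccurrence) < h.Duration {
		return log.Warn
	}
	return currentLogLevel
}

// Reset forgets the error so the next occurrence restarts both timers.
func (h *EphemeralErrorHandler) Reset() {
	h.firstOccurrence = time.Time{}
}
```

Under this sketch, a handler built with `(5*time.Minute, errStr, time.Minute)` logs matching errors at debug for the first minute, at warn until five minutes, then at the caller's original level, exactly as the comment in the `Start` loop describes.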
type signerFn func(context.Context, common.Address, *types.Transaction) (*types.Transaction, error) -type AttemptLocker interface { - AttemptLock(context.Context) bool -} - func parseReplacementTimes(val string) ([]time.Duration, error) { var res []time.Duration var lastReplacementTime time.Duration @@ -114,11 +111,11 @@ type DataPosterOpts struct { HeaderReader *headerreader.HeaderReader Auth *bind.TransactOpts RedisClient redis.UniversalClient - RedisLock AttemptLocker Config ConfigFetcher MetadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) ExtraBacklog func() uint64 RedisKey string // Redis storage key + ParentChainID *big.Int } func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, error) { @@ -175,10 +172,10 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro replacementTimes: replacementTimes, metadataRetriever: opts.MetadataRetriever, queue: queue, - redisLock: opts.RedisLock, errorCount: make(map[uint64]int), maxFeeCapExpression: expression, extraBacklog: opts.ExtraBacklog, + parentChainID: opts.ParentChainID, } if dp.extraBacklog == nil { dp.extraBacklog = func() uint64 { return 0 } @@ -196,6 +193,7 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro }, } } + return dp, nil } @@ -236,6 +234,35 @@ func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error ) } +// txToSendTxArgs converts transaction to SendTxArgs. This is needed for +// external signer to specify From field. +func txToSendTxArgs(addr common.Address, tx *types.Transaction) (*apitypes.SendTxArgs, error) { + var to *common.MixedcaseAddress + if tx.To() != nil { + to = new(common.MixedcaseAddress) + *to = common.NewMixedcaseAddress(*tx.To()) + } + data := (hexutil.Bytes)(tx.Data()) + val := (*hexutil.Big)(tx.Value()) + if val == nil { + val = (*hexutil.Big)(big.NewInt(0)) + } + al := tx.AccessList() + return &apitypes.SendTxArgs{ + From: common.NewMixedcaseAddress(addr), + To: to, + Gas: hexutil.Uint64(tx.Gas()), + GasPrice: (*hexutil.Big)(tx.GasPrice()), + MaxFeePerGas: (*hexutil.Big)(tx.GasFeeCap()), + MaxPriorityFeePerGas: (*hexutil.Big)(tx.GasTipCap()), + Value: *val, + Nonce: hexutil.Uint64(tx.Nonce()), + Data: &data, + AccessList: &al, + ChainID: (*hexutil.Big)(tx.ChainId()), + }, nil +} + // externalSigner returns signer function and ethereum address of the signer. // Returns an error if address isn't specified or if it can't connect to the // signer RPC server. @@ -249,27 +276,27 @@ func externalSigner(ctx context.Context, opts *ExternalSignerCfg) (signerFn, com return nil, common.Address{}, fmt.Errorf("error connecting external signer: %w", err) } sender := common.HexToAddress(opts.Address) - - var hasher types.Signer return func(ctx context.Context, addr common.Address, tx *types.Transaction) (*types.Transaction, error) { // According to the "eth_signTransaction" API definition, this should be // RLP encoded transaction object. 
// https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_signtransaction var data hexutil.Bytes - if err := client.CallContext(ctx, &data, opts.Method, tx); err != nil { - return nil, fmt.Errorf("signing transaction: %w", err) + args, err := txToSendTxArgs(addr, tx) + if err != nil { + return nil, fmt.Errorf("error converting transaction to sendTxArgs: %w", err) } - var signedTx types.Transaction - if err := rlp.DecodeBytes(data, &signedTx); err != nil { - return nil, fmt.Errorf("error decoding signed transaction: %w", err) + if err := client.CallContext(ctx, &data, opts.Method, args); err != nil { + return nil, fmt.Errorf("making signing request to external signer: %w", err) } - if hasher == nil { - hasher = types.LatestSignerForChainID(tx.ChainId()) + signedTx := &types.Transaction{} + if err := signedTx.UnmarshalBinary(data); err != nil { + return nil, fmt.Errorf("unmarshaling signed transaction: %w", err) } - if hasher.Hash(tx) != hasher.Hash(&signedTx) { - return nil, fmt.Errorf("transaction: %x from external signer differs from request: %x", hasher.Hash(&signedTx), hasher.Hash(tx)) + hasher := types.LatestSignerForChainID(tx.ChainId()) + if h := hasher.Hash(args.ToTransaction()); h != hasher.Hash(signedTx) { + return nil, fmt.Errorf("transaction: %x from external signer differs from request: %x", hasher.Hash(signedTx), h) } - return &signedTx, nil + return signedTx, nil }, sender, nil } @@ -288,6 +315,8 @@ func (p *DataPoster) MaxMempoolTransactions() uint64 { return p.config().MaxMempoolTransactions } +var ErrExceedsMaxMempoolSize = errors.New("posting this transaction will exceed max mempool size") + // Does basic check whether posting transaction with specified nonce would // result in exceeding maximum queue length or maximum transactions in mempool. 
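A note on the hash comparison in the `externalSigner` rewrite above: a `types.Signer`'s `Hash` covers exactly the fields that get signed and excludes the signature itself, so equal hashes prove the external signer signed precisely the transaction that was requested, without trusting any other part of its response. A small self-contained illustration (example values are made up):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	requested := types.NewTx(&types.DynamicFeeTx{
		ChainID:   big.NewInt(1337),
		Nonce:     13,
		GasTipCap: big.NewInt(1),
		GasFeeCap: big.NewInt(1),
		Gas:       21000,
		Value:     big.NewInt(1),
		Data:      []byte{0x01, 0x02, 0x03},
	})
	// LatestSignerForChainID picks the most recent signer for the chain; its
	// Hash method is the signing hash, which ignores the V, R, S values.
	hasher := types.LatestSignerForChainID(requested.ChainId())
	fmt.Println(hasher.Hash(requested)) // identical before and after signing
}
```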
func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) error { @@ -310,7 +339,7 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) err return fmt.Errorf("getting nonce of a dataposter sender: %w", err) } if nextNonce >= cfg.MaxMempoolTransactions+unconfirmedNonce { - return fmt.Errorf("posting a transaction with nonce: %d will exceed max mempool size: %d, unconfirmed nonce: %d", nextNonce, cfg.MaxMempoolTransactions, unconfirmedNonce) + return fmt.Errorf("%w: transaction nonce: %d, unconfirmed nonce: %d, max mempool size: %d", ErrExceedsMaxMempoolSize, nextNonce, unconfirmedNonce, cfg.MaxMempoolTransactions) } } return nil @@ -533,7 +562,7 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim return nil, err } if nonce != expectedNonce { - return nil, fmt.Errorf("data poster expected next transaction to have nonce %v but was requested to post transaction with nonce %v", expectedNonce, nonce) + return nil, fmt.Errorf("%w: data poster expected next transaction to have nonce %v but was requested to post transaction with nonce %v", storage.ErrStorageRace, expectedNonce, nonce) } err = p.updateBalance(ctx) @@ -554,6 +583,7 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim Value: value, Data: calldata, AccessList: accessList, + ChainID: p.parentChainID, } fullTx, err := p.signer(ctx, p.Sender(), types.NewTx(&inner)) if err != nil { @@ -745,9 +775,6 @@ func (p *DataPoster) Start(ctxIn context.Context) { p.CallIteratively(func(ctx context.Context) time.Duration { p.mutex.Lock() defer p.mutex.Unlock() - if !p.redisLock.AttemptLock(ctx) { - return minWait - } err := p.updateBalance(ctx) if err != nil { log.Warn("failed to update tx poster balance", "err", err) diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 74b4aff18e..3d7fa60dc7 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -2,27 +2,18 @@ package dataposter import ( "context" - "crypto/tls" - "crypto/x509" - "encoding/json" "fmt" - "io" "math/big" "net/http" - "os" "testing" "time" "github.com/Knetic/govaluate" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/google/go-cmp/cmp" + "github.com/offchainlabs/nitro/arbnode/dataposter/externalsignertest" ) func TestParseReplacementTimes(t *testing.T) { @@ -60,9 +51,24 @@ func TestParseReplacementTimes(t *testing.T) { } } +func signerTestCfg(addr common.Address) (*ExternalSignerCfg, error) { + cp, err := externalsignertest.CertPaths() + if err != nil { + return nil, fmt.Errorf("getting certificates path: %w", err) + } + return &ExternalSignerCfg{ + Address: common.Bytes2Hex(addr.Bytes()), + URL: externalsignertest.SignerURL, + Method: externalsignertest.SignerMethod, + RootCA: cp.ServerCert, + ClientCert: cp.ClientCert, + ClientPrivateKey: cp.ClientKey, + }, nil +} + func TestExternalSigner(t *testing.T) { ctx := context.Background() - httpSrv, srv := newServer(ctx, t) + httpSrv, srv := externalsignertest.NewServer(ctx, t) t.Cleanup(func() { if err := httpSrv.Shutdown(ctx); err != nil { t.Fatalf("Error shutting down http server: %v", err) @@ -76,172 
+82,39 @@ func TestExternalSigner(t *testing.T) { return } }() - signer, addr, err := externalSigner(ctx, - &ExternalSignerCfg{ - Address: srv.address.Hex(), - URL: "https://localhost:1234", - Method: "test_signTransaction", - RootCA: cert, - ClientCert: "./testdata/client.crt", - ClientPrivateKey: "./testdata/client.key", - }) - if err != nil { - t.Fatalf("Error getting external signer: %v", err) - } - tx := types.NewTransaction(13, common.HexToAddress("0x01"), big.NewInt(1), 2, big.NewInt(3), []byte{0x01, 0x02, 0x03}) - got, err := signer(ctx, addr, tx) - if err != nil { - t.Fatalf("Error signing transaction with external signer: %v", err) - } - want, err := srv.signerFn(addr, tx) - if err != nil { - t.Fatalf("Error signing transaction: %v", err) - } - if diff := cmp.Diff(want.Hash(), got.Hash()); diff != "" { - t.Errorf("Signing transaction: unexpected diff: %v\n", diff) - } -} - -type server struct { - handlers map[string]func(*json.RawMessage) (string, error) - signerFn bind.SignerFn - address common.Address -} - -type request struct { - ID *json.RawMessage `json:"id"` - Method string `json:"method"` - Params *json.RawMessage `json:"params"` -} - -type response struct { - ID *json.RawMessage `json:"id"` - Result string `json:"result,omitempty"` -} - -// newServer returns http server and server struct that implements RPC methods. -// It sets up an account in temporary directory and cleans up after test is -// done. -func newServer(ctx context.Context, t *testing.T) (*http.Server, *server) { - t.Helper() - signer, address, err := setupAccount("/tmp/keystore") + signerCfg, err := signerTestCfg(srv.Address) if err != nil { - t.Fatalf("Error setting up account: %v", err) + t.Fatalf("Error getting signer test config: %v", err) } - t.Cleanup(func() { os.RemoveAll("/tmp/keystore") }) - - s := &server{signerFn: signer, address: address} - s.handlers = map[string]func(*json.RawMessage) (string, error){ - "test_signTransaction": s.signTransaction, - } - m := http.NewServeMux() - - clientCert, err := os.ReadFile("./testdata/client.crt") + signer, addr, err := externalSigner(ctx, signerCfg) if err != nil { - t.Fatalf("Error reading client certificate: %v", err) + t.Fatalf("Error getting external signer: %v", err) } - pool := x509.NewCertPool() - pool.AppendCertsFromPEM(clientCert) - - httpSrv := &http.Server{ - Addr: ":1234", - Handler: m, - ReadTimeout: 5 * time.Second, - TLSConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: pool, + tx := types.NewTx( + &types.DynamicFeeTx{ + Nonce: 13, + GasTipCap: big.NewInt(1), + GasFeeCap: big.NewInt(1), + Gas: 3, + To: nil, + Value: big.NewInt(1), + Data: []byte{0x01, 0x02, 0x03}, }, - } - m.HandleFunc("/", s.mux) - return httpSrv, s -} - -// setupAccount creates a new account in a given directory, unlocks it, creates -// signer with that account and returns it along with account address. 
-func setupAccount(dir string) (bind.SignerFn, common.Address, error) { - ks := keystore.NewKeyStore( - dir, - keystore.StandardScryptN, - keystore.StandardScryptP, ) - a, err := ks.NewAccount("password") - if err != nil { - return nil, common.Address{}, fmt.Errorf("creating account account: %w", err) - } - if err := ks.Unlock(a, "password"); err != nil { - return nil, common.Address{}, fmt.Errorf("unlocking account: %w", err) - } - txOpts, err := bind.NewKeyStoreTransactorWithChainID(ks, a, big.NewInt(1)) - if err != nil { - return nil, common.Address{}, fmt.Errorf("creating transactor: %w", err) - } - return txOpts.Signer, a.Address, nil -} - -// UnmarshallFirst unmarshalls slice of params and returns the first one. -// Parameters in Go ethereum RPC calls are marashalled as slices. E.g. -// eth_sendRawTransaction or eth_signTransaction, marshall transaction as a -// slice of transactions in a message: -// https://github.com/ethereum/go-ethereum/blob/0004c6b229b787281760b14fb9460ffd9c2496f1/rpc/client.go#L548 -func unmarshallFirst(params []byte) (*types.Transaction, error) { - var arr []apitypes.SendTxArgs - if err := json.Unmarshal(params, &arr); err != nil { - return nil, fmt.Errorf("unmarshaling first param: %w", err) - } - if len(arr) != 1 { - return nil, fmt.Errorf("argument should be a single transaction, but got: %d", len(arr)) - } - return arr[0].ToTransaction(), nil -} - -func (s *server) signTransaction(params *json.RawMessage) (string, error) { - tx, err := unmarshallFirst(*params) - if err != nil { - return "", err - } - signedTx, err := s.signerFn(s.address, tx) - if err != nil { - return "", fmt.Errorf("signing transaction: %w", err) - } - data, err := rlp.EncodeToBytes(signedTx) - if err != nil { - return "", fmt.Errorf("rlp encoding transaction: %w", err) - } - return hexutil.Encode(data), nil -} - -func (s *server) mux(w http.ResponseWriter, r *http.Request) { - body, err := io.ReadAll(r.Body) + got, err := signer(ctx, addr, tx) if err != nil { - http.Error(w, "can't read body", http.StatusBadRequest) - return - } - var req request - if err := json.Unmarshal(body, &req); err != nil { - http.Error(w, "can't unmarshal JSON request", http.StatusBadRequest) - return - } - method, ok := s.handlers[req.Method] - if !ok { - http.Error(w, "method not found", http.StatusNotFound) - return + t.Fatalf("Error signing transaction with external signer: %v", err) } - result, err := method(req.Params) + args, err := txToSendTxArgs(addr, tx) if err != nil { - fmt.Printf("error calling method: %v\n", err) - http.Error(w, "error calling method", http.StatusInternalServerError) - return + t.Fatalf("Error converting transaction to sendTxArgs: %v", err) } - resp := response{ID: req.ID, Result: result} - respBytes, err := json.Marshal(resp) + want, err := srv.SignerFn(addr, args.ToTransaction()) if err != nil { - http.Error(w, fmt.Sprintf("error encoding response: %v", err), http.StatusInternalServerError) - return + t.Fatalf("Error signing transaction: %v", err) } - w.Header().Set("Content-Type", "application/json") - if _, err := w.Write(respBytes); err != nil { - fmt.Printf("error writing response: %v\n", err) + if diff := cmp.Diff(want.Hash(), got.Hash()); diff != "" { + t.Errorf("Signing transaction: unexpected diff: %v\n", diff) } } diff --git a/arbnode/dataposter/externalsignertest/externalsignertest.go b/arbnode/dataposter/externalsignertest/externalsignertest.go new file mode 100644 index 0000000000..7d15515feb --- /dev/null +++ 
b/arbnode/dataposter/externalsignertest/externalsignertest.go @@ -0,0 +1,153 @@ +package externalsignertest + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "math/big" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/signer/core/apitypes" +) + +var ( + dataPosterPath = "arbnode/dataposter" + selfPath = filepath.Join(dataPosterPath, "externalsignertest") + + SignerPort = 1234 + SignerURL = fmt.Sprintf("https://localhost:%v", SignerPort) + SignerMethod = "test_signTransaction" +) + +type CertAbsPaths struct { + ServerCert string + ServerKey string + ClientCert string + ClientKey string +} + +func basePath() (string, error) { + _, file, _, ok := runtime.Caller(1) + if !ok { + return "", fmt.Errorf("error getting caller") + } + idx := strings.Index(file, selfPath) + if idx == -1 { + return "", fmt.Errorf("error determining base path, selfPath: %q is not substring of current file path: %q", selfPath, file) + } + return file[:idx], nil +} + +func testDataPath() (string, error) { + base, err := basePath() + if err != nil { + return "", fmt.Errorf("getting base path: %w", err) + } + return filepath.Join(base, dataPosterPath, "testdata"), nil +} + +func CertPaths() (*CertAbsPaths, error) { + td, err := testDataPath() + if err != nil { + return nil, fmt.Errorf("getting test data path: %w", err) + } + return &CertAbsPaths{ + ServerCert: filepath.Join(td, "localhost.crt"), + ServerKey: filepath.Join(td, "localhost.key"), + ClientCert: filepath.Join(td, "client.crt"), + ClientKey: filepath.Join(td, "client.key"), + }, nil +} + +func NewServer(ctx context.Context, t *testing.T) (*http.Server, *SignerAPI) { + rpcServer := rpc.NewServer() + signer, address, err := setupAccount("/tmp/keystore") + if err != nil { + t.Fatalf("Error setting up account: %v", err) + } + t.Cleanup(func() { os.RemoveAll("/tmp/keystore") }) + s := &SignerAPI{SignerFn: signer, Address: address} + if err := rpcServer.RegisterName("test", s); err != nil { + t.Fatalf("Failed to register EthSigningAPI, error: %v", err) + } + cp, err := CertPaths() + if err != nil { + t.Fatalf("Error getting certificate paths: %v", err) + } + clientCert, err := os.ReadFile(cp.ClientCert) + if err != nil { + t.Fatalf("Error reading client certificate: %v", err) + } + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(clientCert) + + httpServer := &http.Server{ + Addr: fmt.Sprintf(":%d", SignerPort), + Handler: rpcServer, + ReadTimeout: 30 * time.Second, + ReadHeaderTimeout: 30 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 120 * time.Second, + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: pool, + }, + } + + return httpServer, s +} + +// setupAccount creates a new account in a given directory, unlocks it, creates +// signer with that account and returns it along with account address. 
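One non-obvious detail in `NewServer` above: go-ethereum's rpc server derives JSON-RPC method names by prefixing the namespace and lower-casing the first letter of the exported Go method, so `RegisterName("test", s)` serves `SignerAPI.SignTransaction` as `test_signTransaction`, matching `SignerMethod`. A client-side sketch of calling it over mutual TLS (the `tlsCfg` wiring is assumed, and `rpc.DialOptions` availability depends on the vendored go-ethereum version):

```go
import (
	"context"
	"crypto/tls"
	"net/http"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/signer/core/apitypes"

	"github.com/offchainlabs/nitro/arbnode/dataposter/externalsignertest"
)

func signViaTestServer(ctx context.Context, tlsCfg *tls.Config, txArgs *apitypes.SendTxArgs) (*types.Transaction, error) {
	client, err := rpc.DialOptions(ctx, externalsignertest.SignerURL,
		rpc.WithHTTPClient(&http.Client{
			Transport: &http.Transport{TLSClientConfig: tlsCfg},
		}))
	if err != nil {
		return nil, err
	}
	defer client.Close()
	var signed hexutil.Bytes
	if err := client.CallContext(ctx, &signed, externalsignertest.SignerMethod, txArgs); err != nil {
		return nil, err
	}
	tx := new(types.Transaction)
	if err := tx.UnmarshalBinary(signed); err != nil { // server returns MarshalBinary output
		return nil, err
	}
	return tx, nil
}
```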
+func setupAccount(dir string) (bind.SignerFn, common.Address, error) { + ks := keystore.NewKeyStore( + dir, + keystore.StandardScryptN, + keystore.StandardScryptP, + ) + a, err := ks.NewAccount("password") + if err != nil { + return nil, common.Address{}, fmt.Errorf("creating account: %w", err) + } + if err := ks.Unlock(a, "password"); err != nil { + return nil, common.Address{}, fmt.Errorf("unlocking account: %w", err) + } + txOpts, err := bind.NewKeyStoreTransactorWithChainID(ks, a, big.NewInt(1337)) + if err != nil { + return nil, common.Address{}, fmt.Errorf("creating transactor: %w", err) + } + return txOpts.Signer, a.Address, nil +} + +type SignerAPI struct { + SignerFn bind.SignerFn + Address common.Address +} + +func (a *SignerAPI) SignTransaction(ctx context.Context, req *apitypes.SendTxArgs) (hexutil.Bytes, error) { + if req == nil { + return nil, fmt.Errorf("nil request") + } + signedTx, err := a.SignerFn(a.Address, req.ToTransaction()) + if err != nil { + return nil, fmt.Errorf("signing transaction: %w", err) + } + signedTxBytes, err := signedTx.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("marshaling signed transaction: %w", err) + } + return signedTxBytes, nil +} diff --git a/arbnode/node.go b/arbnode/node.go index fe88d58a0e..cd0c5026e2 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -25,7 +25,6 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" - "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbnode/resourcemanager" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" @@ -307,6 +306,7 @@ func checkArbDbSchemaVersion(arbDb ethdb.Database) error { func StakerDataposter( ctx context.Context, db ethdb.Database, l1Reader *headerreader.HeaderReader, transactOpts *bind.TransactOpts, cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor, + parentChainID *big.Int, ) (*dataposter.DataPoster, error) { cfg := cfgFetcher.Get() if transactOpts == nil && cfg.Staker.DataPoster.ExternalSigner.URL == "" { @@ -319,13 +319,6 @@ func StakerDataposter( if err != nil { return nil, fmt.Errorf("creating redis client from url: %w", err) } - lockCfgFetcher := func() *redislock.SimpleCfg { - return &cfg.Staker.RedisLock - } - redisLock, err := redislock.NewSimple(redisC, lockCfgFetcher, func() bool { return syncMonitor.Synced() }) - if err != nil { - return nil, err - } dpCfg := func() *dataposter.DataPosterConfig { return &cfg.Staker.DataPoster } @@ -341,10 +334,10 @@ func StakerDataposter( HeaderReader: l1Reader, Auth: transactOpts, RedisClient: redisC, - RedisLock: redisLock, Config: dpCfg, MetadataRetriever: mdRetriever, RedisKey: sender + ".staker-data-poster.queue", + ParentChainID: parentChainID, }) } @@ -361,6 +354,7 @@ func createNodeImpl( txOptsBatchPoster *bind.TransactOpts, dataSigner signature.DataSignerFunc, fatalErrChan chan error, + parentChainID *big.Int, ) (*Node, error) { config := configFetcher.Get() @@ -591,6 +585,7 @@ func createNodeImpl( txOptsValidator, configFetcher, syncMonitor, + parentChainID, ) if err != nil { return nil, err @@ -658,15 +653,16 @@ func createNodeImpl( return nil, errors.New("batchposter, but no TxOpts") } batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{ - DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix), - L1Reader: l1Reader, - Inbox: inboxTracker, - Streamer: txStreamer, - SyncMonitor: syncMonitor, - Config: func()
*BatchPosterConfig { return &configFetcher.Get().BatchPoster }, - DeployInfo: deployInfo, - TransactOpts: txOptsBatchPoster, - DAWriter: daWriter, + DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix), + L1Reader: l1Reader, + Inbox: inboxTracker, + Streamer: txStreamer, + SyncMonitor: syncMonitor, + Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, + DeployInfo: deployInfo, + TransactOpts: txOptsBatchPoster, + DAWriter: daWriter, + ParentChainID: parentChainID, }) if err != nil { return nil, err @@ -724,8 +720,9 @@ func CreateNode( txOptsBatchPoster *bind.TransactOpts, dataSigner signature.DataSignerFunc, fatalErrChan chan error, + parentChainID *big.Int, ) (*Node, error) { - currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan) + currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID) if err != nil { return nil, err } @@ -755,8 +752,6 @@ func CreateNode( } func (n *Node) Start(ctx context.Context) error { - // config is the static config at start, not a dynamic config - config := n.configFetcher.Get() execClient, ok := n.Execution.(*gethexec.ExecutionNode) if !ok { execClient = nil @@ -788,7 +783,7 @@ func (n *Node) Start(ctx context.Context) error { return fmt.Errorf("error initializing feed broadcast server: %w", err) } } - if n.InboxTracker != nil && n.BroadcastServer != nil && config.Sequencer { + if n.InboxTracker != nil && n.BroadcastServer != nil { // Even if the sequencer coordinator will populate this backlog, // we want to make sure it's populated before any clients connect. 
err = n.InboxTracker.PopulateFeedBacklog(n.BroadcastServer) @@ -888,9 +883,6 @@ func (n *Node) Start(ctx context.Context) error { } func (n *Node) StopAndWait() { - if n.Execution != nil { - n.Execution.StopAndWait() - } if n.MaintenanceRunner != nil && n.MaintenanceRunner.Started() { n.MaintenanceRunner.StopAndWait() } @@ -943,7 +935,10 @@ func (n *Node) StopAndWait() { if n.DASLifecycleManager != nil { n.DASLifecycleManager.StopAndWaitUntil(2 * time.Second) } + if n.Execution != nil { + n.Execution.StopAndWait() + } if err := n.Stack.Close(); err != nil { - log.Error("error on stak close", "err", err) + log.Error("error on stack close", "err", err) } } diff --git a/arbnode/redislock/redis.go b/arbnode/redislock/redis.go index c8252e059f..09296e3c79 100644 --- a/arbnode/redislock/redis.go +++ b/arbnode/redislock/redis.go @@ -29,6 +29,7 @@ type Simple struct { } type SimpleCfg struct { + Enable bool `koanf:"enable"` MyId string `koanf:"my-id"` LockoutDuration time.Duration `koanf:"lockout-duration" reload:"hot"` RefreshDuration time.Duration `koanf:"refresh-duration" reload:"hot"` @@ -39,6 +40,7 @@ type SimpleCfg struct { type SimpleCfgFetcher func() *SimpleCfg func AddConfigOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultCfg.Enable, "if false, always treat this as locked and don't write the lock to redis") f.String(prefix+".my-id", "", "this node's id prefix when acquiring the lock (optional)") f.Duration(prefix+".lockout-duration", DefaultCfg.LockoutDuration, "how long lock is held") f.Duration(prefix+".refresh-duration", DefaultCfg.RefreshDuration, "how long between consecutive calls to redis") @@ -60,6 +62,7 @@ func NewSimple(client redis.UniversalClient, config SimpleCfgFetcher, readyToLoc } var DefaultCfg = SimpleCfg{ + Enable: true, LockoutDuration: time.Minute, RefreshDuration: time.Second * 10, Key: "", @@ -137,12 +140,33 @@ func (l *Simple) AttemptLock(ctx context.Context) bool { } func (l *Simple) Locked() bool { - if l.client == nil { + if l.client == nil || !l.config().Enable { return true } return time.Now().Before(atomicTimeRead(&l.lockedUntil)) } +// Returns true if a call to AttemptLock will likely succeed +func (l *Simple) CouldAcquireLock(ctx context.Context) (bool, error) { + if l.Locked() { + return true, nil + } + if l.stopping || !l.readyToLock() { + return false, nil + } + // l.client shouldn't be nil here because Locked would've returned true + current, err := l.client.Get(ctx, l.config().Key).Result() + if errors.Is(err, redis.Nil) { + // Lock is free for the taking + return true, nil + } + if err != nil { + return false, err + } + // return true if the lock is free for the taking or is already ours + return current == "" || current == l.myId, nil +} + func (l *Simple) Release(ctx context.Context) { l.mutex.Lock() defer l.mutex.Unlock() diff --git a/arbnode/resourcemanager/resource_management.go b/arbnode/resourcemanager/resource_management.go index cb1ae9d6ea..aba823cc25 100644 --- a/arbnode/resourcemanager/resource_management.go +++ b/arbnode/resourcemanager/resource_management.go @@ -39,14 +39,14 @@ func Init(conf *Config) error { return nil } - limit, err := parseMemLimit(conf.MemFreeLimit) + limit, err := ParseMemLimit(conf.MemFreeLimit) if err != nil { return err } node.WrapHTTPHandler = func(srv http.Handler) (http.Handler, error) { - var c limitChecker - c, err := newCgroupsMemoryLimitCheckerIfSupported(limit) + var c LimitChecker + c, err := NewCgroupsMemoryLimitCheckerIfSupported(limit) if errors.Is(err, errNotSupported) { 
log.Error("No method for determining memory usage and limits was discovered, disabled memory limit RPC throttling") c = &trivialLimitChecker{} @@ -57,7 +57,7 @@ func Init(conf *Config) error { return nil } -func parseMemLimit(limitStr string) (int, error) { +func ParseMemLimit(limitStr string) (int, error) { var ( limit int = 1 s string @@ -105,10 +105,10 @@ func ConfigAddOptions(prefix string, f *pflag.FlagSet) { // limit check. type httpServer struct { inner http.Handler - c limitChecker + c LimitChecker } -func newHttpServer(inner http.Handler, c limitChecker) *httpServer { +func newHttpServer(inner http.Handler, c LimitChecker) *httpServer { return &httpServer{inner: inner, c: c} } @@ -116,7 +116,7 @@ func newHttpServer(inner http.Handler, c limitChecker) *httpServer { // limit is exceeded, in which case it returns a HTTP 429 error. func (s *httpServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { start := time.Now() - exceeded, err := s.c.isLimitExceeded() + exceeded, err := s.c.IsLimitExceeded() limitCheckDurationHistogram.Update(time.Since(start).Nanoseconds()) if err != nil { log.Error("Error checking memory limit", "err", err, "checker", s.c.String()) @@ -130,19 +130,19 @@ func (s *httpServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { s.inner.ServeHTTP(w, req) } -type limitChecker interface { - isLimitExceeded() (bool, error) +type LimitChecker interface { + IsLimitExceeded() (bool, error) String() string } -func isSupported(c limitChecker) bool { - _, err := c.isLimitExceeded() +func isSupported(c LimitChecker) bool { + _, err := c.IsLimitExceeded() return err == nil } -// newCgroupsMemoryLimitCheckerIfSupported attempts to auto-discover whether +// NewCgroupsMemoryLimitCheckerIfSupported attempts to auto-discover whether // Cgroups V1 or V2 is supported for checking system memory limits. -func newCgroupsMemoryLimitCheckerIfSupported(memLimitBytes int) (*cgroupsMemoryLimitChecker, error) { +func NewCgroupsMemoryLimitCheckerIfSupported(memLimitBytes int) (*cgroupsMemoryLimitChecker, error) { c := newCgroupsMemoryLimitChecker(cgroupsV1MemoryFiles, memLimitBytes) if isSupported(c) { log.Info("Cgroups v1 detected, enabling memory limit RPC throttling") @@ -161,7 +161,7 @@ func newCgroupsMemoryLimitCheckerIfSupported(memLimitBytes int) (*cgroupsMemoryL // trivialLimitChecker checks no limits, so its limits are never exceeded. type trivialLimitChecker struct{} -func (_ trivialLimitChecker) isLimitExceeded() (bool, error) { +func (_ trivialLimitChecker) IsLimitExceeded() (bool, error) { return false, nil } @@ -202,7 +202,7 @@ func newCgroupsMemoryLimitChecker(files cgroupsMemoryFiles, memLimitBytes int) * } } -// isLimitExceeded checks if the system memory free is less than the limit. +// IsLimitExceeded checks if the system memory free is less than the limit. // It returns true if the limit is exceeded. // // container_memory_working_set_bytes in prometheus is calculated as @@ -223,7 +223,7 @@ func newCgroupsMemoryLimitChecker(files cgroupsMemoryFiles, memLimitBytes int) * // free memory for the page cache, to avoid cache thrashing on chain state // access. How much "reasonable" is will depend on access patterns, state // size, and your application's tolerance for latency. 
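With `LimitChecker` exported above, packages outside resourcemanager can plug in their own throttling policy. A minimal sketch of a custom implementation backed by Go runtime heap statistics (purely illustrative; nothing in this diff adds such a checker):

```go
package resourcecheck

import "runtime"

// runtimeLimitChecker satisfies resourcemanager.LimitChecker by treating Go
// heap allocation as the limited resource.
type runtimeLimitChecker struct {
	limitBytes uint64
}

func (c runtimeLimitChecker) IsLimitExceeded() (bool, error) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m) // briefly stops the world; acceptable at RPC-throttling frequency
	return m.HeapAlloc > c.limitBytes, nil
}

func (c runtimeLimitChecker) String() string {
	return "go-runtime heap checker"
}
```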
-func (c *cgroupsMemoryLimitChecker) isLimitExceeded() (bool, error) { +func (c *cgroupsMemoryLimitChecker) IsLimitExceeded() (bool, error) { var limit, usage, active, inactive int var err error if limit, err = readIntFromFile(c.files.limitFile); err != nil { diff --git a/arbnode/resourcemanager/resource_management_test.go b/arbnode/resourcemanager/resource_management_test.go index 4f52ad017e..4495396063 100644 --- a/arbnode/resourcemanager/resource_management_test.go +++ b/arbnode/resourcemanager/resource_management_test.go @@ -52,7 +52,7 @@ func makeCgroupsTestDir(cgroupDir string) cgroupsMemoryFiles { func TestCgroupsFailIfCantOpen(t *testing.T) { testFiles := makeCgroupsTestDir(t.TempDir()) c := newCgroupsMemoryLimitChecker(testFiles, 1024*1024*512) - if _, err := c.isLimitExceeded(); err == nil { + if _, err := c.IsLimitExceeded(); err == nil { t.Fatal("Should fail open if can't read files") } } @@ -124,7 +124,7 @@ func TestCgroupsMemoryLimit(t *testing.T) { } { t.Run(tc.desc, func(t *testing.T) { testFiles := makeCgroupsTestDir(t.TempDir()) - memLimit, err := parseMemLimit(tc.memLimit) + memLimit, err := ParseMemLimit(tc.memLimit) if err != nil { t.Fatalf("Parsing memory limit failed: %v", err) } @@ -132,12 +132,12 @@ func TestCgroupsMemoryLimit(t *testing.T) { if err := updateFakeCgroupFiles(c, tc.sysLimit, tc.usage, tc.inactive, tc.active); err != nil { t.Fatalf("Updating cgroup files: %v", err) } - exceeded, err := c.isLimitExceeded() + exceeded, err := c.IsLimitExceeded() if err != nil { t.Fatalf("Checking if limit exceeded: %v", err) } if exceeded != tc.want { - t.Errorf("isLimitExceeded() = %t, want %t", exceeded, tc.want) + t.Errorf("IsLimitExceeded() = %t, want %t", exceeded, tc.want) } }, ) diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index cb6f4fe502..ecf38ddf42 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -650,6 +650,8 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { log.Warn("failed sequencing delayed messages after catching lock", "err", err) } } + // This should be redundant now that even non-primary sequencers broadcast over the feed, + // but the backlog efficiently deduplicates messages, so better safe than sorry. 
err = c.streamer.PopulateFeedBacklog() if err != nil { log.Warn("failed to populate the feed backlog on lockout acquisition", "err", err) diff --git a/arbnode/simple_redis_lock_test.go b/arbnode/simple_redis_lock_test.go index b7506145c3..c9dd576749 100644 --- a/arbnode/simple_redis_lock_test.go +++ b/arbnode/simple_redis_lock_test.go @@ -48,6 +48,7 @@ func simpleRedisLockTest(t *testing.T, redisKeySuffix string, chosen int, backgo Require(t, redisClient.Del(ctx, redisKey).Err()) conf := &redislock.SimpleCfg{ + Enable: true, LockoutDuration: test_delay * test_attempts * 10, RefreshDuration: test_delay * 2, Key: redisKey, diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index db0658f923..24ef2a7cc4 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -862,12 +862,6 @@ func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex return err } - if s.broadcastServer != nil { - if err := s.broadcastServer.BroadcastSingle(msgWithMeta, pos); err != nil { - log.Error("failed broadcasting message", "pos", pos, "err", err) - } - } - return nil } @@ -927,6 +921,12 @@ func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages [ default: } + if s.broadcastServer != nil { + if err := s.broadcastServer.BroadcastMessages(messages, pos); err != nil { + log.Error("failed broadcasting message", "pos", pos, "err", err) + } + } + return nil } diff --git a/arbos/block_processor.go b/arbos/block_processor.go index f2adb4ce3a..7b71e0d910 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -42,6 +42,21 @@ var EmitReedeemScheduledEvent func(*vm.EVM, uint64, uint64, [32]byte, [32]byte, var EmitTicketCreatedEvent func(*vm.EVM, [32]byte) error var gasUsedSinceStartupCounter = metrics.NewRegisteredCounter("arb/gas_used", nil) +// A helper struct that implements String() by marshalling to JSON. +// This is useful for logging because it's lazy, so if the log level is too high to print the transaction, +// it doesn't waste compute marshalling the transaction when the result wouldn't be used. 
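The lazy-marshalling trick described in the comment above works because the logger only invokes `String()` on an argument when the record is actually emitted; level filtering happens first. The pattern generalizes to any expensive formatting (a generic sketch, not part of this diff):

```go
package arbos

import (
	"encoding/json"
	"fmt"
)

// lazyJson defers json.Marshal until the logger actually formats the value.
type lazyJson struct{ v interface{} }

func (l lazyJson) String() string {
	b, err := json.Marshal(l.v)
	if err != nil {
		return fmt.Sprintf("[error marshalling: %v]", err)
	}
	return string(b)
}

// Usage: log.Trace("applied tx", "tx", lazyJson{tx}); if Trace is filtered
// out, Marshal never runs.
```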
+type printTxAsJson struct { + tx *types.Transaction +} + +func (p printTxAsJson) String() string { + json, err := p.tx.MarshalJSON() + if err != nil { + return fmt.Sprintf("[error marshalling tx: %v]", err) + } + return string(json) +} + type L1Info struct { poster common.Address l1BlockNumber uint64 @@ -358,7 +373,11 @@ func ProduceBlockAdvanced( hooks.TxErrors = append(hooks.TxErrors, err) if err != nil { - log.Debug("error applying transaction", "tx", tx, "err", err) + logLevel := log.Debug + if chainConfig.DebugMode() { + logLevel = log.Warn + } + logLevel("error applying transaction", "tx", printTxAsJson{tx}, "err", err) if !hooks.DiscardInvalidTxsEarly { // we'll still deduct a TxGas's worth from the block-local rate limiter even if the tx was invalid blockGasLeft = arbmath.SaturatingUSub(blockGasLeft, params.TxGas) diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index be5540b601..27ecae8b85 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -259,11 +259,11 @@ func (ps *L1PricingState) AddToL1FeesAvailable(delta *big.Int) (*big.Int, error) if err != nil { return nil, err } - newFee := new(big.Int).Add(old, delta) - if err := ps.SetL1FeesAvailable(newFee); err != nil { + new := new(big.Int).Add(old, delta) + if err := ps.SetL1FeesAvailable(new); err != nil { return nil, err } - return newFee, nil + return new, nil } func (ps *L1PricingState) TransferFromL1FeesAvailable( diff --git a/arbos/parse_l2.go b/arbos/parse_l2.go index 533d025857..aeafd1cc67 100644 --- a/arbos/parse_l2.go +++ b/arbos/parse_l2.go @@ -3,6 +3,7 @@ package arbos import ( "bytes" "encoding/binary" + "encoding/json" "errors" "fmt" "io" @@ -237,7 +238,11 @@ func parseEspressoMsg(rd io.Reader) ([]espressoTypes.Bytes, *arbostypes.Espresso } if jst == nil { j := new(arbostypes.EspressoBlockJustification) - if err := rlp.DecodeBytes(nextMsg, &j); err != nil { + s := []byte{} + if err := rlp.DecodeBytes(nextMsg, &s); err != nil { + return nil, nil, err + } + if err := json.Unmarshal(s, j); err != nil { return nil, nil, err } jst = j @@ -469,7 +474,11 @@ func MessageFromEspresso(header *arbostypes.L1IncomingMessageHeader, txes []espr var l2Message []byte l2Message = append(l2Message, L2MessageKind_EspressoTx) - jstBin, err := rlp.EncodeToBytes(jst) + jstJson, err := json.Marshal(jst) + if err != nil { + return arbostypes.L1IncomingMessage{}, err + } + jstBin, err := rlp.EncodeToBytes(jstJson) if err != nil { return arbostypes.L1IncomingMessage{}, err } diff --git a/arbos/parse_l2_test.go b/arbos/parse_l2_test.go index 59b2018078..9800119b2e 100644 --- a/arbos/parse_l2_test.go +++ b/arbos/parse_l2_test.go @@ -4,6 +4,7 @@ import ( "reflect" "testing" + tagged_base64 "github.com/EspressoSystems/espresso-sequencer-go/tagged-base64" espressoTypes "github.com/EspressoSystems/espresso-sequencer-go/types" "github.com/offchainlabs/nitro/arbos/arbostypes" ) @@ -17,14 +18,19 @@ func TestEspressoParsing(t *testing.T) { Kind: arbostypes.L1MessageType_L2Message, BlockNumber: 1, } + payloadCommitment, err := tagged_base64.New("payloadCommitment", []byte{1, 2, 3}) + Require(t, err) + root, err := tagged_base64.New("root", []byte{4, 5, 6}) + Require(t, err) expectJst := &arbostypes.EspressoBlockJustification{ Header: espressoTypes.Header{ - TransactionsRoot: espressoTypes.NmtRoot{Root: []byte{7, 8, 9}}, - L1Head: 1, - Timestamp: 2, - Height: 3, - L1Finalized: &espressoTypes.L1BlockInfo{}, - PayloadCommitment: espressoTypes.Bytes{1, 2, 3}, + TransactionsRoot: espressoTypes.NmtRoot{Root: 
[]byte{7, 8, 9}}, + L1Head: 1, + Timestamp: 2, + Height: 3, + L1Finalized: &espressoTypes.L1BlockInfo{}, + PayloadCommitment: payloadCommitment, + BlockMerkleTreeRoot: root, }, Proof: []byte{9}, } @@ -38,8 +44,10 @@ func TestEspressoParsing(t *testing.T) { Fail(t) } - if !reflect.DeepEqual(actualJst, expectJst) { + if !reflect.DeepEqual(actualJst.Proof, expectJst.Proof) { + Fail(t) + } + if !reflect.DeepEqual(actualJst.Header, expectJst.Header) { Fail(t) } - } diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index 12d494a230..b66710dbf0 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -24,6 +24,7 @@ type L1Interface interface { TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) BlockNumber(ctx context.Context) (uint64, error) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) + ChainID(ctx context.Context) (*big.Int, error) } func SendTxAsCall(ctx context.Context, client L1Interface, tx *types.Transaction, from common.Address, blockNum *big.Int, unlimitedGas bool) ([]byte, error) { diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 851561f482..f6501105c2 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -19,6 +19,8 @@ var ( errOutOfBounds = errors.New("message not found in backlog") confirmedSequenceNumberGauge = metrics.NewRegisteredGauge("arb/sequencenumber/confirmed", nil) + backlogSizeInBytesGauge = metrics.NewRegisteredGauge("arb/feed/backlog/bytes", nil) + backlogSizeGauge = metrics.NewRegisteredGauge("arb/feed/backlog/messages", nil) ) // Backlog defines the interface for backlog. @@ -35,18 +37,18 @@ type Backlog interface { type backlog struct { head atomic.Pointer[backlogSegment] tail atomic.Pointer[backlogSegment] - lookupByIndex *containers.SyncMap[uint64, *backlogSegment] + lookupByIndex atomic.Pointer[containers.SyncMap[uint64, *backlogSegment]] config ConfigFetcher messageCount atomic.Uint64 } // NewBacklog creates a backlog. func NewBacklog(c ConfigFetcher) Backlog { - lookup := &containers.SyncMap[uint64, *backlogSegment]{} - return &backlog{ - lookupByIndex: lookup, - config: c, + b := &backlog{ + config: c, } + b.lookupByIndex.Store(&containers.SyncMap[uint64, *backlogSegment]{}) + return b } // Head return the head backlogSegment within the backlog. @@ -54,6 +56,36 @@ func (b *backlog) Head() BacklogSegment { return b.head.Load() } +func (b *backlog) backlogSizeInBytes() (uint64, error) { + headSeg := b.head.Load() + tailSeg := b.tail.Load() + if headSeg == nil || tailSeg == nil { + if headSeg == nil && tailSeg == nil { + return 0, nil + } + return 0, errors.New("the head or tail segment of feed backlog is nil") + } + + headSeg.messagesLock.RLock() + if len(headSeg.messages) == 0 { + return 0, errors.New("head segment of the feed backlog is empty") + } + headMsg := headSeg.messages[0] + headSeg.messagesLock.RUnlock() + + tailSeg.messagesLock.RLock() + if len(tailSeg.messages) == 0 { + return 0, errors.New("tail segment of the feed backlog is empty") + } + tailMsg := tailSeg.messages[len(tailSeg.messages)-1] + size := tailMsg.CumulativeSumMsgSize + tailSeg.messagesLock.RUnlock() + + size -= headMsg.CumulativeSumMsgSize + size += headMsg.Size() + return size, nil +} + // Append will add the given messages to the backlogSegment at head until // that segment reaches its limit. If messages remain to be added a new segment // will be created. 
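An aside on the size tracking introduced above: backlogSizeInBytes never walks the message list. Each BroadcastFeedMessage carries a running CumulativeSumMsgSize (the field and its Size/UpdateCumulativeSumMsgSize helpers are added later in this diff, in broadcaster/message/message.go), so the total is the tail's cumulative sum minus everything before the head message. The following is a minimal, self-contained sketch of that arithmetic; feedMessage, payloadLen, and the flat 160-byte overhead are illustrative stand-ins for the real types, not code from this PR:

```go
package main

import "fmt"

// feedMessage is a stand-in for broadcaster/message.BroadcastFeedMessage;
// only the fields needed for the size bookkeeping are modeled here.
type feedMessage struct {
	payloadLen           uint64
	cumulativeSumMsgSize uint64
}

// size mirrors the shape of BroadcastFeedMessage.Size: payload length plus a
// fixed overhead estimate (the real code uses len(Signature)+len(L2msg)+160).
func (m *feedMessage) size() uint64 { return m.payloadLen + 160 }

func main() {
	// Append messages while maintaining the running cumulative sum, the way
	// backlogSegment.append does via UpdateCumulativeSumMsgSize.
	var backlog []*feedMessage
	for _, l := range []uint64{100, 250, 75} {
		prev := uint64(0)
		if len(backlog) > 0 {
			prev = backlog[len(backlog)-1].cumulativeSumMsgSize
		}
		msg := &feedMessage{payloadLen: l}
		msg.cumulativeSumMsgSize = prev + msg.size()
		backlog = append(backlog, msg)
	}

	// Total size in O(1), as in backlog.backlogSizeInBytes: take the tail's
	// cumulative sum, subtract the head's, then add the head's own size back.
	head, tail := backlog[0], backlog[len(backlog)-1]
	total := tail.cumulativeSumMsgSize - head.cumulativeSumMsgSize + head.size()
	fmt.Println(total) // 905 = (100+160)+(250+160)+(75+160)
}
```

Keeping a running sum on each message makes the byte-size gauge O(1) per update at the cost of one extra uint64 per message, which is presumably why the field is tagged `json:"-"` and never serialized onto the feed.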
@@ -61,8 +93,15 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { if bm.ConfirmedSequenceNumberMessage != nil { b.delete(uint64(bm.ConfirmedSequenceNumberMessage.SequenceNumber)) + size, err := b.backlogSizeInBytes() + if err != nil { + log.Warn("error calculating backlogSizeInBytes", "err", err) + } else { + backlogSizeInBytesGauge.Update(int64(size)) + } } + lookupByIndex := b.lookupByIndex.Load() for _, msg := range bm.Messages { segment := b.tail.Load() if segment == nil { @@ -74,10 +113,15 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { prevMsgIdx := segment.End() if segment.count() >= b.config().SegmentLimit { + segment.messagesLock.RLock() + if len(segment.messages) > 0 { + msg.CumulativeSumMsgSize = segment.messages[len(segment.messages)-1].CumulativeSumMsgSize + } + segment.messagesLock.RUnlock() + nextSegment := newBacklogSegment() segment.nextSegment.Store(nextSegment) prevMsgIdx = segment.End() - nextSegment.previousSegment.Store(segment) segment = nextSegment b.tail.Store(segment) } @@ -89,6 +133,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { b.head.Store(segment) b.tail.Store(segment) b.messageCount.Store(0) + backlogSizeInBytesGauge.Update(0) log.Warn(err.Error()) } else if errors.Is(err, errSequenceNumberSeen) { log.Info("ignoring message sequence number, already in backlog", "message sequence number", msg.SequenceNumber) @@ -96,10 +141,12 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { } else if err != nil { return err } - b.lookupByIndex.Store(uint64(msg.SequenceNumber), segment) + lookupByIndex.Store(uint64(msg.SequenceNumber), segment) b.messageCount.Add(1) + backlogSizeInBytesGauge.Inc(int64(msg.Size())) } + backlogSizeGauge.Update(int64(b.Count())) return nil } @@ -161,7 +208,7 @@ func (b *backlog) delete(confirmed uint64) { } if confirmed > tail.End() { - log.Error("confirmed sequence number is past the end of stored messages", "confirmed sequence number", confirmed, "last stored sequence number", tail.End()) + log.Warn("confirmed sequence number is past the end of stored messages", "confirmed sequence number", confirmed, "last stored sequence number", tail.End()) b.reset() return } @@ -212,14 +259,15 @@ func (b *backlog) delete(confirmed uint64) { // removeFromLookup removes all entries from the head segment's start index to // the given confirmed index. func (b *backlog) removeFromLookup(start, end uint64) { + lookupByIndex := b.lookupByIndex.Load() for i := start; i <= end; i++ { - b.lookupByIndex.Delete(i) + lookupByIndex.Delete(i) } } // Lookup attempts to find the backlogSegment storing the given message index. func (b *backlog) Lookup(i uint64) (BacklogSegment, error) { - segment, ok := b.lookupByIndex.Load(i) + segment, ok := b.lookupByIndex.Load().Load(i) if !ok { return nil, fmt.Errorf("error finding backlog segment containing message with SequenceNumber %d", i) } @@ -234,10 +282,12 @@ func (s *backlog) Count() uint64 { // reset removes all segments from the backlog. func (b *backlog) reset() { - b.head = atomic.Pointer[backlogSegment]{} - b.tail = atomic.Pointer[backlogSegment]{} - b.lookupByIndex = &containers.SyncMap[uint64, *backlogSegment]{} + b.head.Store(nil) + b.tail.Store(nil) + b.lookupByIndex.Store(&containers.SyncMap[uint64, *backlogSegment]{}) b.messageCount.Store(0) + backlogSizeInBytesGauge.Update(0) + backlogSizeGauge.Update(0) } // BacklogSegment defines the interface for backlogSegment. 
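The hunks above also replace the plain lookupByIndex map pointer with an atomic.Pointer, so reset() can install a fresh map with a single atomic store while concurrent Append and Lookup callers keep whatever snapshot they already loaded. A rough standalone sketch of that pattern, using sync.Map as a stand-in for the repo's containers.SyncMap (all names here are illustrative, not from the PR):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// index is a stand-in for the backlog's lookupByIndex field: an atomic
// pointer to a map, so reset can replace the whole map in one atomic store
// instead of reassigning a plain field while other goroutines may be reading.
type index struct {
	m atomic.Pointer[sync.Map]
}

func newIndex() *index {
	i := &index{}
	i.m.Store(&sync.Map{})
	return i
}

func (i *index) put(seq uint64, segment string) { i.m.Load().Store(seq, segment) }

func (i *index) get(seq uint64) (string, bool) {
	v, ok := i.m.Load().Load(seq)
	if !ok {
		return "", false
	}
	return v.(string), true
}

// reset mirrors backlog.reset: readers that already loaded the old map keep a
// consistent (stale) snapshot; new readers see the fresh, empty map.
func (i *index) reset() { i.m.Store(&sync.Map{}) }

func main() {
	idx := newIndex()
	idx.put(40, "segment-a")
	v, _ := idx.get(40)
	fmt.Println("before reset:", v)
	idx.reset()
	_, ok := idx.get(40)
	fmt.Println("found after reset:", ok) // false
}
```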
@@ -253,10 +303,9 @@ type BacklogSegment interface { // backlogSegment stores messages up to a limit defined by the backlog. It also // points to the next backlogSegment in the list. type backlogSegment struct { - messagesLock sync.RWMutex - messages []*m.BroadcastFeedMessage - nextSegment atomic.Pointer[backlogSegment] - previousSegment atomic.Pointer[backlogSegment] + messagesLock sync.RWMutex + messages []*m.BroadcastFeedMessage + nextSegment atomic.Pointer[backlogSegment] } // newBacklogSegment creates a backlogSegment object with an empty slice of @@ -361,9 +410,15 @@ func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) s.messagesLock.Lock() defer s.messagesLock.Unlock() + prevCumulativeSum := uint64(0) + if len(s.messages) > 0 { + prevCumulativeSum = s.messages[len(s.messages)-1].CumulativeSumMsgSize + } if expSeqNum := prevMsgIdx + 1; prevMsgIdx == 0 || uint64(msg.SequenceNumber) == expSeqNum { + msg.UpdateCumulativeSumMsgSize(prevCumulativeSum) s.messages = append(s.messages, msg) } else if uint64(msg.SequenceNumber) > expSeqNum { + msg.UpdateCumulativeSumMsgSize(prevCumulativeSum) s.messages = nil s.messages = append(s.messages, msg) return fmt.Errorf("new message sequence number (%d) is greater than the expected sequence number (%d): %w", msg.SequenceNumber, expSeqNum, errDropSegments) @@ -379,7 +434,7 @@ func (s *backlogSegment) Contains(i uint64) bool { s.messagesLock.RLock() defer s.messagesLock.RUnlock() start := s.start() - if i < start || i > s.end() { + if i < start || i > s.end() || len(s.messages) == 0 { return false } diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index ab25a523f7..ee712de9ed 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -57,9 +57,9 @@ func validateBroadcastMessage(t *testing.T, bm *m.BroadcastMessage, expectedCoun func createDummyBacklog(indexes []arbutil.MessageIndex) (*backlog, error) { b := &backlog{ - lookupByIndex: &containers.SyncMap[uint64, *backlogSegment]{}, - config: func() *Config { return &DefaultTestConfig }, + config: func() *Config { return &DefaultTestConfig }, } + b.lookupByIndex.Store(&containers.SyncMap[uint64, *backlogSegment]{}) bm := &m.BroadcastMessage{Messages: m.CreateDummyBroadcastMessages(indexes)} err := b.Append(bm) return b, err @@ -161,9 +161,9 @@ func TestDeleteInvalidBacklog(t *testing.T) { lookup := &containers.SyncMap[uint64, *backlogSegment]{} lookup.Store(40, s) b := &backlog{ - lookupByIndex: lookup, - config: func() *Config { return &DefaultTestConfig }, + config: func() *Config { return &DefaultTestConfig }, } + b.lookupByIndex.Store(lookup) b.messageCount.Store(2) b.head.Store(s) b.tail.Store(s) diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 8a70e39810..ed3088ca2e 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -5,6 +5,7 @@ package broadcaster import ( "context" + "errors" "net" "github.com/gobwas/ws" @@ -56,10 +57,11 @@ func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMeta }, nil } -func (b *Broadcaster) BroadcastSingle(msg arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) error { +func (b *Broadcaster) BroadcastSingle(msg arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { defer func() { if r := recover(); r != nil { log.Error("recovered error in BroadcastSingle", "recover", r) + err = errors.New("panic in BroadcastSingle") } }() bfm, err := b.NewBroadcastFeedMessage(msg, seq) @@ -79,6 
+81,27 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { b.BroadcastFeedMessages(broadcastFeedMessages) } +func (b *Broadcaster) BroadcastMessages(messages []arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { + defer func() { + if r := recover(); r != nil { + log.Error("recovered error in BroadcastMessages", "recover", r) + err = errors.New("panic in BroadcastMessages") + } + }() + var feedMessages []*m.BroadcastFeedMessage + for i, msg := range messages { + bfm, err := b.NewBroadcastFeedMessage(msg, seq+arbutil.MessageIndex(i)) + if err != nil { + return err + } + feedMessages = append(feedMessages, bfm) + } + + b.BroadcastFeedMessages(feedMessages) + + return nil +} + func (b *Broadcaster) BroadcastFeedMessages(messages []*m.BroadcastFeedMessage) { bm := &m.BroadcastMessage{ diff --git a/broadcaster/message/message.go b/broadcaster/message/message.go index f436e765cb..a575ae5cd0 100644 --- a/broadcaster/message/message.go +++ b/broadcaster/message/message.go @@ -35,6 +35,16 @@ type BroadcastFeedMessage struct { SequenceNumber arbutil.MessageIndex `json:"sequenceNumber"` Message arbostypes.MessageWithMetadata `json:"message"` Signature []byte `json:"signature"` + + CumulativeSumMsgSize uint64 `json:"-"` +} + +func (m *BroadcastFeedMessage) Size() uint64 { + return uint64(len(m.Signature) + len(m.Message.Message.L2msg) + 160) +} + +func (m *BroadcastFeedMessage) UpdateCumulativeSumMsgSize(val uint64) { + m.CumulativeSumMsgSize += val + m.Size() } func (m *BroadcastFeedMessage) Hash(chainId uint64) (common.Hash, error) { diff --git a/cmd/conf/init.go b/cmd/conf/init.go new file mode 100644 index 0000000000..bebf1955b7 --- /dev/null +++ b/cmd/conf/init.go @@ -0,0 +1,58 @@ +package conf + +import ( + "time" + + "github.com/spf13/pflag" +) + +type InitConfig struct { + Force bool `koanf:"force"` + Url string `koanf:"url"` + DownloadPath string `koanf:"download-path"` + DownloadPoll time.Duration `koanf:"download-poll"` + DevInit bool `koanf:"dev-init"` + DevInitAddress string `koanf:"dev-init-address"` + DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` + Empty bool `koanf:"empty"` + AccountsPerSync uint `koanf:"accounts-per-sync"` + ImportFile string `koanf:"import-file"` + ThenQuit bool `koanf:"then-quit"` + Prune string `koanf:"prune"` + PruneBloomSize uint64 `koanf:"prune-bloom-size"` + ResetToMessage int64 `koanf:"reset-to-message"` +} + +var InitConfigDefault = InitConfig{ + Force: false, + Url: "", + DownloadPath: "/tmp/", + DownloadPoll: time.Minute, + DevInit: false, + DevInitAddress: "", + DevInitBlockNum: 0, + Empty: false, + ImportFile: "", + AccountsPerSync: 100000, + ThenQuit: false, + Prune: "", + PruneBloomSize: 2048, + ResetToMessage: -1, +} + +func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { + f.Bool(prefix+".force", InitConfigDefault.Force, "if true: in case database exists init code will be reexecuted and genesis block compared to database") + f.String(prefix+".url", InitConfigDefault.Url, "url to download initialization data - will poll if download fails") + f.String(prefix+".download-path", InitConfigDefault.DownloadPath, "path to save temp downloaded file") + f.Duration(prefix+".download-poll", InitConfigDefault.DownloadPoll, "how long to wait between polling attempts") + f.Bool(prefix+".dev-init", InitConfigDefault.DevInit, "init with dev data (1 account with balance) instead of file import") + f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddress, "Address of dev-account. 
Leave empty to use the dev-wallet.") + f.Uint64(prefix+".dev-init-blocknum", InitConfigDefault.DevInitBlockNum, "Number of preinit blocks. Must exist in ancient database.") + f.Bool(prefix+".empty", InitConfigDefault.Empty, "init with empty state") + f.Bool(prefix+".then-quit", InitConfigDefault.ThenQuit, "quit after init is done") + f.String(prefix+".import-file", InitConfigDefault.ImportFile, "path for json data to import") + f.Uint(prefix+".accounts-per-sync", InitConfigDefault.AccountsPerSync, "during init - sync database every X accounts. Lower value for low-memory systems. 0 disables.") + f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") + f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") + f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. Also set max-reorg-resequence-depth=0 to force re-reading messages") +} diff --git a/cmd/genericconf/config.go b/cmd/genericconf/config.go index 6b44ace974..50aafbe223 100644 --- a/cmd/genericconf/config.go +++ b/cmd/genericconf/config.go @@ -121,6 +121,6 @@ func (c *RpcConfig) Apply(stackConf *node.Config) { } func RpcConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Int(prefix+".max-batch-response-size", DefaultRpcConfig.MaxBatchResponseSize, "the maximum response size for a JSON-RPC request measured in bytes (-1 means no limit)") - f.Int(prefix+".batch-request-limit", DefaultRpcConfig.BatchRequestLimit, "the maximum number of requests in a batch") + f.Int(prefix+".max-batch-response-size", DefaultRpcConfig.MaxBatchResponseSize, "the maximum response size for a JSON-RPC request measured in bytes (0 means no limit)") + f.Int(prefix+".batch-request-limit", DefaultRpcConfig.BatchRequestLimit, "the maximum number of requests in a batch (0 means no limit)") } diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 1427ef161e..4cf5dcda06 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -10,93 +10,37 @@ import ( "fmt" "math/big" "os" - "reflect" - "regexp" "runtime" "strings" "sync" "time" - "github.com/offchainlabs/nitro/cmd/util" - "github.com/cavaliergopher/grab/v3" extract "github.com/codeclysm/extract/v3" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state/pruner" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" + "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/ipfshelper" + "github.com/offchainlabs/nitro/cmd/pruning" + "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/execution/gethexec" - "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/statetransfer" - "github.com/spf13/pflag" + "github.com/offchainlabs/nitro/util/arbmath" ) -type 
InitConfig struct { - Force bool `koanf:"force"` - Url string `koanf:"url"` - DownloadPath string `koanf:"download-path"` - DownloadPoll time.Duration `koanf:"download-poll"` - DevInit bool `koanf:"dev-init"` - DevInitAddress string `koanf:"dev-init-address"` - DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` - Empty bool `koanf:"empty"` - AccountsPerSync uint `koanf:"accounts-per-sync"` - ImportFile string `koanf:"import-file"` - ThenQuit bool `koanf:"then-quit"` - Prune string `koanf:"prune"` - PruneBloomSize uint64 `koanf:"prune-bloom-size"` - ResetToMessage int64 `koanf:"reset-to-message"` -} - -var InitConfigDefault = InitConfig{ - Force: false, - Url: "", - DownloadPath: "/tmp/", - DownloadPoll: time.Minute, - DevInit: false, - DevInitAddress: "", - DevInitBlockNum: 0, - ImportFile: "", - AccountsPerSync: 100000, - ThenQuit: false, - Prune: "", - PruneBloomSize: 2048, - ResetToMessage: -1, -} - -func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { - f.Bool(prefix+".force", InitConfigDefault.Force, "if true: in case database exists init code will be reexecuted and genesis block compared to database") - f.String(prefix+".url", InitConfigDefault.Url, "url to download initializtion data - will poll if download fails") - f.String(prefix+".download-path", InitConfigDefault.DownloadPath, "path to save temp downloaded file") - f.Duration(prefix+".download-poll", InitConfigDefault.DownloadPoll, "how long to wait between polling attempts") - f.Bool(prefix+".dev-init", InitConfigDefault.DevInit, "init with dev data (1 account with balance) instead of file import") - f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddress, "Address of dev-account. Leave empty to use the dev-wallet.") - f.Uint64(prefix+".dev-init-blocknum", InitConfigDefault.DevInitBlockNum, "Number of preinit blocks. Must exist in ancient database.") - f.Bool(prefix+".empty", InitConfigDefault.Empty, "init with empty state") - f.Bool(prefix+".then-quit", InitConfigDefault.ThenQuit, "quit after init is done") - f.String(prefix+".import-file", InitConfigDefault.ImportFile, "path for json data to import") - f.Uint(prefix+".accounts-per-sync", InitConfigDefault.AccountsPerSync, "during init - sync database every X accounts. Lower value for low-memory systems. 0 disables.") - f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") - f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") - f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. Also set max-reorg-resequence-depth=0 to force re-reading messages") -} - -func downloadInit(ctx context.Context, initConfig *InitConfig) (string, error) { +func downloadInit(ctx context.Context, initConfig *conf.InitConfig) (string, error) { if initConfig.Url == "" { return "", nil } @@ -215,238 +159,19 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo return nil } -type importantRoots struct { - chainDb ethdb.Database - roots []common.Hash - heights []uint64 -} - -// The minimum block distance between two important roots -const minRootDistance = 2000 - -// Marks a header as important, and records its root and height. -// If overwrite is true, it'll remove any future roots and replace them with this header. 
-// If overwrite is false, it'll ignore this header if it has future roots. -func (r *importantRoots) addHeader(header *types.Header, overwrite bool) error { - targetBlockNum := header.Number.Uint64() - for { - if header == nil || header.Root == (common.Hash{}) { - log.Error("missing state of pruning target", "blockNum", targetBlockNum) - return nil - } - exists, err := r.chainDb.Has(header.Root.Bytes()) - if err != nil { - return err - } - if exists { - break - } - num := header.Number.Uint64() - if num%3000 == 0 { - log.Info("looking for old block with state to keep", "current", num, "target", targetBlockNum) - } - // An underflow is fine here because it'll just return nil due to not found - header = rawdb.ReadHeader(r.chainDb, header.ParentHash, num-1) - } - height := header.Number.Uint64() - for len(r.heights) > 0 && r.heights[len(r.heights)-1] > height { - if !overwrite { - return nil - } - r.roots = r.roots[:len(r.roots)-1] - r.heights = r.heights[:len(r.heights)-1] - } - if len(r.heights) > 0 && r.heights[len(r.heights)-1]+minRootDistance > height { - return nil - } - r.roots = append(r.roots, header.Root) - r.heights = append(r.heights, height) - return nil -} - -var hashListRegex = regexp.MustCompile("^(0x)?[0-9a-fA-F]{64}(,(0x)?[0-9a-fA-F]{64})*$") - -// Finds important roots to retain while proving -func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, nodeConfig *NodeConfig, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) ([]common.Hash, error) { - initConfig := &nodeConfig.Init - chainConfig := gethexec.TryReadStoredChainConfig(chainDb) - if chainConfig == nil { - return nil, errors.New("database doesn't have a chain config (was this node initialized?)") - } - arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "", true) - if err != nil { - return nil, err - } - defer func() { - err := arbDb.Close() - if err != nil { - log.Warn("failed to close arbitrum database after finding pruning targets", "err", err) - } - }() - roots := importantRoots{ - chainDb: chainDb, - } - genesisNum := chainConfig.ArbitrumChainParams.GenesisBlockNum - genesisHash := rawdb.ReadCanonicalHash(chainDb, genesisNum) - genesisHeader := rawdb.ReadHeader(chainDb, genesisHash, genesisNum) - if genesisHeader == nil { - return nil, errors.New("missing L2 genesis block header") - } - err = roots.addHeader(genesisHeader, false) - if err != nil { - return nil, err - } - if initConfig.Prune == "validator" { - if l1Client == nil || reflect.ValueOf(l1Client).IsNil() { - return nil, errors.New("an L1 connection is required for validator pruning") - } - callOpts := bind.CallOpts{ - Context: ctx, - BlockNumber: big.NewInt(int64(rpc.FinalizedBlockNumber)), - } - rollup, err := staker.NewRollupWatcher(rollupAddrs.Rollup, l1Client, callOpts) - if err != nil { - return nil, err - } - latestConfirmedNum, err := rollup.LatestConfirmed(&callOpts) - if err != nil { - return nil, err - } - latestConfirmedNode, err := rollup.LookupNode(ctx, latestConfirmedNum) - if err != nil { - return nil, err - } - confirmedHash := latestConfirmedNode.Assertion.AfterState.GlobalState.BlockHash - confirmedNumber := rawdb.ReadHeaderNumber(chainDb, confirmedHash) - var confirmedHeader *types.Header - if confirmedNumber != nil { - confirmedHeader = rawdb.ReadHeader(chainDb, confirmedHash, *confirmedNumber) - } - if confirmedHeader != nil { - err = roots.addHeader(confirmedHeader, false) - if err != nil { - return nil, err - } - } else { - log.Warn("missing 
latest confirmed block", "hash", confirmedHash) - } - - validatorDb := rawdb.NewTable(arbDb, storage.BlockValidatorPrefix) - lastValidated, err := staker.ReadLastValidatedInfo(validatorDb) - if err != nil { - return nil, err - } - if lastValidated != nil { - var lastValidatedHeader *types.Header - headerNum := rawdb.ReadHeaderNumber(chainDb, lastValidated.GlobalState.BlockHash) - if headerNum != nil { - lastValidatedHeader = rawdb.ReadHeader(chainDb, lastValidated.GlobalState.BlockHash, *headerNum) - } - if lastValidatedHeader != nil { - err = roots.addHeader(lastValidatedHeader, false) - if err != nil { - return nil, err - } - } else { - log.Warn("missing latest validated block", "hash", lastValidated.GlobalState.BlockHash) - } - } - } else if initConfig.Prune == "full" { - if nodeConfig.Node.ValidatorRequired() { - return nil, errors.New("refusing to prune to full-node level when validator is enabled (you should prune in validator mode)") - } - } else if hashListRegex.MatchString(initConfig.Prune) { - parts := strings.Split(initConfig.Prune, ",") - roots := []common.Hash{genesisHeader.Root} - for _, part := range parts { - root := common.HexToHash(part) - if root == genesisHeader.Root { - // This was already included in the builtin list - continue - } - roots = append(roots, root) - } - return roots, nil - } else { - return nil, fmt.Errorf("unknown pruning mode: \"%v\"", initConfig.Prune) - } - if l1Client != nil { - // Find the latest finalized block and add it as a pruning target - l1Block, err := l1Client.BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - if err != nil { - return nil, fmt.Errorf("failed to get finalized block: %w", err) - } - l1BlockNum := l1Block.NumberU64() - tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil) - if err != nil { - return nil, err - } - batch, err := tracker.GetBatchCount() - if err != nil { - return nil, err - } - for { - if ctx.Err() != nil { - return nil, ctx.Err() - } - if batch == 0 { - // No batch has been finalized - break - } - batch -= 1 - meta, err := tracker.GetBatchMetadata(batch) - if err != nil { - return nil, err - } - if meta.ParentChainBlock <= l1BlockNum { - signedBlockNum := arbutil.MessageCountToBlockNumber(meta.MessageCount, genesisNum) - blockNum := uint64(signedBlockNum) - l2Hash := rawdb.ReadCanonicalHash(chainDb, blockNum) - l2Header := rawdb.ReadHeader(chainDb, l2Hash, blockNum) - if l2Header == nil { - log.Warn("latest finalized L2 block is unknown", "blockNum", signedBlockNum) - break - } - err = roots.addHeader(l2Header, false) - if err != nil { - return nil, err - } - break - } - } - } - roots.roots = append(roots.roots, common.Hash{}) // the latest snapshot - log.Info("found pruning target blocks", "heights", roots.heights, "roots", roots.roots) - return roots.roots, nil -} - -func pruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, nodeConfig *NodeConfig, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) error { - config := &nodeConfig.Init - if config.Prune == "" { - return pruner.RecoverPruning(stack.InstanceDir(), chainDb) - } - root, err := findImportantRoots(ctx, chainDb, stack, nodeConfig, cacheConfig, l1Client, rollupAddrs) - if err != nil { - return fmt.Errorf("failed to find root to retain for pruning: %w", err) - } - - pruner, err := pruner.NewPruner(chainDb, pruner.Config{Datadir: stack.InstanceDir(), BloomSize: config.PruneBloomSize}) - if err != nil { - return err - } - return pruner.Prune(root) -} - func 
openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { if !config.Init.Force { if readOnlyDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", 0, 0, "", "", true); err == nil { if chainConfig := gethexec.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil { readOnlyDb.Close() + if !arbmath.BigEquals(chainConfig.ChainID, chainId) { + return nil, nil, fmt.Errorf("database has chain ID %v but config has chain ID %v (are you sure this database is for the right chain?)", chainConfig.ChainID, chainId) + } chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "", false) if err != nil { return chainDb, nil, err } - err = pruneChainDb(ctx, chainDb, stack, config, cacheConfig, l1Client, rollupAddrs) + err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) } @@ -642,7 +367,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo return chainDb, l2BlockChain, err } - err = pruneChainDb(ctx, chainDb, stack, config, cacheConfig, l1Client, rollupAddrs) + err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 966b073ef8..45f539488d 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -536,6 +536,7 @@ func mainImpl() int { l1TransactionOptsBatchPoster, dataSigner, fatalErrChan, + big.NewInt(int64(nodeConfig.ParentChain.ID)), ) if err != nil { log.Error("failed to create node", "err", err) @@ -677,7 +678,7 @@ type NodeConfig struct { MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` PProf bool `koanf:"pprof"` PprofCfg genericconf.PProf `koanf:"pprof-cfg"` - Init InitConfig `koanf:"init"` + Init conf.InitConfig `koanf:"init"` Rpc genericconf.RpcConfig `koanf:"rpc"` } @@ -699,7 +700,7 @@ var NodeConfigDefault = NodeConfig{ GraphQL: genericconf.GraphQLConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, - Init: InitConfigDefault, + Init: conf.InitConfigDefault, Rpc: genericconf.DefaultRpcConfig, PProf: false, PprofCfg: genericconf.PProfDefault, @@ -726,7 +727,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) { f.Bool("pprof", NodeConfigDefault.PProf, "enable pprof") genericconf.PProfAddOptions("pprof-cfg", f) - InitConfigAddOptions("init", f) + conf.InitConfigAddOptions("init", f) genericconf.RpcConfigAddOptions("rpc", f) } diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go new file mode 100644 index 0000000000..68d89302f0 --- /dev/null +++ b/cmd/pruning/pruning.go @@ -0,0 +1,249 @@ +package pruning + +import ( + "context" + "errors" + "fmt" + "math/big" + "reflect" + "regexp" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state/pruner" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + 
"github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/cmd/chaininfo" + "github.com/offchainlabs/nitro/cmd/conf" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/staker" +) + +type importantRoots struct { + chainDb ethdb.Database + roots []common.Hash + heights []uint64 +} + +// The minimum block distance between two important roots +const minRootDistance = 2000 + +// Marks a header as important, and records its root and height. +// If overwrite is true, it'll remove any future roots and replace them with this header. +// If overwrite is false, it'll ignore this header if it has future roots. +func (r *importantRoots) addHeader(header *types.Header, overwrite bool) error { + targetBlockNum := header.Number.Uint64() + for { + if header == nil || header.Root == (common.Hash{}) { + log.Error("missing state of pruning target", "blockNum", targetBlockNum) + return nil + } + exists, err := r.chainDb.Has(header.Root.Bytes()) + if err != nil { + return err + } + if exists { + break + } + num := header.Number.Uint64() + if num%3000 == 0 { + log.Info("looking for old block with state to keep", "current", num, "target", targetBlockNum) + } + // An underflow is fine here because it'll just return nil due to not found + header = rawdb.ReadHeader(r.chainDb, header.ParentHash, num-1) + } + height := header.Number.Uint64() + for len(r.heights) > 0 && r.heights[len(r.heights)-1] > height { + if !overwrite { + return nil + } + r.roots = r.roots[:len(r.roots)-1] + r.heights = r.heights[:len(r.heights)-1] + } + if len(r.heights) > 0 && r.heights[len(r.heights)-1]+minRootDistance > height { + return nil + } + r.roots = append(r.roots, header.Root) + r.heights = append(r.heights, height) + return nil +} + +var hashListRegex = regexp.MustCompile("^(0x)?[0-9a-fA-F]{64}(,(0x)?[0-9a-fA-F]{64})*$") + +// Finds important roots to retain while proving +func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) ([]common.Hash, error) { + chainConfig := gethexec.TryReadStoredChainConfig(chainDb) + if chainConfig == nil { + return nil, errors.New("database doesn't have a chain config (was this node initialized?)") + } + arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "", true) + if err != nil { + return nil, err + } + defer func() { + err := arbDb.Close() + if err != nil { + log.Warn("failed to close arbitrum database after finding pruning targets", "err", err) + } + }() + roots := importantRoots{ + chainDb: chainDb, + } + genesisNum := chainConfig.ArbitrumChainParams.GenesisBlockNum + genesisHash := rawdb.ReadCanonicalHash(chainDb, genesisNum) + genesisHeader := rawdb.ReadHeader(chainDb, genesisHash, genesisNum) + if genesisHeader == nil { + return nil, errors.New("missing L2 genesis block header") + } + err = roots.addHeader(genesisHeader, false) + if err != nil { + return nil, err + } + if initConfig.Prune == "validator" { + if l1Client == nil || reflect.ValueOf(l1Client).IsNil() { + return nil, errors.New("an L1 connection is required for validator pruning") + } + callOpts := bind.CallOpts{ + Context: ctx, + BlockNumber: big.NewInt(int64(rpc.FinalizedBlockNumber)), + } + rollup, err := 
staker.NewRollupWatcher(rollupAddrs.Rollup, l1Client, callOpts) + if err != nil { + return nil, err + } + latestConfirmedNum, err := rollup.LatestConfirmed(&callOpts) + if err != nil { + return nil, err + } + latestConfirmedNode, err := rollup.LookupNode(ctx, latestConfirmedNum) + if err != nil { + return nil, err + } + confirmedHash := latestConfirmedNode.Assertion.AfterState.GlobalState.BlockHash + confirmedNumber := rawdb.ReadHeaderNumber(chainDb, confirmedHash) + var confirmedHeader *types.Header + if confirmedNumber != nil { + confirmedHeader = rawdb.ReadHeader(chainDb, confirmedHash, *confirmedNumber) + } + if confirmedHeader != nil { + err = roots.addHeader(confirmedHeader, false) + if err != nil { + return nil, err + } + } else { + log.Warn("missing latest confirmed block", "hash", confirmedHash) + } + + validatorDb := rawdb.NewTable(arbDb, storage.BlockValidatorPrefix) + lastValidated, err := staker.ReadLastValidatedInfo(validatorDb) + if err != nil { + return nil, err + } + if lastValidated != nil { + var lastValidatedHeader *types.Header + headerNum := rawdb.ReadHeaderNumber(chainDb, lastValidated.GlobalState.BlockHash) + if headerNum != nil { + lastValidatedHeader = rawdb.ReadHeader(chainDb, lastValidated.GlobalState.BlockHash, *headerNum) + } + if lastValidatedHeader != nil { + err = roots.addHeader(lastValidatedHeader, false) + if err != nil { + return nil, err + } + } else { + log.Warn("missing latest validated block", "hash", lastValidated.GlobalState.BlockHash) + } + } + } else if initConfig.Prune == "full" { + if validatorRequired { + return nil, errors.New("refusing to prune to full-node level when validator is enabled (you should prune in validator mode)") + } + } else if hashListRegex.MatchString(initConfig.Prune) { + parts := strings.Split(initConfig.Prune, ",") + roots := []common.Hash{genesisHeader.Root} + for _, part := range parts { + root := common.HexToHash(part) + if root == genesisHeader.Root { + // This was already included in the builtin list + continue + } + roots = append(roots, root) + } + return roots, nil + } else { + return nil, fmt.Errorf("unknown pruning mode: \"%v\"", initConfig.Prune) + } + if l1Client != nil { + // Find the latest finalized block and add it as a pruning target + l1Block, err := l1Client.BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) + if err != nil { + return nil, fmt.Errorf("failed to get finalized block: %w", err) + } + l1BlockNum := l1Block.NumberU64() + tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil) + if err != nil { + return nil, err + } + batch, err := tracker.GetBatchCount() + if err != nil { + return nil, err + } + for { + if ctx.Err() != nil { + return nil, ctx.Err() + } + if batch == 0 { + // No batch has been finalized + break + } + batch -= 1 + meta, err := tracker.GetBatchMetadata(batch) + if err != nil { + return nil, err + } + if meta.ParentChainBlock <= l1BlockNum { + signedBlockNum := arbutil.MessageCountToBlockNumber(meta.MessageCount, genesisNum) + blockNum := uint64(signedBlockNum) + l2Hash := rawdb.ReadCanonicalHash(chainDb, blockNum) + l2Header := rawdb.ReadHeader(chainDb, l2Hash, blockNum) + if l2Header == nil { + log.Warn("latest finalized L2 block is unknown", "blockNum", signedBlockNum) + break + } + err = roots.addHeader(l2Header, false) + if err != nil { + return nil, err + } + break + } + } + } + roots.roots = append(roots.roots, common.Hash{}) // the latest snapshot + log.Info("found pruning target blocks", "heights", roots.heights, "roots", roots.roots) + return 
roots.roots, nil } + +func PruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) error { + if initConfig.Prune == "" { + return pruner.RecoverPruning(stack.InstanceDir(), chainDb) + } + root, err := findImportantRoots(ctx, chainDb, stack, initConfig, cacheConfig, l1Client, rollupAddrs, validatorRequired) + if err != nil { + return fmt.Errorf("failed to find root to retain for pruning: %w", err) + } + + pruner, err := pruner.NewPruner(chainDb, pruner.Config{Datadir: stack.InstanceDir(), BloomSize: initConfig.PruneBloomSize}) + if err != nil { + return err + } + return pruner.Prune(root) +} diff --git a/cmd/relay/relay.go b/cmd/relay/relay.go index b25aadf57b..40f4f26eec 100644 --- a/cmd/relay/relay.go +++ b/cmd/relay/relay.go @@ -6,7 +6,6 @@ package main import ( "context" "fmt" - "net/http" "os" "os/signal" "syscall" @@ -22,10 +21,6 @@ import ( "github.com/offchainlabs/nitro/relay" ) -func init() { - http.DefaultServeMux = http.NewServeMux() -} - func main() { if err := startup(); err != nil { log.Error("Error running relay", "err", err) diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go index 07bc26af2c..43d90441ef 100644 --- a/cmd/seq-coordinator-manager/seq-coordinator-manager.go +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -34,6 +34,7 @@ type manager struct { livelinessSet map[string]bool priorityList []string nonPriorityList []string + maxURLSize int } func main() { @@ -57,6 +58,9 @@ func main() { }, prioritiesSet: make(map[string]bool), livelinessSet: make(map[string]bool), + // maxURLSize dictates the allowed max length for sequencer URLs + // URLs exceeding this size will be truncated with an ellipsis + maxURLSize: 100, } seqManager.refreshAllLists(ctx) @@ -160,11 +164,12 @@ func main() { flex.SetDirection(tview.FlexRow). AddItem(priorityHeading, 0, 1, false). AddItem(tview.NewFlex(). - AddItem(prioritySeqList, 0, 2, true). + // fixedSize is maxURLSize plus 20 characters to accommodate ellipsis, statuses and emojis + AddItem(prioritySeqList, seqManager.maxURLSize+20, 0, true). AddItem(priorityForm, 0, 3, true), 0, 12, true). AddItem(nonPriorityHeading, 0, 1, false). AddItem(tview.NewFlex(). - AddItem(nonPrioritySeqList, 0, 2, true). + AddItem(nonPrioritySeqList, seqManager.maxURLSize+20, 0, true). AddItem(nonPriorityForm, 0, 3, true), 0, 12, true). AddItem(instructions, 0, 3, false).SetBorder(true) @@ -243,13 +248,22 @@ func (sm *manager) populateLists(ctx context.Context) { if _, ok := sm.livelinessSet[seqURL]; ok { status = fmt.Sprintf("(%d) %v ", index, emoji.GreenCircle) } - prioritySeqList.AddItem(status+seqURL+sec, "", rune(0), nil).SetSecondaryTextColor(tcell.ColorPurple) + url := seqURL + if len(seqURL) > sm.maxURLSize { + url = seqURL[:sm.maxURLSize] + "..." + } + prioritySeqList.AddItem(status+url+sec, "", rune(0), nil).SetSecondaryTextColor(tcell.ColorPurple) + } nonPrioritySeqList.Clear() status := fmt.Sprintf("(-) %v ", emoji.GreenCircle) for _, seqURL := range sm.nonPriorityList { - nonPrioritySeqList.AddItem(status+seqURL, "", rune(0), nil) + url := seqURL + if len(seqURL) > sm.maxURLSize { + url = seqURL[:sm.maxURLSize] + "..." 
+ } + nonPrioritySeqList.AddItem(status+url, "", rune(0), nil) } } diff --git a/execution/gethexec/forwarder.go b/execution/gethexec/forwarder.go index dc3420f8de..984c7224e8 100644 --- a/execution/gethexec/forwarder.go +++ b/execution/gethexec/forwarder.go @@ -151,7 +151,7 @@ func (f *TxForwarder) PublishTransaction(inctx context.Context, tx *types.Transa if err == nil || !f.tryNewForwarderErrors.MatchString(err.Error()) { return err } - log.Info("error forwarding transaction to a backup target", "target", f.targets[pos], "err", err) + log.Warn("error forwarding transaction to a backup target", "target", f.targets[pos], "err", err) } return errors.New("failed to publish transaction to any of the forwarding targets") } @@ -161,7 +161,9 @@ const maxHealthTimeout = 10 * time.Second // CheckHealth returns health of the highest priority forwarding target func (f *TxForwarder) CheckHealth(inctx context.Context) error { - if !f.enabled.Load() { + // If f.enabled is true, len(f.rpcClients) should always be greater than zero, + // but better safe than sorry. + if !f.enabled.Load() || len(f.rpcClients) == 0 { return ErrNoSequencer } f.healthMutex.Lock() @@ -230,6 +232,14 @@ func (f *TxForwarder) Started() bool { return true } +// Returns the URL of the first forwarding target, or an empty string if none are set. +func (f *TxForwarder) PrimaryTarget() string { + if len(f.targets) == 0 { + return "" + } + return f.targets[0] +} + type TxDropper struct{} func NewTxDropper() *TxDropper { diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 4e31f6b9bc..77c14488bc 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -256,7 +256,7 @@ func CreateExecutionNode( } func (n *ExecutionNode) Initialize(ctx context.Context, arbnode interface{}, sync arbitrum.SyncProgressBackend) error { - n.ArbInterface.Initialize(n) + n.ArbInterface.Initialize(arbnode) err := n.Backend.Start() if err != nil { return fmt.Errorf("error starting geth backend: %w", err) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index e86bb04b5d..ba603dd465 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -499,14 +499,14 @@ func (s *Sequencer) ForwardTarget() string { if s.forwarder == nil { return "" } - return s.forwarder.targets[0] + return s.forwarder.PrimaryTarget() } func (s *Sequencer) ForwardTo(url string) error { s.activeMutex.Lock() defer s.activeMutex.Unlock() if s.forwarder != nil { - if s.forwarder.targets[0] == url { + if s.forwarder.PrimaryTarget() == url { log.Warn("attempted to update sequencer forward target with existing target", "url", url) return nil } diff --git a/go.mod b/go.mod index bb01de0dfb..158fc86fc6 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ replace github.com/VictoriaMetrics/fastcache => ./fastcache replace github.com/ethereum/go-ethereum => ./go-ethereum require ( - github.com/EspressoSystems/espresso-sequencer-go v0.0.3 + github.com/EspressoSystems/espresso-sequencer-go v0.0.4 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible github.com/Shopify/toxiproxy v2.1.4+incompatible github.com/alicebob/miniredis/v2 v2.21.0 @@ -39,7 +39,6 @@ require ( github.com/miguelmota/go-ethereum-hdwallet v0.1.2 github.com/multiformats/go-multiaddr v0.9.0 github.com/multiformats/go-multihash v0.2.1 - github.com/prysmaticlabs/prysm/v4 v4.1.1 github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 @@ -84,6 +83,7 @@ require ( 
github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/ceramicnetwork/go-dag-jose v0.1.0 // indirect + github.com/cespare/cp v1.1.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect @@ -116,6 +116,7 @@ require ( github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-playground/validator/v10 v10.13.0 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect @@ -129,7 +130,6 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect github.com/gorilla/mux v1.8.0 // indirect - github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect github.com/h2non/filetype v1.0.6 // indirect @@ -255,10 +255,10 @@ require ( github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/samber/lo v1.36.0 // indirect + github.com/sigurn/crc8 v0.0.0-20220107193325-2243fe600f9f // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/urfave/cli/v2 v2.25.7 // indirect - github.com/uudashr/gocognit v1.0.5 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect diff --git a/go.sum b/go.sum index 0e64ff9dbe..fc1a613b38 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,8 @@ github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3 github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/EspressoSystems/espresso-sequencer-go v0.0.3 h1:A227KQpb46Lfccz+b/OGEfA2+pjrHN8f6+XzxBVUVmw= -github.com/EspressoSystems/espresso-sequencer-go v0.0.3/go.mod h1:T3MyQRnfbCSTBhAAG2WASmVPCwWkl0/sPKKY5Z1ewtg= +github.com/EspressoSystems/espresso-sequencer-go v0.0.4 h1:/um6EBWciClEM2rBLpA9I8vIyok9D9rEGE+7zBVXR6g= +github.com/EspressoSystems/espresso-sequencer-go v0.0.4/go.mod h1:9dSL1bj0l+jpgaMRmi55YeRBd3AhOZz8/HXQcQ42mRQ= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= @@ -152,7 +152,6 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= -github.com/bazelbuild/rules_go v0.23.2 h1:Wxu7JjqnF78cKZbsBsARLSXx/jlGaSLCnUV3mTlyHvM= 
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -203,6 +202,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/ceramicnetwork/go-dag-jose v0.1.0 h1:yJ/HVlfKpnD3LdYP03AHyTvbm3BpPiz2oZiOeReJRdU= github.com/ceramicnetwork/go-dag-jose v0.1.0/go.mod h1:qYA1nYt0X8u4XoMAVoOV3upUVKtrxy/I670Dg5F0wjI= github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= +github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -273,7 +273,6 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= -github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -415,6 +414,7 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.13.0 h1:cFRQdfaSMCOSfGCCLB20MHvuoHb/s5G8L5pu2ppK5AQ= +github.com/go-playground/validator/v10 v10.13.0/go.mod h1:dwu7+CG8/CtBiJFZDz4e+5Upb6OLw04gtBYw0mcG/z4= github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= @@ -573,9 +573,6 @@ github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= -github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= -github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -620,7 +617,6 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -1417,11 +1413,6 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.4.0 h1:CtfRrOVZtbDj8rt1WXjklw0kqqJQwICrCKmlfUuBUUw= github.com/openzipkin/zipkin-go v0.4.0/go.mod h1:4c3sLeE8xjNqehmF5RpAFLPLJxXscc0R4l6Zg0P1tTQ= -github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1488,10 +1479,6 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44 h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo= -github.com/prysmaticlabs/gohashtree v0.0.3-alpha h1:1EVinCWdb3Lorq7xn8DYQHf48nCcdAM3Vb18KsFlRWY= -github.com/prysmaticlabs/prysm/v4 v4.1.1 h1:sbBkgfPzo/SGTJ5IimtsZSGECoRlhbowR1rEhTOdvHo= -github.com/prysmaticlabs/prysm/v4 v4.1.1/go.mod h1:+o907dc4mwEE0wJkQ8RrzCroC+q2WCzdCLtikwonw8c= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= @@ -1565,12 +1552,13 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go. 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sigurn/crc8 v0.0.0-20220107193325-2243fe600f9f h1:1R9KdKjCNSd7F8iGTxIpoID9prlYH8nuNYKt0XvweHA= +github.com/sigurn/crc8 v0.0.0-20220107193325-2243fe600f9f/go.mod h1:vQhwQ4meQEDfahT5kd61wLAF5AAeh5ZPLVI4JJ/tYo8= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= @@ -1628,10 +1616,7 @@ github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpP github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= -github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M= -github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e h1:cR8/SYRgyQCt5cNCMniB/ZScMkhI9nk8U5C7SbISXjo= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -1655,8 +1640,6 @@ github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/X github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4= -github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= @@ -1862,7 +1845,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= @@ -2136,10 +2118,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= diff --git a/linter/comparesame/comparesame.go b/linter/comparesame/comparesame.go deleted file mode 100644 index 1d91592e99..0000000000 --- a/linter/comparesame/comparesame.go +++ /dev/null @@ -1,12 +0,0 @@ -// Static Analyzer to ensure code does not contain comparisons of identical expressions. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/comparesame" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(comparesame.Analyzer) -} diff --git a/linter/cryptorand/cryptorand.go b/linter/cryptorand/cryptorand.go deleted file mode 100644 index a7cd1ca65a..0000000000 --- a/linter/cryptorand/cryptorand.go +++ /dev/null @@ -1,13 +0,0 @@ -// Static Analyzer to ensure the crypto/rand package is used for randomness -// throughout the codebase. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/cryptorand" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(cryptorand.Analyzer) -} diff --git a/linter/errcheck/errcheck.go b/linter/errcheck/errcheck.go deleted file mode 100644 index 8275b2bab1..0000000000 --- a/linter/errcheck/errcheck.go +++ /dev/null @@ -1,12 +0,0 @@ -// Static analyzer to ensure that errors are handled in go code. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/errcheck" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(errcheck.Analyzer) -} diff --git a/linter/featureconfig/featureconfig.go b/linter/featureconfig/featureconfig.go deleted file mode 100644 index bb9c2dd3a1..0000000000 --- a/linter/featureconfig/featureconfig.go +++ /dev/null @@ -1,12 +0,0 @@ -// Static analyzer to prevent leaking globals in tests. 
-package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/featureconfig" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(featureconfig.Analyzer) -} diff --git a/linter/gocognit/gocognit.go b/linter/gocognit/gocognit.go deleted file mode 100644 index a50d0ac08d..0000000000 --- a/linter/gocognit/gocognit.go +++ /dev/null @@ -1,13 +0,0 @@ -// Static analyzer that checks for high cognitive complexity and complains when -// it's too high. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/gocognit" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(gocognit.Analyzer) -} diff --git a/linter/ineffassign/ineffassign.go b/linter/ineffassign/ineffassign.go deleted file mode 100644 index 697e104822..0000000000 --- a/linter/ineffassign/ineffassign.go +++ /dev/null @@ -1,12 +0,0 @@ -// Static analyzer for detecting ineffectual assignments. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/ineffassign" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(ineffassign.Analyzer) -} diff --git a/linter/interfacechecker/interfacechecker.go b/linter/interfacechecker/interfacechecker.go deleted file mode 100644 index 50bb56f749..0000000000 --- a/linter/interfacechecker/interfacechecker.go +++ /dev/null @@ -1,12 +0,0 @@ -// Static analyzer to prevent conditional checks on select interfaces. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/interfacechecker" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(interfacechecker.Analyzer) -} diff --git a/linter/logruswitherror/logruswitherror.go b/linter/logruswitherror/logruswitherror.go deleted file mode 100644 index d0da1fcb12..0000000000 --- a/linter/logruswitherror/logruswitherror.go +++ /dev/null @@ -1,13 +0,0 @@ -// Static analyzer to ensure that log statements do not use errors in -// templated log statements. Authors should use logrus.WithError(). -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/logruswitherror" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(logruswitherror.Analyzer) -} diff --git a/linter/recursivelock/recursivelock.go b/linter/recursivelock/recursivelock.go deleted file mode 100644 index 8c8caff382..0000000000 --- a/linter/recursivelock/recursivelock.go +++ /dev/null @@ -1,12 +0,0 @@ -// Static Analyzer for detecting nested or recursive mutex read lock statements. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/recursivelock" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(recursivelock.Analyzer) -} diff --git a/linter/shadowpredecl/shadowpredecl.go b/linter/shadowpredecl/shadowpredecl.go deleted file mode 100644 index e51ff0da83..0000000000 --- a/linter/shadowpredecl/shadowpredecl.go +++ /dev/null @@ -1,13 +0,0 @@ -// Static analyzer which disallows declaring constructs that shadow predeclared -// Go identifiers by having the same name. 
-package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/shadowpredecl" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(shadowpredecl.Analyzer) -} diff --git a/linter/slicedirect/slicedirect.go b/linter/slicedirect/slicedirect.go deleted file mode 100644 index b31404013d..0000000000 --- a/linter/slicedirect/slicedirect.go +++ /dev/null @@ -1,13 +0,0 @@ -// Static analyzer to ensure that code does not contain applications of [:] -// on expressions which are already slices. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/slicedirect" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(slicedirect.Analyzer) -} diff --git a/linter/uintcast/uintcast.go b/linter/uintcast/uintcast.go deleted file mode 100644 index 1f0eb78575..0000000000 --- a/linter/uintcast/uintcast.go +++ /dev/null @@ -1,13 +0,0 @@ -// Static analyzer for detecting unsafe uint to int casts. -// Use `lint:ignore uintcast` with proper justification to ignore this check. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/uintcast" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(uintcast.Analyzer) -} diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index 3b377bc5a0..bdcfb569f4 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -10,6 +10,7 @@ import ( "math/big" "sort" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -17,6 +18,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" @@ -24,6 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbos/retryables" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/merkletree" @@ -94,14 +97,37 @@ func (n NodeInterface) GetL1Confirmations(c ctx, evm mech, blockHash bytes32) (u return 0, err } } - latestL1Block, latestBatchCount := node.InboxReader.GetLastReadBlockAndBatchCount() - if latestBatchCount <= batch { - return 0, nil // batch was reorg'd out? - } meta, err := node.InboxTracker.GetBatchMetadata(batch) if err != nil { return 0, err } + if node.L1Reader.IsParentChainArbitrum() { + parentChainClient := node.L1Reader.Client() + parentNodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, parentChainClient) + if err != nil { + return 0, err + } + parentChainBlock, err := parentChainClient.BlockByNumber(n.context, new(big.Int).SetUint64(meta.ParentChainBlock)) + if err != nil { + // Hide the parent chain RPC error from the client in case it contains sensitive information. + // Likely though, this error is just "not found" because the block got reorg'd. 
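+ // When this chain's parent is itself an Arbitrum chain (an L3 configuration), confirmation counting is delegated below to the parent chain's own NodeInterface precompile, so the count is ultimately measured against the underlying L1 rather than the immediate parent chain.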
+ return 0, fmt.Errorf("failed to get parent chain block %v containing batch", meta.ParentChainBlock) + } + confs, err := parentNodeInterface.GetL1Confirmations(&bind.CallOpts{Context: n.context}, parentChainBlock.Hash()) + if err != nil { + log.Warn( + "Failed to get L1 confirmations from parent chain", + "blockNumber", meta.ParentChainBlock, + "blockHash", parentChainBlock.Hash(), "err", err, + ) + return 0, fmt.Errorf("failed to get L1 confirmations from parent chain for block %v", parentChainBlock.Hash()) + } + return confs, nil + } + latestL1Block, latestBatchCount := node.InboxReader.GetLastReadBlockAndBatchCount() + if latestBatchCount <= batch { + return 0, nil // batch was reorg'd out? + } if latestL1Block < meta.ParentChainBlock || arbutil.BlockNumberToMessageCount(blockNum, genesis) > meta.MessageCount { return 0, nil } diff --git a/staker/block_validator.go b/staker/block_validator.go index b8001f77ce..192f65cbaa 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -21,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" + "github.com/offchainlabs/nitro/arbnode/resourcemanager" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" @@ -78,6 +79,8 @@ type BlockValidator struct { testingProgressMadeChan chan struct{} fatalErr chan<- error + + MemoryFreeLimitChecker resourcemanager.LimitChecker } type BlockValidatorConfig struct { @@ -90,12 +93,25 @@ type BlockValidatorConfig struct { PendingUpgradeModuleRoot string `koanf:"pending-upgrade-module-root"` // TODO(magic) requires StatelessBlockValidator recreation on hot reload FailureIsFatal bool `koanf:"failure-is-fatal" reload:"hot"` Dangerous BlockValidatorDangerousConfig `koanf:"dangerous"` + MemoryFreeLimit string `koanf:"memory-free-limit" reload:"hot"` + + memoryFreeLimit int + // Espresso specific flags Espresso bool `koanf:"espresso"` HotShotAddress string `koanf:"hotshot-address"` //nolint } func (c *BlockValidatorConfig) Validate() error { + if c.MemoryFreeLimit == "default" { + c.memoryFreeLimit = 1073741824 // 1GB + } else if c.MemoryFreeLimit != "" { + limit, err := resourcemanager.ParseMemLimit(c.MemoryFreeLimit) + if err != nil { + return fmt.Errorf("failed to parse block-validator config memory-free-limit string: %w", err) + } + c.memoryFreeLimit = limit + } return c.ValidationServer.Validate() } @@ -117,6 +133,7 @@ func BlockValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".failure-is-fatal", DefaultBlockValidatorConfig.FailureIsFatal, "failing a validation is treated as a fatal error") f.Bool(prefix+".espresso", DefaultBlockValidatorConfig.Espresso, "if true, hotshot header preimages will be added to validation entries to verify that transactions have been sequenced by espresso") BlockValidatorDangerousConfigAddOptions(prefix+".dangerous", f) + f.String(prefix+".memory-free-limit", DefaultBlockValidatorConfig.MemoryFreeLimit, "minimum free-memory limit after reaching which the blockvalidator pauses validation. 
Enabled by default at 1GB; to disable, provide an empty string") } func BlockValidatorDangerousConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -133,6 +150,7 @@ var DefaultBlockValidatorConfig = BlockValidatorConfig{ PendingUpgradeModuleRoot: "latest", FailureIsFatal: true, Dangerous: DefaultBlockValidatorDangerousConfig, + MemoryFreeLimit: "default", } var TestBlockValidatorConfig = BlockValidatorConfig{ @@ -145,6 +163,7 @@ var TestBlockValidatorConfig = BlockValidatorConfig{ PendingUpgradeModuleRoot: "latest", FailureIsFatal: true, Dangerous: DefaultBlockValidatorDangerousConfig, + MemoryFreeLimit: "default", } var DefaultBlockValidatorDangerousConfig = BlockValidatorDangerousConfig{ @@ -223,6 +242,18 @@ func NewBlockValidator( } streamer.SetBlockValidator(ret) inbox.SetBlockValidator(ret) + if config().MemoryFreeLimit != "" { + limitChecker, err := resourcemanager.NewCgroupsMemoryLimitCheckerIfSupported(config().memoryFreeLimit) + if err != nil { + if config().MemoryFreeLimit == "default" { + log.Warn("Cgroups V1 or V2 is unsupported; the block-validator memory-free-limit feature is disabled") + } else { + return nil, fmt.Errorf("failed to create MemoryFreeLimitChecker: Cgroups V1 or V2 is unsupported") + } + } else { + ret.MemoryFreeLimitChecker = limitChecker + } + } return ret, nil } @@ -566,6 +597,15 @@ func (v *BlockValidator) iterativeValidationEntryCreator(ctx context.Context, ig } func (v *BlockValidator) sendNextRecordRequests(ctx context.Context) (bool, error) { + if v.MemoryFreeLimitChecker != nil { + exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() + if err != nil { + log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) + } + if exceeded { + return false, nil + } + } v.reorgMutex.RLock() pos := v.recordSent() created := v.created() @@ -595,6 +635,15 @@ func (v *BlockValidator) sendNextRecordRequests(ctx context.Context) (bool, erro return true, nil } for pos <= recordUntil { + if v.MemoryFreeLimitChecker != nil { + exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() + if err != nil { + log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) + } + if exceeded { + return false, nil + } + } validationStatus, found := v.validations.Load(pos) if !found { return false, fmt.Errorf("not found entry for pos %d", pos) } @@ -744,6 +793,15 @@ validationsLoop: log.Trace("advanceValidations: no more room", "pos", pos) return nil, nil } + if v.MemoryFreeLimitChecker != nil { + exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() + if err != nil { + log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) + } + if exceeded { + return nil, nil + } + } if currentStatus == Prepared { input, err := validationStatus.Entry.ToInput() if err != nil && ctx.Err() == nil { diff --git a/staker/staker.go b/staker/staker.go index 4f35c1bc9a..2a95e9c9f7 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -21,10 +21,10 @@ import ( flag "github.com/spf13/pflag" "github.com/offchainlabs/nitro/arbnode/dataposter" - "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/staker/txbuilder" + "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -87,7 +86,6 @@ type L1ValidatorConfig struct { GasRefunderAddress string
`koanf:"gas-refunder-address"` DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` RedisUrl string `koanf:"redis-url"` - RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` Dangerous DangerousConfig `koanf:"dangerous"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` @@ -154,7 +153,6 @@ var DefaultL1ValidatorConfig = L1ValidatorConfig{ GasRefunderAddress: "", DataPoster: dataposter.DefaultDataPosterConfigForValidator, RedisUrl: "", - RedisLock: redislock.DefaultCfg, ExtraGas: 50000, Dangerous: DefaultDangerousConfig, ParentChainWallet: DefaultValidatorL1WalletConfig, @@ -175,7 +173,6 @@ var TestL1ValidatorConfig = L1ValidatorConfig{ GasRefunderAddress: "", DataPoster: dataposter.TestDataPosterConfigForValidator, RedisUrl: "", - RedisLock: redislock.DefaultCfg, ExtraGas: 50000, Dangerous: DefaultDangerousConfig, ParentChainWallet: DefaultValidatorL1WalletConfig, @@ -205,7 +202,6 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator") f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfigForValidator) - redislock.AddConfigOptions(prefix+".redis-lock", f) DangerousConfigAddOptions(prefix+".dangerous", f) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) } @@ -406,6 +402,7 @@ func (s *Staker) Start(ctxIn context.Context) { } s.StopWaiter.Start(ctxIn, s) backoff := time.Second + ephemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, "is ahead of on-chain nonce", 0) s.CallIteratively(func(ctx context.Context) (returningWait time.Duration) { defer func() { panicErr := recover() @@ -438,6 +435,7 @@ func (s *Staker) Start(ctxIn context.Context) { } } if err == nil { + ephemeralErrorHandler.Reset() backoff = time.Second stakerLastSuccessfulActionGauge.Update(time.Now().Unix()) stakerActionSuccessCounter.Inc(1) @@ -449,12 +447,14 @@ func (s *Staker) Start(ctxIn context.Context) { } stakerActionFailureCounter.Inc(1) backoff *= 2 + logLevel := log.Error if backoff > time.Minute { backoff = time.Minute - log.Error("error acting as staker", "err", err) } else { - log.Warn("error acting as staker", "err", err) + logLevel = log.Warn } + logLevel = ephemeralErrorHandler.LogLevel(err, logLevel) + logLevel("error acting as staker", "err", err) return backoff }) s.CallIteratively(func(ctx context.Context) time.Duration { diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 873a87c6f9..f7bf74f699 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -20,6 +20,8 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbnode/dataposter/externalsignertest" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/util/redisutil" @@ -60,10 +62,29 @@ func addNewBatchPoster(ctx context.Context, t *testing.T, builder *NodeBuilder, } } +func externalSignerTestCfg(addr common.Address) (*dataposter.ExternalSignerCfg, error) { + cp, err := 
externalsignertest.CertPaths() + if err != nil { + return nil, fmt.Errorf("getting certificates path: %w", err) + } + return &dataposter.ExternalSignerCfg{ + Address: common.Bytes2Hex(addr.Bytes()), + URL: externalsignertest.SignerURL, + Method: externalsignertest.SignerMethod, + RootCA: cp.ServerCert, + ClientCert: cp.ClientCert, + ClientPrivateKey: cp.ClientKey, + }, nil +} + func testBatchPosterParallel(t *testing.T, useRedis bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - httpSrv, srv := newServer(ctx, t) + httpSrv, srv := externalsignertest.NewServer(ctx, t) + cp, err := externalsignertest.CertPaths() + if err != nil { + t.Fatalf("Error getting cert paths: %v", err) + } t.Cleanup(func() { if err := httpSrv.Shutdown(ctx); err != nil { t.Fatalf("Error shutting down http server: %v", err) @@ -71,7 +92,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { }) go func() { log.Debug("Server is listening on port 1234...") - if err := httpSrv.ListenAndServeTLS(signerServerCert, signerServerKey); err != nil && err != http.ErrServerClosed { + if err := httpSrv.ListenAndServeTLS(cp.ServerCert, cp.ServerKey); err != nil && err != http.ErrServerClosed { log.Debug("ListenAndServeTLS() failed", "error", err) return } @@ -93,7 +114,11 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { builder := NewNodeBuilder(ctx).DefaultConfig(t, true) builder.nodeConfig.BatchPoster.Enable = false builder.nodeConfig.BatchPoster.RedisUrl = redisUrl - builder.nodeConfig.BatchPoster.DataPoster.ExternalSigner = *externalSignerTestCfg(srv.address) + signerCfg, err := externalSignerTestCfg(srv.Address) + if err != nil { + t.Fatalf("Error getting external signer config: %v", err) + } + builder.nodeConfig.BatchPoster.DataPoster.ExternalSigner = *signerCfg cleanup := builder.Build(t) defer cleanup() @@ -101,10 +126,10 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { defer cleanupB() builder.L2Info.GenerateAccount("User2") - addNewBatchPoster(ctx, t, builder, srv.address) + addNewBatchPoster(ctx, t, builder, srv.Address) builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ - builder.L1Info.PrepareTxTo("Faucet", &srv.address, 30000, big.NewInt(1e18), nil)}) + builder.L1Info.PrepareTxTo("Faucet", &srv.Address, 30000, big.NewInt(1e18), nil)}) var txs []*types.Transaction @@ -128,20 +153,25 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { builder.nodeConfig.BatchPoster.MaxSize = len(firstTxData) * 2 startL1Block, err := builder.L1.Client.BlockNumber(ctx) Require(t, err) + parentChainID, err := builder.L1.Client.ChainID(ctx) + if err != nil { + t.Fatalf("Failed to get parent chain id: %v", err) + } for i := 0; i < parallelBatchPosters; i++ { // Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race batchPosterConfig := builder.nodeConfig.BatchPoster batchPoster, err := arbnode.NewBatchPoster(ctx, &arbnode.BatchPosterOpts{ - DataPosterDB: nil, - L1Reader: builder.L2.ConsensusNode.L1Reader, - Inbox: builder.L2.ConsensusNode.InboxTracker, - Streamer: builder.L2.ConsensusNode.TxStreamer, - SyncMonitor: builder.L2.ConsensusNode.SyncMonitor, - Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, - DeployInfo: builder.L2.ConsensusNode.DeployInfo, - TransactOpts: &seqTxOpts, - DAWriter: nil, + DataPosterDB: nil, + L1Reader: builder.L2.ConsensusNode.L1Reader, + Inbox: builder.L2.ConsensusNode.InboxTracker, + Streamer: builder.L2.ConsensusNode.TxStreamer, + SyncMonitor: 
builder.L2.ConsensusNode.SyncMonitor, + Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, + DeployInfo: builder.L2.ConsensusNode.DeployInfo, + TransactOpts: &seqTxOpts, + DAWriter: nil, + ParentChainID: parentChainID, }, ) Require(t, err) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 6407164a04..e5a068a7f0 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -698,7 +698,7 @@ func createL2BlockChainWithStackConfig( chainDb, err := stack.OpenDatabase("chaindb", 0, 0, "", false) Require(t, err) - arbDb, err := stack.OpenDatabase("arbdb", 0, 0, "", false) + arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "", false) Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(&l2info.ArbInitData) @@ -782,10 +782,9 @@ func createTestNodeOnL1WithConfigImpl( execConfigFetcher := func() *gethexec.Config { return execConfig } execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher) Require(t, err) - currentNode, err = arbnode.CreateNode( ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, - addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, + addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), ) Require(t, err) @@ -821,7 +820,7 @@ func createTestNode( execNode, err := gethexec.CreateExecutionNode(ctx, stack, chainDb, blockchain, nil, execConfigFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan) + currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, big.NewInt(1337)) Require(t, err) // Give the node an init message @@ -903,7 +902,7 @@ func Create2ndNodeWithConfig( l2chainDb, err := l2stack.OpenDatabase("chaindb", 0, 0, "", false) Require(t, err) - l2arbDb, err := l2stack.OpenDatabase("arbdb", 0, 0, "", false) + l2arbDb, err := l2stack.OpenDatabase("arbitrumdata", 0, 0, "", false) Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(l2InitData) @@ -925,7 +924,7 @@ func Create2ndNodeWithConfig( currentExec, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, configFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan) + currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(13)) Require(t, err) err = currentNode.Start(ctx) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 8c9621d57a..96de52e197 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -123,7 +123,7 @@ func TestDASRekey(t *testing.T) { l1NodeConfigB := arbnode.ConfigDefaultL1NonSequencerTest() sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) sequencerTxOptsPtr := &sequencerTxOpts - + parentChainID := big.NewInt(1337) { authorizeDASKeyset(t, ctx, pubkeyA, l1info, l1client) @@ -141,8 +141,7 @@ func TestDASRekey(t *testing.T) { l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" execA, err := 
gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) Require(t, err) - - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -179,7 +178,7 @@ func TestDASRekey(t *testing.T) { l2chainDb, err := l2stackA.OpenDatabase("chaindb", 0, 0, "", false) Require(t, err) - l2arbDb, err := l2stackA.OpenDatabase("arbdb", 0, 0, "", false) + l2arbDb, err := l2stackA.OpenDatabase("arbitrumdata", 0, 0, "", false) Require(t, err) l2blockchain, err := gethexec.GetBlockChain(l2chainDb, nil, chainConfig, gethexec.ConfigDefaultTest().TxLookupLimit) @@ -189,7 +188,7 @@ func TestDASRekey(t *testing.T) { Require(t, err) l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -322,7 +321,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) sequencerTxOptsPtr := &sequencerTxOpts - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan, big.NewInt(1337)) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) diff --git a/system_tests/espresso_test.go b/system_tests/espresso_test.go index 44bd8f1b0a..174a791341 100644 --- a/system_tests/espresso_test.go +++ b/system_tests/espresso_test.go @@ -11,6 +11,7 @@ import ( "time" espressoClient "github.com/EspressoSystems/espresso-sequencer-go/client" + tagged_base64 "github.com/EspressoSystems/espresso-sequencer-go/tagged-base64" espressoTypes "github.com/EspressoSystems/espresso-sequencer-go/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -80,7 +81,7 @@ func createMockHotShot(ctx context.Context, t *testing.T, l2Info *BlockchainTest httpmock.RegisterResponder( "GET", - `=~http://127.0.0.1:50000/status/latest_block_height`, + `=~http://127.0.0.1:50000/status/block-height`, func(r *http.Request) (*http.Response, error) { return httpmock.NewStringResponse(200, strconv.Itoa(startHotShotBlock)), nil }, @@ -96,13 +97,16 @@ func createMockHotShot(ctx context.Context, t *testing.T, l2Info *BlockchainTest if block < uint64(staleBlocks) { timestamp = 0 } + pc, _ := tagged_base64.New("header", []byte{byte(block)}) header := espressoTypes.Header{ // 
Since we don't realize the validation of espresso yet, // mock a simple nmt root here - Height: block, - TransactionsRoot: espressoTypes.NmtRoot{Root: []byte{}}, - L1Head: 0, // Currently not used - Timestamp: timestamp, + Height: block, + TransactionsRoot: espressoTypes.NmtRoot{Root: []byte{}}, + L1Head: 0, // Currently not used + Timestamp: timestamp, + PayloadCommitment: pc, + BlockMerkleTreeRoot: pc, } return httpmock.NewJsonResponse(200, header) }) diff --git a/system_tests/external_signer.go b/system_tests/external_signer.go deleted file mode 100644 index 1ee9c85581..0000000000 --- a/system_tests/external_signer.go +++ /dev/null @@ -1,188 +0,0 @@ -package arbtest - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "math/big" - "net/http" - "os" - "testing" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/keystore" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/signer/core/apitypes" - "github.com/offchainlabs/nitro/arbnode/dataposter" -) - -var ( - signerPort = 1234 - signerURL = fmt.Sprintf("https://localhost:%v", signerPort) - signerMethod = "test_signTransaction" - signerServerCert = "../arbnode/dataposter/testdata/localhost.crt" - signerServerKey = "../arbnode/dataposter/testdata/localhost.key" - signerClientCert = "../arbnode/dataposter/testdata/client.crt" - signerClientPrivateKey = "../arbnode/dataposter/testdata/client.key" -) - -func externalSignerTestCfg(addr common.Address) *dataposter.ExternalSignerCfg { - return &dataposter.ExternalSignerCfg{ - Address: common.Bytes2Hex(addr.Bytes()), - URL: signerURL, - Method: signerMethod, - RootCA: signerServerCert, - ClientCert: signerClientCert, - ClientPrivateKey: signerClientPrivateKey, - } -} - -type server struct { - handlers map[string]func(*json.RawMessage) (string, error) - signerFn bind.SignerFn - address common.Address -} - -type request struct { - ID *json.RawMessage `json:"id"` - Method string `json:"method"` - Params *json.RawMessage `json:"params"` -} - -type response struct { - ID *json.RawMessage `json:"id"` - Result string `json:"result,omitempty"` -} - -// newServer returns http server and server struct that implements RPC methods. -// It sets up an account in temporary directory and cleans up after test is -// done. 
-func newServer(ctx context.Context, t *testing.T) (*http.Server, *server) { - t.Helper() - signer, address, err := setupAccount("/tmp/keystore") - if err != nil { - t.Fatalf("Error setting up account: %v", err) - } - t.Cleanup(func() { os.RemoveAll("/tmp/keystore") }) - - s := &server{signerFn: signer, address: address} - s.handlers = map[string]func(*json.RawMessage) (string, error){ - signerMethod: s.signTransaction, - } - m := http.NewServeMux() - - clientCert, err := os.ReadFile(signerClientCert) - if err != nil { - t.Fatalf("Error reading client certificate: %v", err) - } - pool := x509.NewCertPool() - pool.AppendCertsFromPEM(clientCert) - - httpSrv := &http.Server{ - Addr: fmt.Sprintf(":%v", signerPort), - Handler: m, - ReadTimeout: 5 * time.Second, - TLSConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: pool, - }, - } - m.HandleFunc("/", s.mux) - return httpSrv, s -} - -// setupAccount creates a new account in a given directory, unlocks it, creates -// signer with that account and returns it along with account address. -func setupAccount(dir string) (bind.SignerFn, common.Address, error) { - ks := keystore.NewKeyStore( - dir, - keystore.StandardScryptN, - keystore.StandardScryptP, - ) - a, err := ks.NewAccount("password") - if err != nil { - return nil, common.Address{}, fmt.Errorf("creating account account: %w", err) - } - if err := ks.Unlock(a, "password"); err != nil { - return nil, common.Address{}, fmt.Errorf("unlocking account: %w", err) - } - txOpts, err := bind.NewKeyStoreTransactorWithChainID(ks, a, big.NewInt(1337)) - if err != nil { - return nil, common.Address{}, fmt.Errorf("creating transactor: %w", err) - } - return txOpts.Signer, a.Address, nil -} - -// UnmarshallFirst unmarshalls slice of params and returns the first one. -// Parameters in Go ethereum RPC calls are marashalled as slices. E.g. 
-// eth_sendRawTransaction or eth_signTransaction, marshall transaction as a -// slice of transactions in a message: -// https://github.com/ethereum/go-ethereum/blob/0004c6b229b787281760b14fb9460ffd9c2496f1/rpc/client.go#L548 -func unmarshallFirst(params []byte) (*types.Transaction, error) { - var arr []apitypes.SendTxArgs - if err := json.Unmarshal(params, &arr); err != nil { - return nil, fmt.Errorf("unmarshaling first param: %w", err) - } - if len(arr) != 1 { - return nil, fmt.Errorf("argument should be a single transaction, but got: %d", len(arr)) - } - return arr[0].ToTransaction(), nil -} - -func (s *server) signTransaction(params *json.RawMessage) (string, error) { - tx, err := unmarshallFirst(*params) - if err != nil { - return "", err - } - signedTx, err := s.signerFn(s.address, tx) - if err != nil { - return "", fmt.Errorf("signing transaction: %w", err) - } - data, err := rlp.EncodeToBytes(signedTx) - if err != nil { - return "", fmt.Errorf("rlp encoding transaction: %w", err) - } - return hexutil.Encode(data), nil -} - -func (s *server) mux(w http.ResponseWriter, r *http.Request) { - body, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, "can't read body", http.StatusBadRequest) - return - } - var req request - if err := json.Unmarshal(body, &req); err != nil { - http.Error(w, "can't unmarshal JSON request", http.StatusBadRequest) - return - } - method, ok := s.handlers[req.Method] - if !ok { - http.Error(w, "method not found", http.StatusNotFound) - return - } - result, err := method(req.Params) - if err != nil { - fmt.Printf("error calling method: %v\n", err) - http.Error(w, "error calling method", http.StatusInternalServerError) - return - } - resp := response{ID: req.ID, Result: result} - respBytes, err := json.Marshal(resp) - if err != nil { - http.Error(w, fmt.Sprintf("error encoding response: %v", err), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - if _, err := w.Write(respBytes); err != nil { - fmt.Printf("error writing response: %v\n", err) - } -} diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 9b2f8d9907..955ed4456f 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -278,7 +278,8 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr asserterExec, err := gethexec.CreateExecutionNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) Require(t, err) - asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan) + parentChainID := big.NewInt(1337) + asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID) Require(t, err) err = asserterL2.Start(ctx) Require(t, err) @@ -289,7 +290,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall challengerRollupAddresses.SequencerInbox = challengerSeqInboxAddr challengerExec, err := gethexec.CreateExecutionNode(ctx, challengerL2Stack, challengerL2ChainDb, challengerL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) Require(t, err) - challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, 
challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan) + challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID) Require(t, err) err = challengerL2.Start(ctx) Require(t, err) diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index 40953a449d..3424a58e9e 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" @@ -73,3 +74,39 @@ func TestL2BlockRangeForL1(t *testing.T) { t.Fatalf("GetL2BlockRangeForL1 didn't fail for an invalid input") } } + +func TestGetL1Confirmations(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) + Require(t, err) + + genesisBlock, err := builder.L2.Client.BlockByNumber(ctx, big.NewInt(0)) + Require(t, err) + l1Confs, err := nodeInterface.GetL1Confirmations(&bind.CallOpts{}, genesisBlock.Hash()) + Require(t, err) + + numTransactions := 200 + + if l1Confs >= uint64(numTransactions) { + t.Fatalf("L1Confirmations for genesis block %v is already %v (over %v)", genesisBlock.Number(), l1Confs, numTransactions) + } + + for i := 0; i < numTransactions; i++ { + builder.L1.TransferBalance(t, "User", "User", common.Big0, builder.L1Info) + } + + l1Confs, err = nodeInterface.GetL1Confirmations(&bind.CallOpts{}, genesisBlock.Hash()) + Require(t, err) + + // Allow a gap of 10 for asynchronicity, just in case + if l1Confs+10 < uint64(numTransactions) { + t.Fatalf("L1Confirmations for genesis block %v is only %v (did not hit expected %v)", genesisBlock.Number(), l1Confs, numTransactions) + } +} diff --git a/system_tests/pruning_test.go b/system_tests/pruning_test.go new file mode 100644 index 0000000000..ef82c0466e --- /dev/null +++ b/system_tests/pruning_test.go @@ -0,0 +1,119 @@ +package arbtest + +import ( + "context" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/node" + "github.com/offchainlabs/nitro/cmd/conf" + "github.com/offchainlabs/nitro/cmd/pruning" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +func countStateEntries(db ethdb.Iteratee) int { + entries := 0 + it := db.NewIterator(nil, nil) + for it.Next() { + isCode, _ := rawdb.IsCodeKey(it.Key()) + if len(it.Key()) == common.HashLength || isCode { + entries++ + } + } + it.Release() + return entries +} + +func TestPruning(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var dataDir string + + func() { + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + _ = builder.Build(t) + dataDir = builder.dataDir + l2cleanupDone := false + defer func() { + if !l2cleanupDone { + builder.L2.cleanup() + } + builder.L1.cleanup() + }() + builder.L2Info.GenerateAccount("User2") + var txs []*types.Transaction + for i := uint64(0); i < 200; i++ { + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + txs = append(txs, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) + Require(t, err) + } + for _, tx := range txs { + _, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + } + l2cleanupDone = true + builder.L2.cleanup() + t.Log("stopped l2 node") + + stack, err := node.New(builder.l2StackConfig) + Require(t, err) + defer stack.Close() + chainDb, err := stack.OpenDatabase("chaindb", 0, 0, "", false) + Require(t, err) + defer chainDb.Close() + chainDbEntriesBeforePruning := countStateEntries(chainDb) + + prand := testhelpers.NewPseudoRandomDataSource(t, 1) + var testKeys [][]byte + for i := 0; i < 100; i++ { + // generate test keys with length of hash to emulate legacy state trie nodes + testKeys = append(testKeys, prand.GetHash().Bytes()) + } + for _, key := range testKeys { + err = chainDb.Put(key, common.FromHex("0xdeadbeef")) + Require(t, err) + } + for _, key := range testKeys { + if has, _ := chainDb.Has(key); !has { + Fatal(t, "internal test error - failed to check existence of test key") + } + } + + initConfig := conf.InitConfigDefault + initConfig.Prune = "full" + coreCacheConfig := gethexec.DefaultCacheConfigFor(stack, &builder.execConfig.Caching) + err = pruning.PruneChainDb(ctx, chainDb, stack, &initConfig, coreCacheConfig, builder.L1.Client, *builder.L2.ConsensusNode.DeployInfo, false) + Require(t, err) + + for _, key := range testKeys { + if has, _ := chainDb.Has(key); has { + Fatal(t, "test key hasn't been pruned as expected") + } + } + + chainDbEntriesAfterPruning := countStateEntries(chainDb) + t.Log("db entries pre-pruning:", chainDbEntriesBeforePruning) + t.Log("db entries post-pruning:", chainDbEntriesAfterPruning) + + if chainDbEntriesAfterPruning >= chainDbEntriesBeforePruning { + Fatal(t, "The db doesn't have fewer entries after pruning than before. 
Before:", chainDbEntriesBeforePruning, "After:", chainDbEntriesAfterPruning) + } + }() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.dataDir = dataDir + cancel = builder.Build(t) + defer cancel() + + builder.L2Info.GenerateAccount("User2") + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + err := builder.L2.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) +} diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 9429155d7c..f5bdca0970 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -334,7 +334,8 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig execNode, err := gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) Require(t, err) - node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan) + parentChainID := big.NewInt(1337) + node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID) Require(t, err) err = node.TxStreamer.AddFakeInitMessage() Require(t, err) @@ -375,7 +376,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig execNode, err = gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) Require(t, err) - node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan) + node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan, parentChainID) Require(t, err) Require(t, node.Start(ctx)) client = ClientForStack(t, stack) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 5db63afc3a..1b1bf55853 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbnode/dataposter/externalsignertest" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/solgen/go/mocksgen" @@ -60,7 +61,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) t.Parallel() ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - httpSrv, srv := newServer(ctx, t) + httpSrv, srv := externalsignertest.NewServer(ctx, t) + cp, err := externalsignertest.CertPaths() + if err != nil { + t.Fatalf("Error getting cert paths: %v", err) + } t.Cleanup(func() { if err := httpSrv.Shutdown(ctx); err != nil { t.Fatalf("Error shutting down http server: %v", err) @@ -68,7 +73,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) }) go func() { log.Debug("Server is listening on port 1234...") - if err := httpSrv.ListenAndServeTLS(signerServerCert, signerServerKey); err != nil && err != http.ErrServerClosed { + if err := httpSrv.ListenAndServeTLS(cp.ServerCert, cp.ServerKey); err != nil && err != http.ErrServerClosed { log.Debug("ListenAndServeTLS() failed", 
"error", err) return } @@ -86,10 +91,10 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) cleanupA := builder.Build(t) defer cleanupA() - addNewBatchPoster(ctx, t, builder, srv.address) + addNewBatchPoster(ctx, t, builder, srv.Address) builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ - builder.L1Info.PrepareTxTo("Faucet", &srv.address, 30000, big.NewInt(1).Mul(big.NewInt(1e18), big.NewInt(1e18)), nil)}) + builder.L1Info.PrepareTxTo("Faucet", &srv.Address, 30000, big.NewInt(1).Mul(big.NewInt(1e18), big.NewInt(1e18)), nil)}) l2nodeA := builder.L2.ConsensusNode execNodeA := builder.L2.ExecNode @@ -152,7 +157,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) rollupABI, err := abi.JSON(strings.NewReader(rollupgen.RollupAdminLogicABI)) Require(t, err, "unable to parse rollup ABI") - setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddrA, l1authB.From, srv.address}, []bool{true, true, true}) + setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddrA, l1authB.From, srv.Address}, []bool{true, true, true}) Require(t, err, "unable to generate setValidator calldata") tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setValidatorCalldata) Require(t, err, "unable to set validators") @@ -170,8 +175,18 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err) valConfig := staker.TestL1ValidatorConfig - - dpA, err := arbnode.StakerDataposter(ctx, rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) + parentChainID, err := builder.L1.Client.ChainID(ctx) + if err != nil { + t.Fatalf("Failed to get parent chain id: %v", err) + } + dpA, err := arbnode.StakerDataposter( + ctx, + rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), + l2nodeA.L1Reader, + &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), + nil, + parentChainID, + ) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } @@ -220,8 +235,19 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } Require(t, err) cfg := arbnode.ConfigDefaultL1NonSequencerTest() - cfg.Staker.DataPoster.ExternalSigner = *externalSignerTestCfg(srv.address) - dpB, err := arbnode.StakerDataposter(ctx, rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeB.L1Reader, &l1authB, NewFetcherFromConfig(cfg), nil) + signerCfg, err := externalSignerTestCfg(srv.Address) + if err != nil { + t.Fatalf("Error getting external signer config: %v", err) + } + cfg.Staker.DataPoster.ExternalSigner = *signerCfg + dpB, err := arbnode.StakerDataposter( + ctx, + rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), + l2nodeB.L1Reader, + &l1authB, NewFetcherFromConfig(cfg), + nil, + parentChainID, + ) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } @@ -387,14 +413,14 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err, "EnsureTxSucceeded failed for staker", stakerName, "tx") } if faultyStaker { - conflictInfo, err := validatorUtils.FindStakerConflict(&bind.CallOpts{}, l2nodeA.DeployInfo.Rollup, l1authA.From, srv.address, big.NewInt(1024)) + conflictInfo, err := validatorUtils.FindStakerConflict(&bind.CallOpts{}, l2nodeA.DeployInfo.Rollup, l1authA.From, srv.Address, big.NewInt(1024)) Require(t, err) if 
staker.ConflictType(conflictInfo.Ty) == staker.CONFLICT_TYPE_FOUND { cancelBackgroundTxs() } } if faultyStaker && !sawStakerZombie { - sawStakerZombie, err = rollup.IsZombie(&bind.CallOpts{}, srv.address) + sawStakerZombie, err = rollup.IsZombie(&bind.CallOpts{}, srv.Address) Require(t, err) } isHonestZombie, err := rollup.IsZombie(&bind.CallOpts{}, valWalletAddrA) @@ -415,7 +441,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err) } if !stakerBWasStaked { - stakerBWasStaked, err = rollup.IsStaked(&bind.CallOpts{}, srv.address) + stakerBWasStaked, err = rollup.IsStaked(&bind.CallOpts{}, srv.Address) Require(t, err) } for j := 0; j < 5; j++ { diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index a5100f74fa..c5b8fa23f1 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -53,6 +53,7 @@ type HeaderReader struct { type cachedHeader struct { mutex sync.Mutex + blockTag string // "safe" or "finalized" rpcBlockNum *big.Int headWhenCached *types.Header header *types.Header @@ -135,8 +136,8 @@ func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSys: arbSys, outChannels: make(map[chan<- *types.Header]struct{}), outChannelsBehind: make(map[chan<- *types.Header]struct{}), - safe: cachedHeader{rpcBlockNum: big.NewInt(rpc.SafeBlockNumber.Int64())}, - finalized: cachedHeader{rpcBlockNum: big.NewInt(rpc.FinalizedBlockNumber.Int64())}, + safe: cachedHeader{blockTag: "safe", rpcBlockNum: big.NewInt(rpc.SafeBlockNumber.Int64())}, + finalized: cachedHeader{blockTag: "finalized", rpcBlockNum: big.NewInt(rpc.FinalizedBlockNumber.Int64())}, }, nil } @@ -470,6 +471,11 @@ func (s *HeaderReader) getCached(ctx context.Context, c *cachedHeader) (*types.H } header, err := s.client.HeaderByNumber(ctx, c.rpcBlockNum) if err != nil { + if !errors.Is(err, context.Canceled) { + log.Warn("Failed to get latest confirmed block", "blockTag", c.blockTag, "err", err) + // Hide error to caller to avoid exposing potentially sensitive L1 information. + err = fmt.Errorf("failed to get latest %v block", c.blockTag) + } return nil, err } c.header = header diff --git a/util/log.go b/util/log.go new file mode 100644 index 0000000000..dbeed8051d --- /dev/null +++ b/util/log.go @@ -0,0 +1,67 @@ +package util + +import ( + "strings" + "time" + + "github.com/ethereum/go-ethereum/log" +) + +// EphemeralErrorHandler handles errors that are ephemeral in nature i.h these are errors +// that we would like to log as a warning unless they repeat for more than a certain duration of time. +type EphemeralErrorHandler struct { + Duration time.Duration + ErrorString string + FirstOccurrence *time.Time + + IgnoreDuration time.Duration + IgnoredErrLogLevel func(string, ...interface{}) // Default IgnoredErrLogLevel is log.Debug +} + +func NewEphemeralErrorHandler(duration time.Duration, errorString string, ignoreDuration time.Duration) *EphemeralErrorHandler { + return &EphemeralErrorHandler{ + Duration: duration, + ErrorString: errorString, + FirstOccurrence: &time.Time{}, + IgnoreDuration: ignoreDuration, + IgnoredErrLogLevel: log.Debug, + } +} + +// LogLevel method defaults to returning the input currentLogLevel if the given error doesnt contain the errorSubstring, +// but if it does, then returns one of the corresponding loglevels as follows +// - IgnoredErrLogLevel - if the error has been repeating for less than the IgnoreDuration of time. 
Defaults to log.Debug +// - log.Warn - if the error has been repeating for less than the given duration of time +// - log.Error - Otherwise +// +// # Usage Examples +// +// ephemeralErrorHandler.Loglevel(err, log.Error)("msg") +// ephemeralErrorHandler.Loglevel(err, log.Error)("msg", "key1", val1, "key2", val2) +// ephemeralErrorHandler.Loglevel(err, log.Error)("msg", "key1", val1) +func (h *EphemeralErrorHandler) LogLevel(err error, currentLogLevel func(msg string, ctx ...interface{})) func(string, ...interface{}) { + if h.ErrorString != "" && !strings.Contains(err.Error(), h.ErrorString) { + h.Reset() + return currentLogLevel + } + + if *h.FirstOccurrence == (time.Time{}) { + *h.FirstOccurrence = time.Now() + } + + if h.IgnoreDuration != 0 && time.Since(*h.FirstOccurrence) < h.IgnoreDuration { + if h.IgnoredErrLogLevel != nil { + return h.IgnoredErrLogLevel + } + return log.Debug + } + + if time.Since(*h.FirstOccurrence) < h.Duration { + return log.Warn + } + return log.Error +} + +func (h *EphemeralErrorHandler) Reset() { + *h.FirstOccurrence = time.Time{} +} diff --git a/util/log_test.go b/util/log_test.go new file mode 100644 index 0000000000..f8007373f2 --- /dev/null +++ b/util/log_test.go @@ -0,0 +1,70 @@ +package util + +import ( + "errors" + "reflect" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" +) + +func compareFunctions(f1, f2 func(msg string, ctx ...interface{})) bool { + return reflect.ValueOf(f1).Pointer() == reflect.ValueOf(f2).Pointer() +} +func TestSimple(t *testing.T) { + allErrHandler := NewEphemeralErrorHandler(2500*time.Millisecond, "", time.Second) + err := errors.New("sample error") + logLevel := allErrHandler.LogLevel(err, log.Error) + if !compareFunctions(log.Debug, logLevel) { + t.Fatalf("incorrect loglevel output. Want: Debug") + } + + time.Sleep(1 * time.Second) + logLevel = allErrHandler.LogLevel(err, log.Error) + if !compareFunctions(log.Warn, logLevel) { + t.Fatalf("incorrect loglevel output. Want: Warn") + } + + time.Sleep(2 * time.Second) + logLevel = allErrHandler.LogLevel(err, log.Error) + if !compareFunctions(log.Error, logLevel) { + t.Fatalf("incorrect loglevel output. Want: Error") + } +} + +func TestComplex(t *testing.T) { + // Simulation: errorA happens continuously for 2 seconds and then errorB happens + errorAHandler := NewEphemeralErrorHandler(time.Second, "errorA", 0) + errorBHandler := NewEphemeralErrorHandler(1500*time.Millisecond, "errorB", 0) + + // Computes result of chaining two ephemeral error handlers for a given recurring error + chainingErrHandlers := func(err error) func(string, ...interface{}) { + logLevel := log.Error + logLevel = errorAHandler.LogLevel(err, logLevel) + logLevel = errorBHandler.LogLevel(err, logLevel) + return logLevel + } + + errA := errors.New("this is a sample errorA") + if !compareFunctions(log.Warn, chainingErrHandlers(errA)) { + t.Fatalf("incorrect loglevel output. Want: Warn") + } + time.Sleep(2 * time.Second) + if !compareFunctions(log.Error, chainingErrHandlers(errA)) { + t.Fatalf("incorrect loglevel output. Want: Error") + } + + errB := errors.New("this is a sample errorB") + if !compareFunctions(log.Warn, chainingErrHandlers(errB)) { + t.Fatalf("incorrect loglevel output. Want: Warn") + } + if !compareFunctions(log.Warn, chainingErrHandlers(errA)) { + t.Fatalf("incorrect loglevel output. Want: Warn") + } + + errC := errors.New("random error") + if !compareFunctions(log.Error, chainingErrHandlers(errC)) { + t.Fatalf("incorrect loglevel output. 
Want: Error") + } +} diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go index 357dfb2e93..6af141c668 100644 --- a/util/redisutil/redis_coordinator.go +++ b/util/redisutil/redis_coordinator.go @@ -89,11 +89,19 @@ func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, error) return prioritiesList, nil } -// GetLiveliness returns a map whose keys are sequencers that have their liveliness set to OK +// GetLiveliness returns a list of sequencers that have their liveliness set to OK func (rc *RedisCoordinator) GetLiveliness(ctx context.Context) ([]string, error) { - livelinessList, _, err := rc.Client.Scan(ctx, 0, WANTS_LOCKOUT_KEY_PREFIX+"*", 0).Result() - if err != nil { - return []string{}, err + var livelinessList []string + cursor := uint64(0) + for { + keySlice, cursor, err := rc.Client.Scan(ctx, cursor, WANTS_LOCKOUT_KEY_PREFIX+"*", 0).Result() + if err != nil { + return []string{}, err + } + livelinessList = append(livelinessList, keySlice...) + if cursor == 0 { + break + } } for i, elem := range livelinessList { url := strings.TrimPrefix(elem, WANTS_LOCKOUT_KEY_PREFIX) diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go index a66ec72fa1..193774667c 100644 --- a/validator/server_jit/jit_machine.go +++ b/validator/server_jit/jit_machine.go @@ -16,17 +16,21 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/validator" ) +var jitWasmMemoryUsage = metrics.NewRegisteredHistogram("jit/wasm/memoryusage", nil, metrics.NewBoundedHistogramSample()) + type JitMachine struct { - binary string - process *exec.Cmd - stdin io.WriteCloser + binary string + process *exec.Cmd + stdin io.WriteCloser + wasmMemoryUsageLimit int } -func createJitMachine(jitBinary string, binaryPath string, cranelift bool, moduleRoot common.Hash, fatalErrChan chan error) (*JitMachine, error) { +func createJitMachine(jitBinary string, binaryPath string, cranelift bool, wasmMemoryUsageLimit int, moduleRoot common.Hash, fatalErrChan chan error) (*JitMachine, error) { invocation := []string{"--binary", binaryPath, "--forks"} if cranelift { invocation = append(invocation, "--cranelift") @@ -45,9 +49,10 @@ func createJitMachine(jitBinary string, binaryPath string, cranelift bool, modul }() machine := &JitMachine{ - binary: binaryPath, - process: process, - stdin: stdin, + binary: binaryPath, + process: process, + stdin: stdin, + wasmMemoryUsageLimit: wasmMemoryUsageLimit, } return machine, nil } @@ -261,8 +266,18 @@ func (machine *JitMachine) prove( if state.BlockHash, err = readHash(); err != nil { return state, err } - state.SendRoot, err = readHash() - return state, err + if state.SendRoot, err = readHash(); err != nil { + return state, err + } + memoryUsed, err := readUint64() + if err != nil { + return state, fmt.Errorf("failed to read memory usage from Jit machine: %w", err) + } + if memoryUsed > uint64(machine.wasmMemoryUsageLimit) { + log.Warn("memory used by jit wasm exceeds the wasm memory usage limit", "limit", machine.wasmMemoryUsageLimit, "memoryUsed", memoryUsed) + } + jitWasmMemoryUsage.Update(int64(memoryUsed)) + return state, nil default: message := "inter-process communication failure" log.Error("Jit Machine Failure", "message", message) diff --git a/validator/server_jit/machine_loader.go b/validator/server_jit/machine_loader.go index 5705a9a387..3a831928b7 100644 --- 
diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go
index a66ec72fa1..193774667c 100644
--- a/validator/server_jit/jit_machine.go
+++ b/validator/server_jit/jit_machine.go
@@ -16,17 +16,21 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 
 	"github.com/offchainlabs/nitro/util/arbmath"
 	"github.com/offchainlabs/nitro/validator"
 )
 
+var jitWasmMemoryUsage = metrics.NewRegisteredHistogram("jit/wasm/memoryusage", nil, metrics.NewBoundedHistogramSample())
+
 type JitMachine struct {
-	binary  string
-	process *exec.Cmd
-	stdin   io.WriteCloser
+	binary               string
+	process              *exec.Cmd
+	stdin                io.WriteCloser
+	wasmMemoryUsageLimit int
 }
 
-func createJitMachine(jitBinary string, binaryPath string, cranelift bool, moduleRoot common.Hash, fatalErrChan chan error) (*JitMachine, error) {
+func createJitMachine(jitBinary string, binaryPath string, cranelift bool, wasmMemoryUsageLimit int, moduleRoot common.Hash, fatalErrChan chan error) (*JitMachine, error) {
 	invocation := []string{"--binary", binaryPath, "--forks"}
 	if cranelift {
 		invocation = append(invocation, "--cranelift")
@@ -45,9 +49,10 @@ func createJitMachine(jitBinary string, binaryPath string, cranelift bool, modul
 	}()
 
 	machine := &JitMachine{
-		binary:  binaryPath,
-		process: process,
-		stdin:   stdin,
+		binary:               binaryPath,
+		process:              process,
+		stdin:                stdin,
+		wasmMemoryUsageLimit: wasmMemoryUsageLimit,
 	}
 	return machine, nil
 }
@@ -261,8 +266,18 @@ func (machine *JitMachine) prove(
 		if state.BlockHash, err = readHash(); err != nil {
 			return state, err
 		}
-		state.SendRoot, err = readHash()
-		return state, err
+		if state.SendRoot, err = readHash(); err != nil {
+			return state, err
+		}
+		memoryUsed, err := readUint64()
+		if err != nil {
+			return state, fmt.Errorf("failed to read memory usage from Jit machine: %w", err)
+		}
+		if memoryUsed > uint64(machine.wasmMemoryUsageLimit) {
+			log.Warn("memory used by jit wasm exceeds the wasm memory usage limit", "limit", machine.wasmMemoryUsageLimit, "memoryUsed", memoryUsed)
+		}
+		jitWasmMemoryUsage.Update(int64(memoryUsed))
+		return state, nil
 	default:
 		message := "inter-process communication failure"
 		log.Error("Jit Machine Failure", "message", message)
diff --git a/validator/server_jit/machine_loader.go b/validator/server_jit/machine_loader.go
index 5705a9a387..3a831928b7 100644
--- a/validator/server_jit/machine_loader.go
+++ b/validator/server_jit/machine_loader.go
@@ -13,13 +13,15 @@ import (
 )
 
 type JitMachineConfig struct {
-	ProverBinPath string
-	JitCranelift  bool
+	ProverBinPath        string
+	JitCranelift         bool
+	WasmMemoryUsageLimit int
 }
 
 var DefaultJitMachineConfig = JitMachineConfig{
-	JitCranelift:  true,
-	ProverBinPath: "replay.wasm",
+	JitCranelift:         true,
+	ProverBinPath:        "replay.wasm",
+	WasmMemoryUsageLimit: 4294967296,
 }
 
 func getJitPath() (string, error) {
@@ -57,7 +59,7 @@ func NewJitMachineLoader(config *JitMachineConfig, locator *server_common.Machin
 	}
 	createMachineThreadFunc := func(ctx context.Context, moduleRoot common.Hash) (*JitMachine, error) {
 		binPath := filepath.Join(locator.GetMachinePath(moduleRoot), config.ProverBinPath)
-		return createJitMachine(jitPath, binPath, config.JitCranelift, moduleRoot, fatalErrChan)
+		return createJitMachine(jitPath, binPath, config.JitCranelift, config.WasmMemoryUsageLimit, moduleRoot, fatalErrChan)
 	}
 	return &JitMachineLoader{
 		MachineLoader: *server_common.NewMachineLoader[JitMachine](locator, createMachineThreadFunc),
diff --git a/validator/server_jit/spawner.go b/validator/server_jit/spawner.go
index ff1749506a..6489821b5b 100644
--- a/validator/server_jit/spawner.go
+++ b/validator/server_jit/spawner.go
@@ -18,18 +18,23 @@ import (
 type JitSpawnerConfig struct {
 	Workers   int  `koanf:"workers" reload:"hot"`
 	Cranelift bool `koanf:"cranelift"`
+
+	// TODO: change WasmMemoryUsageLimit to a string and use resourcemanager.ParseMemLimit
+	WasmMemoryUsageLimit int `koanf:"wasm-memory-usage-limit"`
 }
 
 type JitSpawnerConfigFecher func() *JitSpawnerConfig
 
 var DefaultJitSpawnerConfig = JitSpawnerConfig{
-	Workers:   0,
-	Cranelift: true,
+	Workers:              0,
+	Cranelift:            true,
+	WasmMemoryUsageLimit: 4294967296, // 2^32 bytes, the WASM memory limit
 }
 
 func JitSpawnerConfigAddOptions(prefix string, f *flag.FlagSet) {
 	f.Int(prefix+".workers", DefaultJitSpawnerConfig.Workers, "number of concurrent validation threads")
 	f.Bool(prefix+".cranelift", DefaultJitSpawnerConfig.Cranelift, "use Cranelift instead of LLVM when validating blocks using the jit-accelerated block validator")
+	f.Int(prefix+".wasm-memory-usage-limit", DefaultJitSpawnerConfig.WasmMemoryUsageLimit, "if memory used by a jit wasm exceeds this limit, a warning is logged")
 }
 
 type JitSpawner struct {
@@ -44,6 +49,7 @@ func NewJitSpawner(locator *server_common.MachineLocator, config JitSpawnerConfi
 	// TODO - preload machines
 	machineConfig := DefaultJitMachineConfig
 	machineConfig.JitCranelift = config().Cranelift
+	machineConfig.WasmMemoryUsageLimit = config().WasmMemoryUsageLimit
 	loader, err := NewJitMachineLoader(&machineConfig, locator, fatalErrChan)
 	if err != nil {
 		return nil, err
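On the new `wasm-memory-usage-limit` option: the default of 4294967296 is 2^32 bytes, the ceiling of a 32-bit WASM address space, and exceeding it only logs a warning rather than failing validation. The TODO above suggests eventually accepting a human-readable string; a hypothetical sketch of what that parsing could look like (the real `resourcemanager.ParseMemLimit` may behave differently):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMemLimit converts strings like "512Mi" or "4Gi" into a byte count.
// Plain numbers are taken as raw bytes. Hypothetical helper, for illustration.
func parseMemLimit(s string) (int64, error) {
	units := []struct {
		suffix string
		mult   int64
	}{
		{"Ti", 1 << 40}, {"Gi", 1 << 30}, {"Mi", 1 << 20}, {"Ki", 1 << 10},
	}
	for _, u := range units {
		if strings.HasSuffix(s, u.suffix) {
			n, err := strconv.ParseInt(strings.TrimSuffix(s, u.suffix), 10, 64)
			if err != nil {
				return 0, err
			}
			return n * u.mult, nil
		}
	}
	return strconv.ParseInt(s, 10, 64)
}

func main() {
	limit, err := parseMemLimit("4Gi")
	fmt.Println(limit, err) // 4294967296 <nil>
}
```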
diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go
index 49cd2af7e6..6f5bf54e4d 100644
--- a/wsbroadcastserver/clientconnection.go
+++ b/wsbroadcastserver/clientconnection.go
@@ -119,6 +119,7 @@ func (cc *ClientConnection) Remove() {
 
 func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.BacklogSegment) error {
 	var prevSegment backlog.BacklogSegment
+	isFirstSegment := true
 	for !backlog.IsBacklogSegmentNil(segment) {
 		// must get the next segment before the messages to be sent are
 		// retrieved ensures another segment is not added in between calls.
@@ -132,10 +133,17 @@ func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.Ba
 		}
 
 		msgs := prevSegment.Messages()
-		if prevSegment.Contains(uint64(cc.requestedSeqNum)) {
+		if isFirstSegment && prevSegment.Contains(uint64(cc.requestedSeqNum)) {
 			requestedIdx := int(cc.requestedSeqNum) - int(prevSegment.Start())
-			msgs = msgs[requestedIdx:]
+			// This check might fail if messages were added to the segment after we fetched its messages
+			if len(msgs) >= requestedIdx {
+				msgs = msgs[requestedIdx:]
+			}
+		}
+		if len(msgs) == 0 {
+			break
 		}
+		isFirstSegment = false
 		bm := &m.BroadcastMessage{
 			Version:  m.V1,
 			Messages: msgs,
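For intuition on the `writeBacklog` change, here is a standalone sketch of the slicing rule it enforces (simplified stand-in types, not the real `backlog.BacklogSegment` interface): only the first segment is trimmed to the client's requested sequence number, and the computed offset is bounds-checked because the segment can grow between the `Messages()` snapshot and the `Contains` check.

```go
package main

import "fmt"

type segment struct {
	start uint64   // sequence number of the first message
	msgs  []string // snapshot of the segment's messages
}

// trimToRequested drops messages before requestedSeqNum, but only on the
// first segment, and falls back to the full snapshot if the offset lands
// past the slice (i.e. the segment grew after the snapshot was taken).
func trimToRequested(seg segment, requestedSeqNum uint64, isFirst bool) []string {
	msgs := seg.msgs
	if isFirst && requestedSeqNum >= seg.start {
		idx := int(requestedSeqNum - seg.start)
		if len(msgs) >= idx {
			msgs = msgs[idx:]
		}
	}
	return msgs
}

func main() {
	seg := segment{start: 100, msgs: []string{"m100", "m101", "m102"}}
	fmt.Println(trimToRequested(seg, 101, true))  // [m101 m102]
	fmt.Println(trimToRequested(seg, 200, true))  // offset out of range: keep snapshot
	fmt.Println(trimToRequested(seg, 101, false)) // later segments are sent whole
}
```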