From e9259b783f84229b0101191d1714a93c1bbe2bb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=96mer=20Faruk=20Irmak?= Date: Wed, 4 Sep 2024 13:33:51 +0300 Subject: [PATCH] feat: implement and integrate counter-based CCC (#982) Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com> Co-authored-by: Zhang Zhuo Co-authored-by: Xi Lin --- accounts/abi/bind/backends/simulated.go | 1 + cmd/utils/flags.go | 5 + core/evm.go | 8 +- core/rawdb/accessors_row_consumption.go | 5 + core/state_processor.go | 6 + core/state_transition.go | 1 + core/types/transaction.go | 27 +- core/vm/evm.go | 8 +- eth/backend.go | 2 +- miner/miner.go | 1 + miner/miner_test.go | 1 + miner/scroll_worker.go | 835 ++++++++++++++--------- miner/scroll_worker_test.go | 212 ++++-- params/version.go | 2 +- rollup/ccc/async_checker.go | 46 +- rollup/ccc/async_checker_test.go | 4 +- rollup/ccc/libzkp/Cargo.lock | 29 +- rollup/ccc/libzkp/Cargo.toml | 2 +- rollup/ccc/logger.go | 867 ++++++++++++++++++++++++ 19 files changed, 1645 insertions(+), 417 deletions(-) create mode 100644 rollup/ccc/logger.go diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 949210ae0b3d..3a105e2e5669 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -821,6 +821,7 @@ func (m callMsg) Value() *big.Int { return m.CallMsg.Value } func (m callMsg) Data() []byte { return m.CallMsg.Data } func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList } func (m callMsg) IsL1MessageTx() bool { return false } +func (m callMsg) TxSize() common.StorageSize { return 0 } // filterBackend implements filters.Backend to support filtering for logs without // taking bloom-bits acceleration structures into account. diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 4e1b2f86b9e5..3e4ad289a906 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1551,6 +1551,11 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { if ctx.GlobalIsSet(LegacyMinerGasTargetFlag.Name) { log.Warn("The generic --miner.gastarget flag is deprecated and will be removed in the future!") } + + cfg.CCCMaxWorkers = runtime.GOMAXPROCS(0) + if ctx.GlobalIsSet(CircuitCapacityCheckWorkersFlag.Name) { + cfg.CCCMaxWorkers = int(ctx.GlobalUint(CircuitCapacityCheckWorkersFlag.Name)) + } } func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) { diff --git a/core/evm.go b/core/evm.go index f9f33086c3a9..edbb08b9c24b 100644 --- a/core/evm.go +++ b/core/evm.go @@ -70,9 +70,11 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, chainConfig *p // NewEVMTxContext creates a new transaction context for a single transaction. 
func NewEVMTxContext(msg Message) vm.TxContext { return vm.TxContext{ - Origin: msg.From(), - To: msg.To(), - GasPrice: new(big.Int).Set(msg.GasPrice()), + Origin: msg.From(), + To: msg.To(), + GasPrice: new(big.Int).Set(msg.GasPrice()), + IsL1MessageTx: msg.IsL1MessageTx(), + TxSize: msg.TxSize(), } } diff --git a/core/rawdb/accessors_row_consumption.go b/core/rawdb/accessors_row_consumption.go index a737304ded37..35800a132e26 100644 --- a/core/rawdb/accessors_row_consumption.go +++ b/core/rawdb/accessors_row_consumption.go @@ -49,3 +49,8 @@ func ReadBlockRowConsumptionRLP(db ethdb.Reader, l2BlockHash common.Hash) rlp.Ra } return data } + +// DeleteBlockRowConsumption deletes a RowConsumption of the block from the database +func DeleteBlockRowConsumption(db ethdb.KeyValueWriter, l2BlockHash common.Hash) error { + return db.Delete(rowConsumptionKey(l2BlockHash)) +} diff --git a/core/state_processor.go b/core/state_processor.go index 8b4671d76768..58acf8747642 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,6 +17,7 @@ package core import ( + "errors" "fmt" "math/big" "time" @@ -132,6 +133,11 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon // Apply the transaction to the current state (included in the env). applyMessageStartTime := time.Now() result, err := ApplyMessage(evm, msg, gp, l1DataFee) + if evm.Config.Debug { + if erroringTracer, ok := evm.Config.Tracer.(interface{ Error() error }); ok { + err = errors.Join(err, erroringTracer.Error()) + } + } applyMessageTimer.Update(time.Since(applyMessageStartTime)) if err != nil { return nil, err diff --git a/core/state_transition.go b/core/state_transition.go index b498ca44c5ea..f86baee28672 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -90,6 +90,7 @@ type Message interface { Data() []byte AccessList() types.AccessList IsL1MessageTx() bool + TxSize() common.StorageSize } // ExecutionResult includes all output after executing given evm diff --git a/core/types/transaction.go b/core/types/transaction.go index ac19ef0f6a30..f7fbcd31c28e 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -752,6 +752,7 @@ type Message struct { accessList AccessList isFake bool isL1MessageTx bool + txSize common.StorageSize } func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice, gasFeeCap, gasTipCap *big.Int, data []byte, accessList AccessList, isFake bool) Message { @@ -785,6 +786,7 @@ func (tx *Transaction) AsMessage(s Signer, baseFee *big.Int) (Message, error) { accessList: tx.AccessList(), isFake: false, isL1MessageTx: tx.IsL1MessageTx(), + txSize: tx.Size(), } // If baseFee provided, set gasPrice to effectiveGasPrice. 
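A note on the `applyTransaction` hunk above: when tracing is enabled, the tracer itself can now veto a transaction by reporting its own error, which gets folded into the execution result. This is what lets a pure tracer like the new `ccc.Logger` reject a transaction without `ApplyMessage` knowing anything about circuits. A minimal, self-contained sketch of that optional-interface pattern (names here are illustrative, not the real `vm.Tracer` API):

```go
package main

import (
	"errors"
	"fmt"
)

// cccLogger stands in for a tracer that accumulates circuit row usage
// and remembers the first limit it exceeded.
type cccLogger struct{ err error }

func (l *cccLogger) Error() error { return l.err }

// apply mirrors the patched applyTransaction: if the configured tracer
// happens to expose `Error() error`, fold that into the execution error.
func apply(tracer any, execErr error) error {
	if et, ok := tracer.(interface{ Error() error }); ok {
		execErr = errors.Join(execErr, et.Error())
	}
	return execErr
}

func main() {
	fmt.Println(apply(&cccLogger{err: errors.New("row consumption overflow")}, nil))
}
```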
if baseFee != nil { @@ -795,18 +797,19 @@ func (tx *Transaction) AsMessage(s Signer, baseFee *big.Int) (Message, error) { return msg, err } -func (m Message) From() common.Address { return m.from } -func (m Message) To() *common.Address { return m.to } -func (m Message) GasPrice() *big.Int { return m.gasPrice } -func (m Message) GasFeeCap() *big.Int { return m.gasFeeCap } -func (m Message) GasTipCap() *big.Int { return m.gasTipCap } -func (m Message) Value() *big.Int { return m.amount } -func (m Message) Gas() uint64 { return m.gasLimit } -func (m Message) Nonce() uint64 { return m.nonce } -func (m Message) Data() []byte { return m.data } -func (m Message) AccessList() AccessList { return m.accessList } -func (m Message) IsFake() bool { return m.isFake } -func (m Message) IsL1MessageTx() bool { return m.isL1MessageTx } +func (m Message) From() common.Address { return m.from } +func (m Message) To() *common.Address { return m.to } +func (m Message) GasPrice() *big.Int { return m.gasPrice } +func (m Message) GasFeeCap() *big.Int { return m.gasFeeCap } +func (m Message) GasTipCap() *big.Int { return m.gasTipCap } +func (m Message) Value() *big.Int { return m.amount } +func (m Message) Gas() uint64 { return m.gasLimit } +func (m Message) Nonce() uint64 { return m.nonce } +func (m Message) Data() []byte { return m.data } +func (m Message) AccessList() AccessList { return m.accessList } +func (m Message) IsFake() bool { return m.isFake } +func (m Message) IsL1MessageTx() bool { return m.isL1MessageTx } +func (m Message) TxSize() common.StorageSize { return m.txSize } // copyAddressPtr copies an address. func copyAddressPtr(a *common.Address) *common.Address { diff --git a/core/vm/evm.go b/core/vm/evm.go index 07872c4a950e..b20e0a876fac 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -87,9 +87,11 @@ type BlockContext struct { // All fields can change between transactions. 
type TxContext struct { // Message information - Origin common.Address // Provides information for ORIGIN - To *common.Address // Provides information for trace - GasPrice *big.Int // Provides information for GASPRICE + Origin common.Address // Provides information for ORIGIN + To *common.Address // Provides information for trace + IsL1MessageTx bool // Provides information for trace + TxSize common.StorageSize // Provides information for trace + GasPrice *big.Int // Provides information for GASPRICE } // EVM is the Ethereum Virtual Machine base object and provides diff --git a/eth/backend.go b/eth/backend.go index 8f1109d43e76..7651c72f29c6 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -200,7 +200,7 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client sync_service.EthCl return nil, err } if config.CheckCircuitCapacity { - eth.asyncChecker = ccc.NewAsyncChecker(eth.blockchain, config.CCCMaxWorkers, true) + eth.asyncChecker = ccc.NewAsyncChecker(eth.blockchain, config.CCCMaxWorkers, false) eth.asyncChecker.WithOnFailingBlock(func(b *types.Block, err error) { log.Warn("block failed CCC check, it will be reorged by the sequencer", "hash", b.Hash(), "err", err) }) diff --git a/miner/miner.go b/miner/miner.go index 4c79ef4d182e..f0920ade1376 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -59,6 +59,7 @@ type Config struct { StoreSkippedTxTraces bool // Whether store the wrapped traces when storing a skipped tx MaxAccountsNum int // Maximum number of accounts that miner will fetch the pending transactions of when building a new block + CCCMaxWorkers int // Maximum number of workers to use for async CCC tasks } // Miner creates blocks and searches for proof-of-work values. diff --git a/miner/miner_test.go b/miner/miner_test.go index 820961711160..d84c9aea703e 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -251,6 +251,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) { config := Config{ Etherbase: common.HexToAddress("123456789"), MaxAccountsNum: math.MaxInt, + CCCMaxWorkers: 2, } // Create chainConfig memdb := memorydb.New() diff --git a/miner/scroll_worker.go b/miner/scroll_worker.go index bc91a143899c..c8ccbe059f49 100644 --- a/miner/scroll_worker.go +++ b/miner/scroll_worker.go @@ -17,8 +17,8 @@ package miner import ( - "bytes" "errors" + "fmt" "math" "math/big" "sync" @@ -32,13 +32,13 @@ import ( "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/state" "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/core/vm" "github.com/scroll-tech/go-ethereum/event" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/metrics" "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/rollup/ccc" "github.com/scroll-tech/go-ethereum/rollup/fees" - "github.com/scroll-tech/go-ethereum/rollup/pipeline" "github.com/scroll-tech/go-ethereum/trie" ) @@ -52,18 +52,19 @@ const ( ) var ( + deadCh = make(chan time.Time) + + ErrUnexpectedL1MessageIndex = errors.New("unexpected L1 message index") + // Metrics for the skipped txs - l1TxGasLimitExceededCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/gas_limit_exceeded", nil) - l1TxRowConsumptionOverflowCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/row_consumption_overflow", nil) - l2TxRowConsumptionOverflowCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l2/row_consumption_overflow", nil) - l1TxCccUnknownErrCounter = 
metrics.NewRegisteredCounter("miner/skipped_txs/l1/ccc_unknown_err", nil) - l2TxCccUnknownErrCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l2/ccc_unknown_err", nil) - l1TxStrangeErrCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/strange_err", nil) + l1SkippedCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1", nil) + l2SkippedCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l2", nil) collectL1MsgsTimer = metrics.NewRegisteredTimer("miner/collect_l1_msgs", nil) prepareTimer = metrics.NewRegisteredTimer("miner/prepare", nil) collectL2Timer = metrics.NewRegisteredTimer("miner/collect_l2_txns", nil) l2CommitTimer = metrics.NewRegisteredTimer("miner/commit", nil) + cccStallTimer = metrics.NewRegisteredTimer("miner/ccc_stall", nil) commitReasonCCCCounter = metrics.NewRegisteredCounter("miner/commit_reason_ccc", nil) commitReasonDeadlineCounter = metrics.NewRegisteredCounter("miner/commit_reason_deadline", nil) @@ -77,6 +78,39 @@ type prioritizedTransaction struct { tx *types.Transaction } +// work represents the active block building task +type work struct { + deadlineTimer *time.Timer + deadlineReached bool + cccLogger *ccc.Logger + vmConfig vm.Config + + reorgReason error + + // accumulated state + nextL1MsgIndex uint64 + gasPool *core.GasPool + blockSize common.StorageSize + + header *types.Header + state *state.StateDB + txs types.Transactions + receipts types.Receipts + coalescedLogs []*types.Log +} + +func (w *work) deadlineCh() <-chan time.Time { + if w == nil { + return deadCh + } + return w.deadlineTimer.C +} + +type reorgTrigger struct { + block *types.Block + reason error +} + // worker is the main object which takes care of submitting new work to consensus engine // and gathering the sealing result. type worker struct { @@ -99,11 +133,10 @@ type worker struct { // Channels startCh chan struct{} exitCh chan struct{} + reorgCh chan reorgTrigger - wg sync.WaitGroup - - currentPipelineStart time.Time - currentPipeline *pipeline.Pipeline + wg sync.WaitGroup + current *work mu sync.RWMutex // The lock used to protect the coinbase and extra fields coinbase common.Address @@ -128,29 +161,33 @@ type worker struct { // External functions isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner. - circuitCapacityChecker *ccc.Checker - prioritizedTx *prioritizedTransaction + prioritizedTx *prioritizedTransaction + + asyncChecker *ccc.AsyncChecker // Test hooks beforeTxHook func() // Method to call before processing a transaction. 
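The `deadCh`/`deadlineCh` pair above is worth calling out: `deadlineCh` is deliberately safe to call on a nil `*work`, returning a package-level channel that never fires, so `mainLoop` can select on the deadline unconditionally even when no block is being built. A standalone sketch of the idiom:

```go
package main

import (
	"fmt"
	"time"
)

var dead = make(chan time.Time) // never written to, never closed

type work struct{ timer *time.Timer }

func (w *work) deadlineCh() <-chan time.Time {
	if w == nil {
		return dead // selecting on this blocks forever
	}
	return w.timer.C
}

func main() {
	var current *work // no active block-building task
	select {
	case <-current.deadlineCh():
		fmt.Println("deadline") // unreachable while current == nil
	case <-time.After(10 * time.Millisecond):
		fmt.Println("no active work, deadline never fires")
	}
}
```

The two test-only fields that close the worker struct below, `errCountdown` and `skipTxHash`, feed the fault-injection helpers added further down.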
+ + errCountdown int + skipTxHash common.Hash } func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker { worker := &worker{ - config: config, - chainConfig: chainConfig, - engine: engine, - eth: eth, - mux: mux, - chain: eth.BlockChain(), - isLocalBlock: isLocalBlock, - txsCh: make(chan core.NewTxsEvent, txChanSize), - chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), - exitCh: make(chan struct{}), - startCh: make(chan struct{}, 1), - circuitCapacityChecker: ccc.NewChecker(true), - } - log.Info("created new worker", "CircuitCapacityChecker ID", worker.circuitCapacityChecker.ID) + config: config, + chainConfig: chainConfig, + engine: engine, + eth: eth, + mux: mux, + chain: eth.BlockChain(), + isLocalBlock: isLocalBlock, + txsCh: make(chan core.NewTxsEvent, txChanSize), + chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), + exitCh: make(chan struct{}), + startCh: make(chan struct{}, 1), + reorgCh: make(chan reorgTrigger, 1), + } + worker.asyncChecker = ccc.NewAsyncChecker(worker.chain, config.CCCMaxWorkers, false).WithOnFailingBlock(worker.onBlockFailingCCC) // Subscribe NewTxsEvent for tx pool worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) @@ -174,12 +211,6 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus return worker } -// getCCC returns a pointer to this worker's CCC instance. -// Only used in tests. -func (w *worker) getCCC() *ccc.Checker { - return w.circuitCapacityChecker -} - // setEtherbase sets the etherbase used to initialize the block coinbase field. func (w *worker) setEtherbase(addr common.Address) { w.mu.Lock() @@ -261,44 +292,93 @@ func (w *worker) close() { w.wg.Wait() } +// checkHeadRowConsumption will start some initial workers to CCC check block close to the HEAD +func (w *worker) checkHeadRowConsumption() error { + checkStart := uint64(1) + numOfBlocksToCheck := uint64(w.config.CCCMaxWorkers + 1) + currentHeight := w.chain.CurrentHeader().Number.Uint64() + if currentHeight > numOfBlocksToCheck { + checkStart = currentHeight - numOfBlocksToCheck + } + + for curBlockNum := checkStart; curBlockNum <= currentHeight; curBlockNum++ { + block := w.chain.GetBlockByNumber(curBlockNum) + // only spawn CCC checkers for blocks with no row consumption data stored in DB + if rawdb.ReadBlockRowConsumption(w.chain.Database(), block.Hash()) == nil { + if err := w.asyncChecker.Check(block); err != nil { + return err + } + } + } + + return nil +} + // mainLoop is a standalone goroutine to regenerate the sealing task based on the received event. 
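Before the `mainLoop` body below, note its control flow: each iteration checks `reorgCh` with a non-blocking `select` before entering the main blocking `select`. Go picks among ready cases pseudo-randomly, so without the pre-drain a pending reorg could repeatedly lose the race against tx or chain-head events. The pattern in isolation:

```go
package main

import "fmt"

func main() {
	reorgCh := make(chan string, 1)
	txsCh := make(chan string, 1)
	reorgCh <- "reorg"
	txsCh <- "txs"

	for i := 0; i < 2; i++ {
		// drain high-priority events first, without blocking
		select {
		case ev := <-reorgCh:
			fmt.Println("handled first:", ev)
			continue
		default:
		}
		// then wait on the full event set
		select {
		case ev := <-reorgCh:
			fmt.Println(ev)
		case ev := <-txsCh:
			fmt.Println(ev)
		}
	}
}
```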
func (w *worker) mainLoop() { defer w.wg.Done() + defer w.asyncChecker.Wait() defer w.txsSub.Unsubscribe() defer w.chainHeadSub.Unsubscribe() - - deadCh := make(chan *pipeline.Result) - pipelineResultCh := func() <-chan *pipeline.Result { - if w.currentPipeline == nil { - return deadCh + defer func() { + // training wheels on + // lets not crash the node and allow us some time to inspect + p := recover() + if p != nil { + log.Error("worker mainLoop panic", "panic", p) } - return w.currentPipeline.ResultCh - } + }() + var err error for { + if _, isRetryable := err.(retryableCommitError); isRetryable { + if _, err = w.tryCommitNewWork(time.Now(), w.current.header.ParentHash, w.current.reorgReason); err != nil { + continue + } + } else if err != nil { + log.Error("failed to mine block", "err", err) + w.current = nil + } + + // check for reorgs first to lower the chances of trying to handle another + // event eventhough a reorg is pending (due to Go `select` pseudo-randomly picking a case + // to execute if multiple of them are ready) + select { + case trigger := <-w.reorgCh: + err = w.handleReorg(&trigger) + continue + default: + } + select { case <-w.startCh: - w.startNewPipeline(time.Now().Unix()) - case <-w.chainHeadCh: - w.startNewPipeline(time.Now().Unix()) - case result := <-pipelineResultCh(): - w.handlePipelineResult(result) + if err := w.checkHeadRowConsumption(); err != nil { + log.Error("failed to start head checkers", "err", err) + return + } + + _, err = w.tryCommitNewWork(time.Now(), w.chain.CurrentHeader().Hash(), nil) + case trigger := <-w.reorgCh: + err = w.handleReorg(&trigger) + case chainHead := <-w.chainHeadCh: + if w.isCanonical(chainHead.Block.Header()) { + _, err = w.tryCommitNewWork(time.Now(), chainHead.Block.Hash(), nil) + } + case <-w.current.deadlineCh(): + w.current.deadlineReached = true + if len(w.current.txs) > 0 { + _, err = w.commit(false) + } case ev := <-w.txsCh: // Apply transactions to the pending state // // Note all transactions received may not be continuous with transactions // already included in the current mining block. These transactions will // be automatically eliminated. - if w.currentPipeline != nil { - txs := make(map[common.Address]types.Transactions) - signer := types.MakeSigner(w.chainConfig, w.currentPipeline.Header.Number) - for _, tx := range ev.Txs { - acc, _ := types.Sender(signer, tx) - txs[acc] = append(txs[acc], tx) - } - txset := types.NewTransactionsByPriceAndNonce(signer, txs, w.currentPipeline.Header.BaseFee) - if result := w.currentPipeline.TryPushTxns(txset, w.onTxFailingInPipeline); result != nil { - w.handlePipelineResult(result) + if w.current != nil { + shouldCommit, _ := w.processTxnSlice(ev.Txs) + if shouldCommit || w.current.deadlineReached { + _, err = w.commit(false) } } atomic.AddInt32(&w.newTxs, int32(len(ev.Txs))) @@ -316,19 +396,19 @@ func (w *worker) mainLoop() { // updateSnapshot updates pending snapshot block and state. // Note this function assumes the current variable is thread safe. 
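One more note on `mainLoop`: the deferred `recover` (the "training wheels" comment) means a panic during block building stops the mining loop but leaves the rest of the node up for inspection. In isolation:

```go
package main

import (
	"fmt"
	"time"
)

func mainLoop() {
	defer func() {
		if p := recover(); p != nil {
			// this goroutine ends here, but the process survives
			fmt.Println("worker mainLoop panic:", p)
		}
	}()
	panic("boom")
}

func main() {
	go mainLoop()
	time.Sleep(10 * time.Millisecond)
	fmt.Println("node still alive")
}
```

The `updateSnapshot` rewrite that follows simply reads the same fields off `w.current` instead of a pipeline block candidate.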
-func (w *worker) updateSnapshot(current *pipeline.BlockCandidate) { +func (w *worker) updateSnapshot() { w.snapshotMu.Lock() defer w.snapshotMu.Unlock() w.snapshotBlock = types.NewBlock( - current.Header, - current.Txs, + w.current.header, + w.current.txs, nil, - current.Receipts, + w.current.receipts, trie.NewStackTrie(nil), ) - w.snapshotReceipts = copyReceipts(current.Receipts) - w.snapshotState = current.State.Copy() + w.snapshotReceipts = copyReceipts(w.current.receipts) + w.snapshotState = w.current.state.Copy() } func (w *worker) collectPendingL1Messages(startIndex uint64) []types.L1MessageTx { @@ -336,113 +416,132 @@ func (w *worker) collectPendingL1Messages(startIndex uint64) []types.L1MessageTx return rawdb.ReadL1MessagesFrom(w.eth.ChainDb(), startIndex, maxCount) } -// startNewPipeline generates several new sealing tasks based on the parent block. -func (w *worker) startNewPipeline(timestamp int64) { - - if w.currentPipeline != nil { - w.currentPipeline.Release() - w.currentPipeline = nil - } - - parent := w.chain.CurrentBlock() - - num := parent.Number() +// newWork +func (w *worker) newWork(now time.Time, parentHash common.Hash, reorgReason error) error { + parent := w.chain.GetBlockByHash(parentHash) header := &types.Header{ ParentHash: parent.Hash(), - Number: num.Add(num, common.Big1), + Number: new(big.Int).Add(parent.Number(), common.Big1), GasLimit: core.CalcGasLimit(parent.GasLimit(), w.config.GasCeil), Extra: w.extra, - Time: uint64(timestamp), + Time: uint64(now.Unix()), } + + parentState, err := w.chain.StateAt(parent.Root()) + if err != nil { + return fmt.Errorf("failed to fetch parent state: %w", err) + } + // Set baseFee if we are on an EIP-1559 chain if w.chainConfig.IsCurie(header.Number) { - state, err := w.chain.StateAt(parent.Root()) - if err != nil { - log.Error("Failed to create mining context", "err", err) - return - } - parentL1BaseFee := fees.GetL1BaseFee(state) + parentL1BaseFee := fees.GetL1BaseFee(parentState) header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header(), parentL1BaseFee) } // Only set the coinbase if our consensus engine is running (avoid spurious block rewards) if w.isRunning() { if w.coinbase == (common.Address{}) { - log.Error("Refusing to mine without etherbase") - return + return errors.New("refusing to mine without etherbase") } header.Coinbase = w.coinbase } prepareStart := time.Now() if err := w.engine.Prepare(w.chain, header); err != nil { - log.Error("Failed to prepare header for mining", "err", err) - return + return fmt.Errorf("failed to prepare header for mining: %w", err) } prepareTimer.UpdateSince(prepareStart) - // If we are care about TheDAO hard-fork check whether to override the extra-data or not - if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil { - // Check whether the block is among the fork extra-override range - limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) - if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 { - // Depending whether we support or oppose the fork, override differently - if w.chainConfig.DAOForkSupport { - header.Extra = common.CopyBytes(params.DAOForkBlockExtra) - } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { - header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data - } - } + var nextL1MsgIndex uint64 + if dbVal := rawdb.ReadFirstQueueIndexNotInL2Block(w.eth.ChainDb(), header.ParentHash); dbVal != nil { + nextL1MsgIndex = *dbVal } - parentState, err := w.chain.StateAt(parent.Root()) + vmConfig := 
*w.chain.GetVMConfig() + cccLogger := ccc.NewLogger() + vmConfig.Debug = true + vmConfig.Tracer = cccLogger + + deadline := time.Unix(int64(header.Time), 0) + if w.chainConfig.Clique != nil && w.chainConfig.Clique.RelaxedPeriod { + // clique with relaxed period uses time.Now() as the header.Time, calculate the deadline + deadline = time.Unix(int64(header.Time+w.chainConfig.Clique.Period), 0) + } + + w.current = &work{ + deadlineTimer: time.NewTimer(time.Until(deadline)), + cccLogger: cccLogger, + vmConfig: vmConfig, + header: header, + state: parentState, + txs: types.Transactions{}, + receipts: types.Receipts{}, + coalescedLogs: []*types.Log{}, + gasPool: new(core.GasPool).AddGas(header.GasLimit), + nextL1MsgIndex: nextL1MsgIndex, + reorgReason: reorgReason, + } + return nil +} + +// tryCommitNewWork +func (w *worker) tryCommitNewWork(now time.Time, parent common.Hash, reorgReason error) (common.Hash, error) { + err := w.newWork(now, parent, reorgReason) if err != nil { - log.Error("failed to fetch parent state", "err", err) - return + return common.Hash{}, fmt.Errorf("failed creating new work: %w", err) } - // Apply special state transition at Curie block - if w.chainConfig.CurieBlock != nil && w.chainConfig.CurieBlock.Cmp(header.Number) == 0 { - misc.ApplyCurieHardFork(parentState) + shouldCommit, err := w.handleForks() + if err != nil { + return common.Hash{}, fmt.Errorf("failed handling forks: %w", err) + } - var nextL1MsgIndex uint64 - if dbVal := rawdb.ReadFirstQueueIndexNotInL2Block(w.eth.ChainDb(), header.ParentHash); dbVal != nil { - nextL1MsgIndex = *dbVal - } + // check if we are reorging + reorging := w.chain.GetBlockByNumber(w.current.header.Number.Uint64()) != nil + if !shouldCommit && reorging { + shouldCommit, err = w.processReorgedTxns(w.current.reorgReason) + } + if err != nil { + return common.Hash{}, fmt.Errorf("failed handling reorged txns: %w", err) + } - // zkEVM requirement: Curie transition block contains 0 transactions, bypass pipeline. - err = w.commit(&pipeline.Result{ - // Note: Signer nodes will not store CCC results for empty blocks in their database. - // In practice, this is acceptable, since this block will never overflow, and follower - // nodes will still store CCC results. 
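Two details of the new `newWork` above deserve a note: the block-wide `ccc.Logger` is installed as a VM tracer on a copy of the chain's `vm.Config` (so tracing is scoped to this work item, not global), and the commit deadline becomes a plain `time.Timer` owned by the work item. A sketch of the deadline side, assuming relaxed-period clique semantics as in the hunk:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	period := uint64(3) // clique block period in seconds
	headerTime := uint64(time.Now().Unix())

	// With RelaxedPeriod, header.Time is "now", so the deadline is one
	// full period in the future rather than the nominal slot boundary.
	deadline := time.Unix(int64(headerTime+period), 0)

	timer := time.NewTimer(time.Until(deadline))
	defer timer.Stop()

	select {
	case <-timer.C:
		fmt.Println("deadline reached, commit whatever txs we have")
	case <-time.After(50 * time.Millisecond):
		fmt.Println("still collecting transactions")
	}
}
```

The deletion resuming below is the old pipeline's special-cased Curie transition block; in the new flow, `handleForks` covers it.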
- Rows: &types.RowConsumption{}, - FinalBlock: &pipeline.BlockCandidate{ - Header: header, - State: parentState, - Txs: types.Transactions{}, - Receipts: types.Receipts{}, - CoalescedLogs: []*types.Log{}, - NextL1MsgIndex: nextL1MsgIndex, - }, - }) + if !shouldCommit { + shouldCommit, err = w.processTxPool() + } + if err != nil { + return common.Hash{}, fmt.Errorf("failed processing tx pool: %w", err) + } - if err != nil { - log.Error("failed to commit Curie fork block", "reason", err) + if shouldCommit { + // if reorging, force committing even if we are not "running" + // this can happen when sequencer is instructed to shutdown while handling a reorg + // we should make sure reorg is not interrupted + if blockHash, err := w.commit(reorging); err != nil { + return common.Hash{}, fmt.Errorf("failed committing new work: %w", err) + } else { + return blockHash, nil } - - return } + return common.Hash{}, nil +} - // fetch l1Txs - var l1Messages []types.L1MessageTx - if w.chainConfig.Scroll.ShouldIncludeL1Messages() { - common.WithTimer(collectL1MsgsTimer, func() { - l1Messages = w.collectPendingL1Messages(*rawdb.ReadFirstQueueIndexNotInL2Block(w.eth.ChainDb(), parent.Hash())) - }) +// handleForks +func (w *worker) handleForks() (bool, error) { + if w.chainConfig.CurieBlock != nil && w.chainConfig.CurieBlock.Cmp(w.current.header.Number) == 0 { + misc.ApplyCurieHardFork(w.current.state) + return true, nil } + return false, nil +} +// processTxPool +func (w *worker) processTxPool() (bool, error) { tidyPendingStart := time.Now() // Fill the block with all available pending transactions. pending := w.eth.TxPool().PendingWithMax(false, w.config.MaxAccountsNum) + + // Allow txpool to be reorged as we build current block + w.eth.TxPool().ResumeReorgs() + // Split the pending transactions into locals and remotes localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending for _, account := range w.eth.TxPool().Locals() { @@ -453,165 +552,199 @@ func (w *worker) startNewPipeline(timestamp int64) { } collectL2Timer.UpdateSince(tidyPendingStart) - // Allow txpool to be reorged as we build current block - w.eth.TxPool().ResumeReorgs() - - var nextL1MsgIndex uint64 - if dbIndex := rawdb.ReadFirstQueueIndexNotInL2Block(w.chain.Database(), parent.Hash()); dbIndex != nil { - nextL1MsgIndex = *dbIndex - } else { - log.Error("failed to read nextL1MsgIndex", "parent", parent.Hash()) - return - } - - w.currentPipelineStart = time.Now() - pipelineCCC := w.getCCC() - if !w.isRunning() { - pipelineCCC = nil - } - w.currentPipeline = pipeline.NewPipeline(w.chain, *w.chain.GetVMConfig(), parentState, header, nextL1MsgIndex, pipelineCCC).WithBeforeTxHook(w.beforeTxHook) - - deadline := time.Unix(int64(header.Time), 0) - if w.chainConfig.Clique != nil && w.chainConfig.Clique.RelaxedPeriod { - // clique with relaxed period uses time.Now() as the header.Time, calculate the deadline - deadline = time.Unix(int64(header.Time+w.chainConfig.Clique.Period), 0) - } - - if err := w.currentPipeline.Start(deadline); err != nil { - log.Error("failed to start pipeline", "err", err) - return + // fetch l1Txs + var l1Messages []types.L1MessageTx + if w.chainConfig.Scroll.ShouldIncludeL1Messages() { + common.WithTimer(collectL1MsgsTimer, func() { + l1Messages = w.collectPendingL1Messages(w.current.nextL1MsgIndex) + }) } // Short circuit if there is no available pending transactions. // But if we disable empty precommit already, ignore it. Since // empty block is necessary to keep the liveness of the network. 
if len(localTxs) == 0 && len(remoteTxs) == 0 && len(l1Messages) == 0 && atomic.LoadUint32(&w.noempty) == 0 { - return + return false, nil } if w.chainConfig.Scroll.ShouldIncludeL1Messages() && len(l1Messages) > 0 { log.Trace("Processing L1 messages for inclusion", "count", len(l1Messages)) txs, err := types.NewL1MessagesByQueueIndex(l1Messages) if err != nil { - log.Error("Failed to create L1 message set", "l1Messages", l1Messages, "err", err) - return + return false, fmt.Errorf("failed to create L1 message set: %w", err) } - if result := w.currentPipeline.TryPushTxns(txs, w.onTxFailingInPipeline); result != nil { - w.handlePipelineResult(result) - return + if shouldCommit, err := w.processTxns(txs); err != nil { + return false, fmt.Errorf("failed to include l1 msgs: %w", err) + } else if shouldCommit { + return true, nil } } - signer := types.MakeSigner(w.chainConfig, header.Number) - if w.prioritizedTx != nil && w.currentPipeline.Header.Number.Uint64() > w.prioritizedTx.blockNumber { + signer := types.MakeSigner(w.chainConfig, w.current.header.Number) + if w.prioritizedTx != nil && w.current.header.Number.Uint64() > w.prioritizedTx.blockNumber { w.prioritizedTx = nil } if w.prioritizedTx != nil { from, _ := types.Sender(signer, w.prioritizedTx.tx) // error already checked before txList := map[common.Address]types.Transactions{from: []*types.Transaction{w.prioritizedTx.tx}} - txs := types.NewTransactionsByPriceAndNonce(signer, txList, header.BaseFee) - if result := w.currentPipeline.TryPushTxns(txs, w.onTxFailingInPipeline); result != nil { - w.handlePipelineResult(result) - return + txs := types.NewTransactionsByPriceAndNonce(signer, txList, w.current.header.BaseFee) + + if shouldCommit, err := w.processTxns(txs); err != nil { + return false, fmt.Errorf("failed to include prioritized tx: %w", err) + } else if shouldCommit { + return true, nil } } if len(localTxs) > 0 { - txs := types.NewTransactionsByPriceAndNonce(signer, localTxs, header.BaseFee) - if result := w.currentPipeline.TryPushTxns(txs, w.onTxFailingInPipeline); result != nil { - w.handlePipelineResult(result) - return + txs := types.NewTransactionsByPriceAndNonce(signer, localTxs, w.current.header.BaseFee) + if shouldCommit, err := w.processTxns(txs); err != nil { + return false, fmt.Errorf("failed to include locals: %w", err) + } else if shouldCommit { + return true, nil } } + if len(remoteTxs) > 0 { - txs := types.NewTransactionsByPriceAndNonce(signer, remoteTxs, header.BaseFee) - if result := w.currentPipeline.TryPushTxns(txs, w.onTxFailingInPipeline); result != nil { - w.handlePipelineResult(result) - return + txs := types.NewTransactionsByPriceAndNonce(signer, remoteTxs, w.current.header.BaseFee) + if shouldCommit, err := w.processTxns(txs); err != nil { + return false, fmt.Errorf("failed to include remotes: %w", err) + } else if shouldCommit { + return true, nil } } - // pipelineCCC was nil, so the block was built for RPC purposes only. Stop the pipeline immediately - // and update the pending block. 
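`processTxPool` above tries candidate sets in a strict order (L1 messages, the prioritized carry-over tx, locals, then remotes) and stops at the first batch that reports the block is full. The control flow, reduced to its skeleton with a hypothetical helper, not the worker's actual API:

```go
package main

import "fmt"

// includeInOrder is a toy reduction of processTxPool: process batches
// by priority until one of them fills the block or errors.
func includeInOrder(batches map[string][]string, order []string,
	process func([]string) (full bool, err error)) (bool, error) {
	for _, name := range order {
		if len(batches[name]) == 0 {
			continue
		}
		if full, err := process(batches[name]); err != nil || full {
			return full, err
		}
	}
	return false, nil
}

func main() {
	batches := map[string][]string{
		"l1":     {"m0", "m1"},
		"locals": {"a0"},
	}
	full, _ := includeInOrder(batches,
		[]string{"l1", "prioritized", "locals", "remotes"},
		func(b []string) (bool, error) { fmt.Println("including", b); return false, nil })
	fmt.Println("block full:", full)
}
```

The deleted branch below handled the old pipeline's RPC-only runs; building the pending block no longer needs a separate mode.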
- if pipelineCCC == nil { - w.currentPipeline.Stop() + return false, nil +} + +// processTxnSlice +func (w *worker) processTxnSlice(txns types.Transactions) (bool, error) { + txsMap := make(map[common.Address]types.Transactions) + signer := types.MakeSigner(w.chainConfig, w.current.header.Number) + for _, tx := range txns { + acc, _ := types.Sender(signer, tx) + txsMap[acc] = append(txsMap[acc], tx) } + txset := types.NewTransactionsByPriceAndNonce(signer, txsMap, w.current.header.BaseFee) + return w.processTxns(txset) } -func (w *worker) handlePipelineResult(res *pipeline.Result) error { - startingHeader := w.currentPipeline.Header - w.currentPipeline.Release() - w.currentPipeline = nil +// processReorgedTxns +func (w *worker) processReorgedTxns(reason error) (bool, error) { + reorgedBlock := w.chain.GetBlockByNumber(w.current.header.Number.Uint64()) + commitGasCounter.Dec(int64(reorgedBlock.GasUsed())) + reorgedTxns := reorgedBlock.Transactions() + var errorWithTxnIdx *ccc.ErrorWithTxnIdx + if len(reorgedTxns) > 0 && errors.As(reason, &errorWithTxnIdx) { + if errorWithTxnIdx.ShouldSkip { + w.skipTransaction(reorgedTxns[errorWithTxnIdx.TxIdx], reason) + } - if res.FinalBlock != nil { - w.updateSnapshot(res.FinalBlock) + // if errorWithTxnIdx.TxIdx is 0, we will end up creating an empty block. + // This is necessary to make sure that same height can not fail CCC check multiple times. + // Each reorg forces a block to be appended to the chain. If we let the same block to trigger + // multiple reorgs, we can't guarantee an upper bound on reorg depth anymore. We can revisit this + // when we can handle reorgs on sidechains that we are building to replace the canonical chain. + reorgedTxns = reorgedTxns[:errorWithTxnIdx.TxIdx] } - // Rows being nil without an OverflowingTx means that block didn't go thru CCC, - // which means that we are not the sequencer. Do not attempt to commit. 
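`processReorgedTxns` above is the heart of the reorg recovery: `errors.As` digs the failing index out of the CCC error, the offender is optionally skipped, and only the transactions before it are replayed into the replacement block. A sketch with stand-in types:

```go
package main

import (
	"errors"
	"fmt"
)

type errWithTxnIdx struct {
	TxIdx      uint
	ShouldSkip bool
}

func (e *errWithTxnIdx) Error() string { return fmt.Sprintf("txn %d failed", e.TxIdx) }

func main() {
	txns := []string{"tx0", "tx1", "tx2"}
	reason := fmt.Errorf("ccc: %w", &errWithTxnIdx{TxIdx: 1, ShouldSkip: true})

	var e *errWithTxnIdx
	if len(txns) > 0 && errors.As(reason, &e) {
		if e.ShouldSkip {
			fmt.Println("skip:", txns[e.TxIdx])
		}
		txns = txns[:e.TxIdx] // replay only the txns before the offender
	}
	fmt.Println("replay:", txns)
}
```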
- if res.Rows == nil && res.OverflowingTx == nil { - return nil - } + w.processTxnSlice(reorgedTxns) + return true, nil +} - if res.OverflowingTx != nil { - if res.FinalBlock == nil { - // first txn overflowed the circuit, skip - log.Info("Circuit capacity limit reached for a single tx", "tx", res.OverflowingTx.Hash().String(), - "isL1Message", res.OverflowingTx.IsL1MessageTx(), "reason", res.CCCErr.Error()) +// processTxns +func (w *worker) processTxns(txs types.OrderedTransactionSet) (bool, error) { + for { + tx := txs.Peek() + if tx == nil { + break + } + + shouldCommit, err := w.processTxn(tx) + if shouldCommit { + return true, nil + } - // Store skipped transaction in local db - overflowingTrace := res.OverflowingTrace - if !w.config.StoreSkippedTxTraces { - overflowingTrace = nil + switch { + case err == nil, errors.Is(err, core.ErrNonceTooLow): + txs.Shift() + default: + w.onTxFailing(w.current.txs.Len(), tx, err) + if errors.Is(err, ccc.ErrBlockRowConsumptionOverflow) && w.current.txs.Len() > 0 { + return true, nil } - rawdb.WriteSkippedTransaction(w.eth.ChainDb(), res.OverflowingTx, overflowingTrace, res.CCCErr.Error(), - startingHeader.Number.Uint64(), nil) - if overflowingL1MsgTx := res.OverflowingTx.AsL1MessageTx(); overflowingL1MsgTx != nil { - rawdb.WriteFirstQueueIndexNotInL2Block(w.eth.ChainDb(), startingHeader.ParentHash, overflowingL1MsgTx.QueueIndex+1) + if tx.IsL1MessageTx() { + txs.Shift() } else { - w.prioritizedTx = nil - w.eth.TxPool().RemoveTx(res.OverflowingTx.Hash(), true) - } - } else if !res.OverflowingTx.IsL1MessageTx() { - // prioritize overflowing L2 message as the first txn next block - // no need to prioritize L1 messages, they are fetched in order - // and processed first in every block anyways - w.prioritizedTx = &prioritizedTransaction{ - blockNumber: startingHeader.Number.Uint64() + 1, - tx: res.OverflowingTx, + txs.Pop() } } + } - switch { - case res.OverflowingTx.IsL1MessageTx() && - errors.Is(res.CCCErr, ccc.ErrBlockRowConsumptionOverflow): - l1TxRowConsumptionOverflowCounter.Inc(1) - case !res.OverflowingTx.IsL1MessageTx() && - errors.Is(res.CCCErr, ccc.ErrBlockRowConsumptionOverflow): - l2TxRowConsumptionOverflowCounter.Inc(1) - case res.OverflowingTx.IsL1MessageTx() && - errors.Is(res.CCCErr, ccc.ErrUnknown): - l1TxCccUnknownErrCounter.Inc(1) - case !res.OverflowingTx.IsL1MessageTx() && - errors.Is(res.CCCErr, ccc.ErrUnknown): - l2TxCccUnknownErrCounter.Inc(1) - } + return false, nil +} + +// processTxn +func (w *worker) processTxn(tx *types.Transaction) (bool, error) { + if w.beforeTxHook != nil { + w.beforeTxHook() } - var commitError error - if res.FinalBlock != nil { - if commitError = w.commit(res); commitError == nil { - return nil - } - log.Error("Commit failed", "header", res.FinalBlock.Header, "reason", commitError) - if _, isRetryable := commitError.(retryableCommitError); !isRetryable { - return commitError - } + // If we don't have enough gas for any further transactions then we're done + if w.current.gasPool.Gas() < params.TxGas { + return true, nil } - w.startNewPipeline(time.Now().Unix()) - return nil + + // If we have collected enough transactions then we're done + // Originally we only limit l2txs count, but now strictly limit total txs number. 
+ if !w.chain.Config().Scroll.IsValidTxCount(w.current.txs.Len() + 1) { + return true, nil + } + + if tx.IsL1MessageTx() && tx.AsL1MessageTx().QueueIndex != w.current.nextL1MsgIndex { + // Continue, we might still be able to include some L2 messages + return false, ErrUnexpectedL1MessageIndex + } + + if !tx.IsL1MessageTx() && !w.chain.Config().Scroll.IsValidBlockSize(w.current.blockSize+tx.Size()) { + // can't fit this txn in this block, silently ignore and continue looking for more txns + return false, errors.New("tx too big") + } + + // Start executing the transaction + w.current.state.SetTxContext(tx.Hash(), w.current.txs.Len()) + + // create new snapshot for `core.ApplyTransaction` + snapState := w.current.state.Snapshot() + snapGasPool := *w.current.gasPool + snapGasUsed := w.current.header.GasUsed + snapCccLogger := w.current.cccLogger.Snapshot() + + w.forceTestErr(tx) + receipt, err := core.ApplyTransaction(w.chain.Config(), w.chain, nil /* coinbase will default to chainConfig.Scroll.FeeVaultAddress */, w.current.gasPool, + w.current.state, w.current.header, tx, &w.current.header.GasUsed, w.current.vmConfig) + if err != nil { + w.current.state.RevertToSnapshot(snapState) + *w.current.gasPool = snapGasPool + w.current.header.GasUsed = snapGasUsed + *w.current.cccLogger = *snapCccLogger + return false, err + } + + // Everything ok, collect the logs and shift in the next transaction from the same account + w.current.coalescedLogs = append(w.current.coalescedLogs, receipt.Logs...) + w.current.txs = append(w.current.txs, tx) + w.current.receipts = append(w.current.receipts, receipt) + + if !tx.IsL1MessageTx() { + // only consider block size limit for L2 transactions + w.current.blockSize += tx.Size() + } else { + w.current.nextL1MsgIndex = tx.AsL1MessageTx().QueueIndex + 1 + } + return false, nil } // retryableCommitError wraps an error that happened during commit phase and indicates that worker can retry to build a new block @@ -629,34 +762,31 @@ func (e retryableCommitError) Unwrap() error { // commit runs any post-transaction state modifications, assembles the final block // and commits new work if consensus engine is running. 
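Stepping back to `processTxn` above: failure recovery snapshots four things in lockstep (the StateDB, the gas pool, `header.GasUsed`, and the CCC logger), and a failed `ApplyTransaction` must restore all of them, or the accumulated CCC counters would drift from the block contents. A toy value-copy version of that all-or-nothing discipline, before the `commit` implementation below:

```go
package main

import (
	"errors"
	"fmt"
)

// blockState bundles everything a failed txn must roll back together.
type blockState struct {
	gasPool uint64
	gasUsed uint64
	rows    uint64 // stand-in for accumulated circuit row counts
}

func applyTxn(s *blockState, gas, rows uint64) error {
	snap := *s // snapshot everything at once
	s.gasPool -= gas
	s.gasUsed += gas
	s.rows += rows
	if s.rows > 100 { // pretend the circuit allows 100 rows per block
		*s = snap // ...and restore everything at once
		return errors.New("row consumption overflow")
	}
	return nil
}

func main() {
	s := &blockState{gasPool: 10_000_000}
	fmt.Println(applyTxn(s, 21000, 60), *s) // fits
	fmt.Println(applyTxn(s, 21000, 60), *s) // overflows, state rolled back
}
```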
-func (w *worker) commit(res *pipeline.Result) error { +func (w *worker) commit(force bool) (common.Hash, error) { sealDelay := time.Duration(0) defer func(t0 time.Time) { l2CommitTimer.Update(time.Since(t0) - sealDelay) }(time.Now()) - if res.CCCErr != nil { - commitReasonCCCCounter.Inc(1) - } else { - commitReasonDeadlineCounter.Inc(1) + w.updateSnapshot() + if !w.isRunning() && !force { + return common.Hash{}, nil } - commitGasCounter.Inc(int64(res.FinalBlock.Header.GasUsed)) - block, err := w.engine.FinalizeAndAssemble(w.chain, res.FinalBlock.Header, res.FinalBlock.State, - res.FinalBlock.Txs, nil, res.FinalBlock.Receipts) + block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, w.current.state, + w.current.txs, nil, w.current.receipts) if err != nil { - return err + return common.Hash{}, err } sealHash := w.engine.SealHash(block.Header()) log.Info("Committing new mining work", "number", block.Number(), "sealhash", sealHash, - "txs", res.FinalBlock.Txs.Len(), - "gas", block.GasUsed(), "fees", totalFees(block, res.FinalBlock.Receipts), - "elapsed", common.PrettyDuration(time.Since(w.currentPipelineStart))) + "txs", w.current.txs.Len(), + "gas", block.GasUsed(), "fees", totalFees(block, w.current.receipts)) resultCh, stopCh := make(chan *types.Block), make(chan struct{}) if err := w.engine.Seal(w.chain, block, resultCh, stopCh); err != nil { - return err + return common.Hash{}, err } // Clique.Seal() will only wait for a second before giving up on us. So make sure there is nothing computational heavy // or a call that blocks between the call to Seal and the line below. Seal might introduce some delay, so we keep track of @@ -665,17 +795,17 @@ func (w *worker) commit(res *pipeline.Result) error { block = <-resultCh sealDelay = time.Since(sealStart) if block == nil { - return errors.New("missed seal response from consensus engine") + return common.Hash{}, errors.New("missed seal response from consensus engine") } // verify the generated block with local consensus engine to make sure everything is as expected if err = w.engine.VerifyHeader(w.chain, block.Header(), true); err != nil { - return retryableCommitError{inner: err} + return common.Hash{}, retryableCommitError{inner: err} } blockHash := block.Hash() - for i, receipt := range res.FinalBlock.Receipts { + for i, receipt := range w.current.receipts { // add block location fields receipt.BlockHash = blockHash receipt.BlockNumber = block.Number() @@ -686,7 +816,7 @@ func (w *worker) commit(res *pipeline.Result) error { } } - for _, log := range res.FinalBlock.CoalescedLogs { + for _, log := range w.current.coalescedLogs { log.BlockHash = blockHash } @@ -701,36 +831,27 @@ func (w *worker) commit(res *pipeline.Result) error { "Worker WriteFirstQueueIndexNotInL2Block", "number", block.Number(), "hash", blockHash.String(), - "nextL1MsgIndex", res.FinalBlock.NextL1MsgIndex, + "nextL1MsgIndex", w.current.nextL1MsgIndex, ) - rawdb.WriteFirstQueueIndexNotInL2Block(w.eth.ChainDb(), blockHash, res.FinalBlock.NextL1MsgIndex) + rawdb.WriteFirstQueueIndexNotInL2Block(w.eth.ChainDb(), blockHash, w.current.nextL1MsgIndex) } else { log.Trace( "Worker WriteFirstQueueIndexNotInL2Block: not overwriting existing index", "number", block.Number(), "hash", blockHash.String(), "index", *index, - "nextL1MsgIndex", res.FinalBlock.NextL1MsgIndex, + "nextL1MsgIndex", w.current.nextL1MsgIndex, ) } - // Store circuit row consumption. 
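The seal-timing dance in `commit` above exists because, as the comment notes, `Clique.Seal()` only waits about a second before giving up, and the seal wait itself should not be billed to the commit timer. The shape of it, as a standalone sketch:

```go
package main

import (
	"fmt"
	"time"
)

// seal pretends to be engine.Seal: it returns immediately and delivers
// the sealed block on resultCh later.
func seal(resultCh chan<- string) {
	go func() {
		time.Sleep(50 * time.Millisecond) // consensus delay, e.g. clique period wait
		resultCh <- "block"
	}()
}

func main() {
	resultCh := make(chan string)
	commitStart := time.Now()

	sealStart := time.Now()
	seal(resultCh)
	block := <-resultCh
	sealDelay := time.Since(sealStart)

	// report commit time with the seal wait subtracted, as the patch does
	fmt.Println(block, "commit:", (time.Since(commitStart)-sealDelay).Round(time.Millisecond))
}
```

The deletions that resume below are the old synchronous row-consumption bookkeeping, now owned by the async checker.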
- log.Trace( - "Worker write block row consumption", - "id", w.circuitCapacityChecker.ID, - "number", block.Number(), - "hash", blockHash.String(), - "accRows", res.Rows, - ) // A new block event will trigger a reorg in the txpool, pause reorgs to defer this until we fetch txns for next block. // We may end up trying to process txns that we already included in the previous block, but they will all fail the nonce check w.eth.TxPool().PauseReorgs() - rawdb.WriteBlockRowConsumption(w.eth.ChainDb(), blockHash, res.Rows) // Commit block and state to database. - _, err = w.chain.WriteBlockWithState(block, res.FinalBlock.Receipts, res.FinalBlock.CoalescedLogs, res.FinalBlock.State, true) + _, err = w.chain.WriteBlockWithState(block, w.current.receipts, w.current.coalescedLogs, w.current.state, true) if err != nil { - return err + return common.Hash{}, err } log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealHash, "hash", blockHash) @@ -738,7 +859,20 @@ func (w *worker) commit(res *pipeline.Result) error { // Broadcast the block and announce chain insertion event w.mux.Post(core.NewMinedBlockEvent{Block: block}) - return nil + checkStart := time.Now() + if err = w.asyncChecker.Check(block); err != nil { + log.Error("failed to launch CCC background task", "err", err) + } + cccStallTimer.UpdateSince(checkStart) + + commitGasCounter.Inc(int64(block.GasUsed())) + if w.current.deadlineReached { + commitReasonDeadlineCounter.Inc(1) + } else { + commitReasonCCCCounter.Inc(1) + } + w.current = nil + return block.Hash(), nil } // copyReceipts makes a deep copy of the given receipts. @@ -751,57 +885,67 @@ func copyReceipts(receipts []*types.Receipt) []*types.Receipt { return result } -func (w *worker) onTxFailingInPipeline(txIndex int, tx *types.Transaction, err error) bool { +func (w *worker) onTxFailing(txIndex int, tx *types.Transaction, err error) { if !w.isRunning() { - return false + return } - writeTrace := func() { - var trace *types.BlockTrace - var errWithTrace *pipeline.ErrorWithTrace - if w.config.StoreSkippedTxTraces && errors.As(err, &errWithTrace) { - trace = errWithTrace.Trace + if errors.Is(err, ccc.ErrBlockRowConsumptionOverflow) { + if txIndex > 0 { + if !tx.IsL1MessageTx() { + // prioritize overflowing L2 message as the first txn next block + // no need to prioritize L1 messages, they are fetched in order + // and processed first in every block anyways + w.prioritizedTx = &prioritizedTransaction{ + blockNumber: w.current.header.Number.Uint64() + 1, + tx: tx, + } + } + return } - rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, trace, err.Error(), - w.currentPipeline.Header.Number.Uint64(), nil) - } - switch { - case errors.Is(err, core.ErrGasLimitReached) && tx.IsL1MessageTx(): - // If this block already contains some L1 messages try again in the next block. - if txIndex > 0 { - break + // first txn overflowed the circuit, skip + w.skipTransaction(tx, err) + } else if tx.IsL1MessageTx() { + if errors.Is(err, ErrUnexpectedL1MessageIndex) { + log.Warn( + "Unexpected L1 message queue index in worker", "got", tx.AsL1MessageTx().QueueIndex, + ) + return + } else if txIndex > 0 { + // If this block already contains some L1 messages try again in the next block. + return } - // A single L1 message leads to out-of-gas. Skip it. 
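On the new tail of `commit` above: the sealed block is handed to the `AsyncChecker`, and `cccStallTimer` records how long `Check` blocks when all CCC workers are busy. That stall is the backpressure that keeps the sequencer from outrunning its checkers. A toy bounded-worker version with assumed semantics (the real checker lives in `rollup/ccc`):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type asyncChecker struct {
	slots chan struct{} // capacity = max CCC workers
	wg    sync.WaitGroup
}

func newAsyncChecker(workers int) *asyncChecker {
	return &asyncChecker{slots: make(chan struct{}, workers)}
}

func (c *asyncChecker) check(block int, onFail func(int, error)) {
	c.slots <- struct{}{} // blocks (stalls the miner) when saturated
	c.wg.Add(1)
	go func() {
		defer func() { <-c.slots; c.wg.Done() }()
		time.Sleep(10 * time.Millisecond) // pretend to run CCC
		if block == 3 {
			onFail(block, fmt.Errorf("row overflow"))
		}
	}()
}

func main() {
	c := newAsyncChecker(2)
	for b := 1; b <= 4; b++ {
		c.check(b, func(b int, err error) { fmt.Println("block", b, "failed:", err) })
	}
	c.wg.Wait()
}
```

The deletions continuing below are the old per-error-type skip paths, now consolidated into `onTxFailing` above.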
- queueIndex := tx.AsL1MessageTx().QueueIndex - log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", - w.currentPipeline.Header.Number, "reason", "gas limit exceeded") - writeTrace() - l1TxGasLimitExceededCounter.Inc(1) - case errors.Is(err, core.ErrInsufficientFunds): + queueIndex := tx.AsL1MessageTx().QueueIndex + log.Warn("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", + w.current.header.Number, "reason", err) + rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, nil, err.Error(), + w.current.header.Number.Uint64(), nil) + w.current.nextL1MsgIndex = queueIndex + 1 + l1SkippedCounter.Inc(1) + } else if errors.Is(err, core.ErrInsufficientFunds) { log.Trace("Skipping tx with insufficient funds", "tx", tx.Hash().String()) w.eth.TxPool().RemoveTx(tx.Hash(), true) + } +} - case errors.Is(err, pipeline.ErrUnexpectedL1MessageIndex): - log.Warn( - "Unexpected L1 message queue index in worker", - "got", tx.AsL1MessageTx().QueueIndex, - ) - case errors.Is(err, core.ErrGasLimitReached), errors.Is(err, core.ErrNonceTooLow), errors.Is(err, core.ErrNonceTooHigh), errors.Is(err, core.ErrTxTypeNotSupported): - break - default: - // Strange error - log.Debug("Transaction failed, account skipped", "hash", tx.Hash().String(), "err", err) - if tx.IsL1MessageTx() { - queueIndex := tx.AsL1MessageTx().QueueIndex - log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", - w.currentPipeline.Header.Number, "reason", "strange error", "err", err) - writeTrace() - l1TxStrangeErrCounter.Inc(1) +// skipTransaction +func (w *worker) skipTransaction(tx *types.Transaction, err error) { + log.Info("Circuit capacity limit reached for a single tx", "isL1Message", tx.IsL1MessageTx(), "tx", tx.Hash().String()) + rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, nil, err.Error(), + w.current.header.Number.Uint64(), nil) + if tx.IsL1MessageTx() { + w.current.nextL1MsgIndex = tx.AsL1MessageTx().QueueIndex + 1 + l1SkippedCounter.Inc(1) + } else { + if w.prioritizedTx != nil && w.prioritizedTx.tx.Hash() == tx.Hash() { + w.prioritizedTx = nil } + + w.eth.TxPool().RemoveTx(tx.Hash(), true) + l2SkippedCounter.Inc(1) } - return false } // totalFees computes total consumed miner fees in ETH. Block transactions and receipts have to have the same order. @@ -813,3 +957,68 @@ func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { } return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) } + +func (w *worker) forceTestErr(tx *types.Transaction) { + if w.skipTxHash == tx.Hash() { + w.current.cccLogger.ForceError() + } + + w.errCountdown-- + if w.errCountdown == 0 { + w.current.cccLogger.ForceError() + } +} + +// scheduleCCCError schedules an CCC error with a countdown, only used in tests. 
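`forceTestErr` above is the test seam: either a specific tx hash or a decrementing countdown forces the CCC logger into an error state, replacing the old `ScheduleError`/`Skip` hooks on the synchronous checker. Reduced to the countdown logic (the two arming helpers follow below):

```go
package main

import "fmt"

type injector struct {
	countdown int
	target    string
}

// shouldFail fires for the targeted tx, or exactly when the countdown
// reaches zero, mirroring forceTestErr's decrement-then-compare.
func (i *injector) shouldFail(tx string) bool {
	if i.target == tx {
		return true
	}
	i.countdown--
	return i.countdown == 0
}

func main() {
	i := &injector{countdown: 2}
	for _, tx := range []string{"a", "b", "c"} {
		fmt.Println(tx, i.shouldFail(tx)) // fires on "b"
	}
}
```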
+func (w *worker) scheduleCCCError(countdown int) { + w.errCountdown = countdown +} + +// skip forces a txn to be skipped by worker +func (w *worker) skip(txHash common.Hash) { + w.skipTxHash = txHash +} + +// onBlockFailingCCC is called when block produced by worker fails CCC +func (w *worker) onBlockFailingCCC(failingBlock *types.Block, err error) { + log.Warn("block failed CCC", "hash", failingBlock.Hash().Hex(), "number", failingBlock.NumberU64(), "err", err) + w.reorgCh <- reorgTrigger{ + block: failingBlock, + reason: err, + } +} + +// handleReorg reorgs all blocks following the trigger block +func (w *worker) handleReorg(trigger *reorgTrigger) error { + parentHash := trigger.block.ParentHash() + reorgReason := trigger.reason + + for { + if !w.isCanonical(trigger.block.Header()) { + // trigger block is no longer part of the canonical chain, we are done + return nil + } + + newBlockHash, err := w.tryCommitNewWork(time.Now(), parentHash, reorgReason) + if err != nil { + return err + } + + // we created replacement blocks for all existing blocks in canonical chain, but not quite ready to commit the new HEAD + if newBlockHash == (common.Hash{}) { + // force committing the new canonical head to trigger a reorg in blockchain + // otherwise we might ignore CCC errors from the new side chain since it is not canonical yet + newBlockHash, err = w.commit(true) + if err != nil { + return err + } + } + + parentHash = newBlockHash + reorgReason = nil // clear reorg reason after trigger block gets reorged + } +} + +func (w *worker) isCanonical(header *types.Header) bool { + return w.chain.GetBlockByNumber(header.Number.Uint64()).Hash() == header.Hash() +} diff --git a/miner/scroll_worker_test.go b/miner/scroll_worker_test.go index a542b5c08b86..407d508cf819 100644 --- a/miner/scroll_worker_test.go +++ b/miner/scroll_worker_test.go @@ -24,6 +24,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/scroll-tech/go-ethereum/accounts" "github.com/scroll-tech/go-ethereum/common" @@ -38,7 +39,6 @@ import ( "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/event" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/rollup/ccc" "github.com/scroll-tech/go-ethereum/rollup/sync_service" ) @@ -73,6 +73,7 @@ var ( Recommit: time.Second, GasCeil: params.GenesisGasLimit, MaxAccountsNum: math.MaxInt, + CCCMaxWorkers: 2, } ) @@ -788,7 +789,7 @@ func TestOversizedTxThenNormal(t *testing.T) { switch blockNum { case 0: // schedule to skip 2nd call to ccc - w.getCCC().ScheduleError(2, ccc.ErrBlockRowConsumptionOverflow) + w.scheduleCCCError(2) return false case 1: // include #0, fail on #1, then seal the block @@ -803,7 +804,7 @@ func TestOversizedTxThenNormal(t *testing.T) { assert.Equal(uint64(1), *queueIndex) // schedule to skip next call to ccc - w.getCCC().ScheduleError(1, ccc.ErrBlockRowConsumptionOverflow) + w.scheduleCCCError(1) return false case 2: @@ -869,7 +870,7 @@ func TestPrioritizeOverflowTx(t *testing.T) { // Process 2 transactions with gas order: tx0 > tx1, tx1 will overflow. 
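Backing up to `handleReorg` above for a moment: it rebuilds the chain block by block from the failing block's parent until the trigger block is no longer canonical, force-committing the final head so the blockchain actually switches over. A toy model of that loop, before the tests that drive it:

```go
package main

import "fmt"

func main() {
	// heights 0..4; the block at height 2 failed its CCC check
	canonical := []string{"g", "b1", "b2", "b3", "b4"}
	trigger := 2

	parent := canonical[trigger-1]
	for h := trigger; h < len(canonical); h++ {
		replacement := canonical[h] + "'" // rebuilt without the offending txn
		canonical[h] = replacement
		fmt.Printf("height %d: %s built on %s\n", h, replacement, parent)
		parent = replacement
	}
	fmt.Println("new canonical chain:", canonical)
}
```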
b.txPool.AddRemotesSync([]*types.Transaction{tx0, tx1}) - w.getCCC().ScheduleError(2, ccc.ErrBlockRowConsumptionOverflow) + w.scheduleCCCError(2) w.start() select { @@ -903,7 +904,7 @@ func TestPrioritizeOverflowTx(t *testing.T) { t.Fatalf("timeout") } - w.getCCC().Skip(tx4.Hash(), ccc.ErrBlockRowConsumptionOverflow) + w.skip(tx4.Hash()) assert.Equal([]error{nil, nil, nil}, b.txPool.AddRemotesSync([]*types.Transaction{tx3, tx4, tx5})) w.start() @@ -984,8 +985,7 @@ func TestSkippedTransactionDatabaseEntries(t *testing.T) { }) } -func TestSealBlockAfterCliquePeriod(t *testing.T) { - assert := assert.New(t) +func TestPending(t *testing.T) { var ( engine consensus.Engine chainConfig *params.ChainConfig @@ -1005,85 +1005,183 @@ func TestSealBlockAfterCliquePeriod(t *testing.T) { Tracer: vm.NewStructLogger(&vm.LogConfig{EnableMemory: true, EnableReturnData: true})}, nil, nil) defer chain.Stop() - // Add artificial delay to transaction processing. - w.beforeTxHook = func() { - time.Sleep(time.Duration(chainConfig.Clique.Period) * 1 * time.Second) + // Define 3 transactions: + // A --> B (nonce: 0, gas: 20) + tx0, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress), testUserAddress, big.NewInt(100000000000000000), params.TxGas, big.NewInt(20*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey) + // A --> B (nonce: 1, gas: 5) + tx1, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress)+1, testUserAddress, big.NewInt(0), params.TxGas, big.NewInt(5*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey) + // B --> A (nonce: 0, gas: 20) + tx2, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testUserAddress), testBankAddress, big.NewInt(0), params.TxGas, big.NewInt(20*params.InitialBaseFee), nil), types.HomesteadSigner{}, testUserKey) + // B --> A (nonce: 1, gas: 20) + tx3, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testUserAddress)+1, testBankAddress, big.NewInt(0), params.TxGas, big.NewInt(20*params.InitialBaseFee), nil), types.HomesteadSigner{}, testUserKey) + // B --> A (nonce: 2, gas: 20) + tx4, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testUserAddress)+2, testBankAddress, big.NewInt(0), params.TxGas, big.NewInt(20*params.InitialBaseFee), nil), types.HomesteadSigner{}, testUserKey) + // A --> B (nonce: 2, gas: 5) + tx5, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress)+2, testUserAddress, big.NewInt(0), params.TxGas, big.NewInt(5*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey) + + b.txPool.AddRemotesSync([]*types.Transaction{tx0, tx1, tx2, tx3, tx4, tx5}) + // start building pending block + w.startCh <- struct{}{} + + time.Sleep(time.Second) + pending := w.pendingBlock() + assert.NotNil(t, pending) + assert.NotEmpty(t, pending.Transactions()) +} + +func TestReorg(t *testing.T) { + var ( + engine consensus.Engine + chainConfig *params.ChainConfig + db = rawdb.NewMemoryDatabase() + ) + chainConfig = params.AllCliqueProtocolChanges + chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000, RelaxedPeriod: true} + chainConfig.Scroll.FeeVaultAddress = &common.Address{} + engine = clique.New(chainConfig.Clique, db) + + maxTxPerBlock := 2 + chainConfig.Scroll.MaxTxPerBlock = &maxTxPerBlock + chainConfig.Scroll.L1Config = ¶ms.L1Config{ + NumL1MessagesPerBlock: 10, } - // Wait for mined blocks. 
- sub := w.mux.Subscribe(core.NewMinedBlockEvent{}) - defer sub.Unsubscribe() + chainConfig.LondonBlock = big.NewInt(0) + w, b := newTestWorker(t, chainConfig, engine, db, 0) + defer w.close() - // Insert 2 non-l1msg txs - b.txPool.AddLocal(b.newRandomTx(true)) - b.txPool.AddLocal(b.newRandomTx(false)) + // This test chain imports the mined blocks. + b.genesis.MustCommit(db) + chain, _ := core.NewBlockChain(db, nil, b.chain.Config(), engine, vm.Config{}, nil, nil) + defer chain.Stop() + + // Insert local tx + for i := 0; i < 40; i++ { + b.txPool.AddLocal(b.newRandomTx(true)) + } + + const firstReorgHeight = 5 + w.asyncChecker.ScheduleError(firstReorgHeight, 1) // Start mining! w.start() - select { - case ev := <-sub.Chan(): - block := ev.Data.(core.NewMinedBlockEvent).Block - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("failed to insert new mined block %d: %v", block.NumberU64(), err) + // Wait for mined blocks. + sub := w.mux.Subscribe(core.NewMinedBlockEvent{}) + defer sub.Unsubscribe() + + var oldBlock *types.Block + var newBlock *types.Block + +firstReorg: + for { + select { + case ev := <-sub.Chan(): + block := ev.Data.(core.NewMinedBlockEvent).Block + if block.NumberU64() == firstReorgHeight { + if oldBlock == nil { + oldBlock = block + } else { + newBlock = block + break firstReorg + } + } + case <-time.After(3 * time.Second): // Worker needs 1s to include new changes. + t.Fatalf("timeout") } - assert.Equal(1, len(block.Transactions())) // only packed 1 tx, not 2 - case <-time.After(5 * time.Second): - t.Fatalf("timeout") } - select { - case ev := <-sub.Chan(): - block := ev.Data.(core.NewMinedBlockEvent).Block - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("failed to insert new mined block %d: %v", block.NumberU64(), err) + require.Equal(t, oldBlock.NumberU64(), newBlock.NumberU64()) + // should skip second txn + require.Equal(t, oldBlock.Transactions()[:1].Len(), newBlock.Transactions().Len()) + for i := 0; i < newBlock.Transactions().Len(); i++ { + require.Equal(t, oldBlock.Transactions()[:1][i].Hash(), newBlock.Transactions()[i].Hash()) + } + + time.Sleep(time.Second * 5) + + const secondReorgHeight = 15 + w.asyncChecker.ScheduleError(secondReorgHeight, 0) + + sub.Unsubscribe() + + // Insert local tx + for i := 0; i < 20; i++ { + b.txPool.AddLocal(b.newRandomTx(true)) + } + + // resubscribe + sub = w.mux.Subscribe(core.NewMinedBlockEvent{}) + defer sub.Unsubscribe() + + oldBlock = nil + newBlock = nil + +secondReorg: + for { + select { + case ev := <-sub.Chan(): + block := ev.Data.(core.NewMinedBlockEvent).Block + if block.NumberU64() == secondReorgHeight { + if oldBlock == nil { + oldBlock = block + } else { + newBlock = block + break secondReorg + } + } + case <-time.After(3 * time.Second): // Worker needs 1s to include new changes. 
+			t.Fatalf("timeout")
 		}
-		assert.Equal(1, len(block.Transactions()))
-	case <-time.After(5 * time.Second):
-		t.Fatalf("timeout")
+	}
+
+	require.Equal(t, oldBlock.NumberU64(), newBlock.NumberU64())
+	// should skip first txn and the next txn will fail nonce check
+	require.Equal(t, 0, newBlock.Transactions().Len())
+	for i := 0; i < newBlock.Transactions().Len(); i++ {
+		require.Equal(t, oldBlock.Transactions()[1:][i].Hash(), newBlock.Transactions()[i].Hash())
+	}
 }
 
-func TestPending(t *testing.T) {
+func TestRestartHeadCCC(t *testing.T) {
 	var (
 		engine      consensus.Engine
 		chainConfig *params.ChainConfig
 		db          = rawdb.NewMemoryDatabase()
 	)
 	chainConfig = params.AllCliqueProtocolChanges
-	chainConfig.Clique = &params.CliqueConfig{Period: 1, Epoch: 30000}
+	chainConfig.Clique = &params.CliqueConfig{Period: 1, Epoch: 30000, RelaxedPeriod: true}
 	chainConfig.Scroll.FeeVaultAddress = &common.Address{}
 	engine = clique.New(chainConfig.Clique, db)
+
+	maxTxPerBlock := 2
+	chainConfig.Scroll.MaxTxPerBlock = &maxTxPerBlock
+	chainConfig.Scroll.L1Config = &params.L1Config{
+		NumL1MessagesPerBlock: 10,
+	}
+
+	chainConfig.LondonBlock = big.NewInt(0)
 	w, b := newTestWorker(t, chainConfig, engine, db, 0)
 	defer w.close()
 
 	// This test chain imports the mined blocks.
 	b.genesis.MustCommit(db)
-	chain, _ := core.NewBlockChain(db, nil, b.chain.Config(), engine, vm.Config{
-		Debug:  true,
-		Tracer: vm.NewStructLogger(&vm.LogConfig{EnableMemory: true, EnableReturnData: true})}, nil, nil)
-	defer chain.Stop()
 
-	// Define 3 transactions:
-	// A --> B (nonce: 0, gas: 20)
-	tx0, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress), testUserAddress, big.NewInt(100000000000000000), params.TxGas, big.NewInt(20*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey)
-	// A --> B (nonce: 1, gas: 5)
-	tx1, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress)+1, testUserAddress, big.NewInt(0), params.TxGas, big.NewInt(5*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey)
-	// B --> A (nonce: 0, gas: 20)
-	tx2, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testUserAddress), testBankAddress, big.NewInt(0), params.TxGas, big.NewInt(20*params.InitialBaseFee), nil), types.HomesteadSigner{}, testUserKey)
-	// B --> A (nonce: 1, gas: 20)
-	tx3, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testUserAddress)+1, testBankAddress, big.NewInt(0), params.TxGas, big.NewInt(20*params.InitialBaseFee), nil), types.HomesteadSigner{}, testUserKey)
-	// B --> A (nonce: 2, gas: 20)
-	tx4, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testUserAddress)+2, testBankAddress, big.NewInt(0), params.TxGas, big.NewInt(20*params.InitialBaseFee), nil), types.HomesteadSigner{}, testUserKey)
-	// A --> B (nonce: 2, gas: 5)
-	tx5, _ := types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress)+2, testUserAddress, big.NewInt(0), params.TxGas, big.NewInt(5*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey)
+	// Insert local tx
+	for i := 0; i < 10; i++ {
+		b.txPool.AddLocal(b.newRandomTx(true))
+	}
 
-	b.txPool.AddRemotesSync([]*types.Transaction{tx0, tx1, tx2, tx3, tx4, tx5})
-	// start building pending block
-	w.startCh <- struct{}{}
+	// Start mining!
+ w.start() + time.Sleep(time.Second * 5) + w.stop() + headHash := w.chain.CurrentHeader().Hash() + rawdb.DeleteBlockRowConsumption(db, headHash) + require.Nil(t, rawdb.ReadBlockRowConsumption(db, headHash)) + w.start() time.Sleep(time.Second) - pending := w.pendingBlock() - assert.NotNil(t, pending) - assert.NotEmpty(t, pending.Transactions()) + // head should be rechecked by CCC + require.NotNil(t, rawdb.ReadBlockRowConsumption(db, headHash)) } diff --git a/params/version.go b/params/version.go index 222269799f0b..840baaf01553 100644 --- a/params/version.go +++ b/params/version.go @@ -24,7 +24,7 @@ import ( const ( VersionMajor = 5 // Major version component of the current release VersionMinor = 7 // Minor version component of the current release - VersionPatch = 5 // Patch version component of the current release + VersionPatch = 6 // Patch version component of the current release VersionMeta = "mainnet" // Version metadata to append to the version string ) diff --git a/rollup/ccc/async_checker.go b/rollup/ccc/async_checker.go index c1e061a59a65..a070810f7aae 100644 --- a/rollup/ccc/async_checker.go +++ b/rollup/ccc/async_checker.go @@ -48,16 +48,21 @@ type AsyncChecker struct { currentHead *types.Header forkCtx context.Context forkCtxCancelFunc context.CancelFunc + + // tests + blockNumberToFail uint64 + txnIdxToFail uint64 } type ErrorWithTxnIdx struct { - txIdx uint + TxIdx uint err error - shouldSkip bool + ShouldSkip bool + AccRc *types.RowConsumption } func (e *ErrorWithTxnIdx) Error() string { - return fmt.Sprintf("txn at index %d failed with %s", e.txIdx, e.err) + return fmt.Sprintf("txn at index %d failed with %s (rc = %s)", e.TxIdx, e.err, fmt.Sprint(e.AccRc)) } func (e *ErrorWithTxnIdx) Unwrap() error { @@ -94,8 +99,10 @@ func (c *AsyncChecker) Wait() { // Check spawns an async CCC verification task. 
func (c *AsyncChecker) Check(block *types.Block) error {
 	if block.NumberU64() > c.currentHead.Number.Uint64()+1 {
-		log.Error("non continuous chain observed in AsyncChecker", "prev", c.currentHead, "got", block.Header())
-	} else if block.ParentHash() != c.currentHead.Hash() {
+		log.Warn("non-continuous chain observed in AsyncChecker", "prev", c.currentHead, "got", block.Header())
+	}
+
+	if block.ParentHash() != c.currentHead.Hash() {
 		// seems like there is a fork happening, a block from the canonical chain must have failed CCC check
 		// assume the incoming block is the new tip in the fork
 		c.forkCtx, c.forkCtxCancelFunc = context.WithCancel(context.Background())
@@ -106,7 +113,11 @@ func (c *AsyncChecker) Check(block *types.Block) error {
 	// all blocks in the same fork share the same context to allow terminating them all at once if needed
 	ctx, ctxCancelFunc := c.forkCtx, c.forkCtxCancelFunc
 	c.workers.Go(func() stream.Callback {
-		return c.checkerTask(block, checker, ctx, ctxCancelFunc)
+		taskCb := c.checkerTask(block, checker, ctx, ctxCancelFunc)
+		return func() {
+			taskCb()
+			c.freeCheckers <- checker
+		}
 	})
 	return nil
 }
@@ -126,7 +137,6 @@ func (c *AsyncChecker) checkerTask(block *types.Block, ccc *Checker, forkCtx con
 	checkStart := time.Now()
 	defer func() {
 		checkTimer.UpdateSince(checkStart)
-		c.freeCheckers <- ccc
 		activeWorkersGauge.Dec(1)
 	}()
 
@@ -148,6 +158,15 @@ func (c *AsyncChecker) checkerTask(block *types.Block, ccc *Checker, forkCtx con
 		}
 	}
 
+	if c.blockNumberToFail == block.NumberU64() {
+		err = &ErrorWithTxnIdx{
+			TxIdx: uint(c.txnIdxToFail),
+			err:   err,
+		}
+		c.blockNumberToFail = 0
+		return failingCallback
+	}
+
 	statedb, err := c.bc.StateAt(parent.Root())
 	if err != nil {
 		return failingCallback
@@ -158,7 +177,7 @@ func (c *AsyncChecker) checkerTask(block *types.Block, ccc *Checker, forkCtx con
 	gasPool := new(core.GasPool).AddGas(header.GasLimit)
 	ccc.Reset()
-	var accRc *types.RowConsumption
+	accRc := new(types.RowConsumption)
 	for txIdx, tx := range block.Transactions() {
 		if !isForkStillActive(forkCtx) {
 			return noopCb
@@ -168,11 +187,12 @@ func (c *AsyncChecker) checkerTask(block *types.Block, ccc *Checker, forkCtx con
 		curRc, err = c.checkTxAndApply(parent, header, statedb, gasPool, tx, ccc)
 		if err != nil {
 			err = &ErrorWithTxnIdx{
-				txIdx: uint(txIdx),
+				TxIdx: uint(txIdx),
 				err:   err,
 				// if the txn is the first in block or the additional resource utilization caused
 				// by this txn alone is enough to overflow the circuit, skip
-				shouldSkip: txIdx == 0 || curRc.Difference(*accRc).IsOverflown(),
+				ShouldSkip: txIdx == 0 || curRc.Difference(*accRc).IsOverflown(),
+				AccRc:      curRc,
 			}
 			return failingCallback
 		}
@@ -222,3 +242,9 @@ func (c *AsyncChecker) checkTxAndApply(parent *types.Block, header *types.Header
 	}
 	return rc, nil
 }
+
+// ScheduleError forces a block to error on a given transaction index
+func (c *AsyncChecker) ScheduleError(blockNumber uint64, txnIdx uint64) {
+	c.blockNumberToFail = blockNumber
+	c.txnIdxToFail = txnIdx
+}
diff --git a/rollup/ccc/async_checker_test.go b/rollup/ccc/async_checker_test.go
index d5a201a5b694..bbfcd9b99996 100644
--- a/rollup/ccc/async_checker_test.go
+++ b/rollup/ccc/async_checker_test.go
@@ -76,6 +76,6 @@ func TestAsyncChecker(t *testing.T) {
 	time.Sleep(3 * time.Second)
 
 	require.Equal(t, reorgBlocks[3].Hash(), failingBlockHash)
-	require.Equal(t, uint(3), errWithIdx.txIdx)
-	require.True(t, errWithIdx.shouldSkip)
+	require.Equal(t, uint(3), errWithIdx.TxIdx)
+	require.True(t, errWithIdx.ShouldSkip)
 }
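
Not part of the patch itself: a minimal sketch of how a consumer might react to the
now-exported fields, assuming err is the error surfaced through the checker's failure
callback (the surrounding wiring is illustrative only):

	var errWithIdx *ccc.ErrorWithTxnIdx
	if errors.As(err, &errWithIdx) {
		if errWithIdx.ShouldSkip {
			// drop the txn at errWithIdx.TxIdx for good; errWithIdx.AccRc carries
			// the row consumption accumulated up to and including the failing txn
		}
		// otherwise the txn may still fit into a fresh block
	}

diff --git a/rollup/ccc/libzkp/Cargo.lock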
b/rollup/ccc/libzkp/Cargo.lock index 2b93169b6e05..f923299e4b63 100644 --- a/rollup/ccc/libzkp/Cargo.lock +++ b/rollup/ccc/libzkp/Cargo.lock @@ -31,7 +31,7 @@ dependencies = [ [[package]] name = "aggregator" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "ark-std 0.3.0", "bitstream-io", @@ -537,7 +537,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bus-mapping" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "eth-types", "ethers-core", @@ -1126,7 +1126,7 @@ dependencies = [ [[package]] name = "eth-types" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "base64 0.13.1", "ethers-core", @@ -1283,7 +1283,7 @@ dependencies = [ [[package]] name = "external-tracer" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "eth-types", "geth-utils", @@ -1465,7 +1465,7 @@ dependencies = [ [[package]] name = "gadgets" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "eth-types", "halo2_proofs", @@ -1488,7 +1488,7 @@ dependencies = [ [[package]] name = "geth-utils" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "env_logger 0.10.0", "gobuild", @@ -2237,7 +2237,7 @@ dependencies = [ [[package]] name = "mock" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "eth-types", "ethers-core", @@ -2252,7 +2252,7 @@ dependencies = [ [[package]] name = "mpt-zktrie" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "eth-types", "halo2curves", @@ -2635,17 +2635,18 @@ dependencies = [ [[package]] name = "poseidon-base" version = "0.1.0" -source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#7b96835c6201afdbfaf3d13d641efbaaf5db2d20" +source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#6cc36ab9dfa153f554ff7b84305f39838366a8df" dependencies = [ "bitvec", 
"halo2curves", "lazy_static", + "once_cell", ] [[package]] name = "poseidon-circuit" version = "0.1.0" -source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#7b96835c6201afdbfaf3d13d641efbaaf5db2d20" +source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#6cc36ab9dfa153f554ff7b84305f39838366a8df" dependencies = [ "ff", "halo2_proofs", @@ -2724,7 +2725,7 @@ dependencies = [ [[package]] name = "prover" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "aggregator", "anyhow", @@ -4361,7 +4362,7 @@ dependencies = [ [[package]] name = "zkevm-circuits" version = "0.11.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.8#1f2f15c48edcd56ad05bad9dc04bfbec1ed36c05" dependencies = [ "array-init", "bus-mapping", @@ -4419,7 +4420,7 @@ dependencies = [ [[package]] name = "zktrie" version = "0.3.0" -source = "git+https://github.com/scroll-tech/zktrie.git?branch=main#23181f209e94137f74337b150179aeb80c72e7c8" +source = "git+https://github.com/scroll-tech/zktrie.git?branch=v0.7#23181f209e94137f74337b150179aeb80c72e7c8" dependencies = [ "gobuild", "zktrie_rust", @@ -4428,7 +4429,7 @@ dependencies = [ [[package]] name = "zktrie_rust" version = "0.3.0" -source = "git+https://github.com/scroll-tech/zktrie.git?branch=main#23181f209e94137f74337b150179aeb80c72e7c8" +source = "git+https://github.com/scroll-tech/zktrie.git?branch=v0.7#23181f209e94137f74337b150179aeb80c72e7c8" dependencies = [ "hex", "lazy_static", diff --git a/rollup/ccc/libzkp/Cargo.toml b/rollup/ccc/libzkp/Cargo.toml index dbc8a30e8581..e34369bf91ca 100644 --- a/rollup/ccc/libzkp/Cargo.toml +++ b/rollup/ccc/libzkp/Cargo.toml @@ -23,7 +23,7 @@ poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" } [dependencies] -prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", default-features = false, features = ["parallel_syn", "scroll", "strict-ccc"] } +prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.8", default-features = false, features = ["parallel_syn", "scroll", "strict-ccc"] } anyhow = "1.0" base64 = "0.13.0" diff --git a/rollup/ccc/logger.go b/rollup/ccc/logger.go new file mode 100644 index 000000000000..adf4301fd4c9 --- /dev/null +++ b/rollup/ccc/logger.go @@ -0,0 +1,867 @@ +package ccc + +import ( + "maps" + "math/big" + "time" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/core/vm" + "github.com/scroll-tech/go-ethereum/crypto" + "github.com/scroll-tech/go-ethereum/eth/tracers" + "github.com/scroll-tech/go-ethereum/log" +) + +const ( + sigCountMax = 127 + ecAddCountMax = 50 + ecMulCountMax = 50 + ecPairingCountMax = 2 + rowUsageMax = 1_000_000 + keccakRounds = 24 + keccakRowsPerRound = 12 + keccakRowsPerChunk = (keccakRounds + 1) * keccakRowsPerRound +) + +var _ vm.EVMLogger = (*Logger)(nil) + +// Logger is a tracer that keeps track of resource usages of each subcircuit +// that Scroll's halo2 based zkEVM has. 
Some subcircuits are not tracked
+// here for the following reasons.
+//
+// rlp: the worker already keeps track of how big a block is, and the block size limit
+// it uses is way below what the rlp circuit allows
+// pi: row usage purely depends on the number of txns, and we already have a limit
+// on how many txns the worker will pack in a block
+// poseidon: not straightforward to track in the block building phase. We can do
+// worst-case estimation down the line if needed.
+// mpt: not straightforward to track in the block building phase. We can do
+// worst-case estimation down the line if needed.
+// tx: row usage depends on the length of raw txns and the number of storage
+// slots and/or accounts accessed. With the current gas limit of 10M, it is not possible
+// to overflow the circuit.
+type Logger struct {
+	currentEnv    *vm.EVM
+	isCreate      bool
+	codesAccessed map[common.Hash]bool
+
+	evmUsage       uint64
+	stateUsage     uint64
+	bytecodeUsage  uint64
+	sigCount       uint64
+	ecAddCount     uint64
+	ecMulCount     uint64
+	ecPairingCount uint64
+	copyUsage      uint64
+	sha256Usage    uint64
+	expUsage       uint64
+	modExpUsage    uint64
+	keccakUsage    uint64
+
+	l2TxnsRlpSize uint64
+}
+
+func NewLogger() *Logger {
+	const miscKeccakUsage = 50_000   // heuristically selected safe number to account for Rust side implementation details
+	const miscBytecodeUsage = 50_000 // to account for the inaccuracies in bytecode tracking
+	return &Logger{
+		codesAccessed: make(map[common.Hash]bool),
+		keccakUsage:   miscKeccakUsage,
+		bytecodeUsage: miscBytecodeUsage,
+	}
+}
+
+// Snapshot creates an independent copy of the logger
+func (l *Logger) Snapshot() *Logger {
+	newL := *l
+	newL.codesAccessed = maps.Clone(newL.codesAccessed)
+	newL.currentEnv = nil
+	return &newL
+}
+
+// logBytecodeAccess logs access to the bytecode identified by the given code hash
+func (l *Logger) logBytecodeAccess(codeHash common.Hash, codeSize uint64) {
+	if codeHash != (common.Hash{}) && !l.codesAccessed[codeHash] {
+		l.bytecodeUsage += codeSize + 1
+		l.codesAccessed[codeHash] = true
+	}
+}
+
+// logBytecodeAccessAt logs access to the bytecode at the given addr
+func (l *Logger) logBytecodeAccessAt(addr common.Address) {
+	codeHash := l.currentEnv.StateDB.GetKeccakCodeHash(addr)
+	l.logBytecodeAccess(codeHash, l.currentEnv.StateDB.GetCodeSize(addr))
+}
+
+// logRawBytecode logs access to raw bytecode,
+// useful for CREATE/CREATE2 flows
+func (l *Logger) logRawBytecode(code []byte) {
+	l.logBytecodeAccess(crypto.Keccak256Hash(code), uint64(len(code)))
+}
+
+// computeKeccakRows computes the number of rows used in the keccak circuit for the given byte array length
+func computeKeccakRows(length uint64) uint64 {
+	return ((length + 135) / 136) * keccakRowsPerChunk
+}
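+
+// For example, hashing a 200-byte input takes two 136-byte keccak chunks
+// (136 bytes is the keccak256 sponge rate), so
+// computeKeccakRows(200) = ((200+135)/136) * keccakRowsPerChunk = 2 * 300 = 600 rows,
+// with keccakRowsPerChunk = (keccakRounds + 1) * keccakRowsPerRound = 25 * 12 = 300.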
+
+// logPrecompileAccess checks if the invoked address is a precompile and increments
+// the resource usage of the associated subcircuit
+func (l *Logger) logPrecompileAccess(to common.Address, inputLen uint64, inputFn func(int64, int64) ([]byte, error)) {
+	l.logCopy(inputLen)
+	var outputLen uint64
+	switch to {
+	case common.BytesToAddress([]byte{1}): // &ecrecover{},
+		l.sigCount++
+		l.keccakUsage += computeKeccakRows(64)
+		outputLen = 32
+	case common.BytesToAddress([]byte{2}): // &sha256hash{},
+		l.logSha256(inputLen)
+		outputLen = 32
+	case common.BytesToAddress([]byte{3}): // &ripemd160hashDisabled{},
+	case common.BytesToAddress([]byte{4}): // &dataCopy{},
+		outputLen = inputLen
+	case common.BytesToAddress([]byte{5}): // &bigModExp{eip2565: true},
+		const rowsPerModExpCall = 39962
+		l.modExpUsage += rowsPerModExpCall
+		if inputLen >= 96 {
+			input, err := inputFn(64, 32)
+			if err == nil {
+				outputLen = new(big.Int).SetBytes(input).Uint64() // mSize
+			}
+		}
+	case common.BytesToAddress([]byte{6}): // &bn256AddIstanbul{},
+		l.ecAddCount++
+		outputLen = 64
+	case common.BytesToAddress([]byte{7}): // &bn256ScalarMulIstanbul{},
+		l.ecMulCount++
+		outputLen = 64
+	case common.BytesToAddress([]byte{8}): // &bn256PairingIstanbul{},
+		l.ecPairingCount++
+		outputLen = 32
+	case common.BytesToAddress([]byte{9}): // &blake2FDisabled{},
+	}
+	l.logCopy(2 * outputLen)
+}
+
+// logCall logs a call to the given address, regardless of the address being a precompile or not
+func (l *Logger) logCall(to common.Address, inputLen uint64, inputFn func(int64, int64) ([]byte, error)) {
+	l.logBytecodeAccessAt(to)
+	l.logPrecompileAccess(to, inputLen, inputFn)
+}
+
+func (l *Logger) logCopy(length uint64) {
+	l.copyUsage += length * 2
+}
+
+func (l *Logger) logSha256(inputLen uint64) {
+	const blockRows = 2114
+	const blockSizeInBytes = 64
+	const minPaddingBytes = 9
+
+	numBlocks := (inputLen + minPaddingBytes + blockSizeInBytes - 1) / blockSizeInBytes
+	l.sha256Usage += numBlocks * blockRows
+}
+
+func (l *Logger) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+	l.currentEnv = env
+	l.isCreate = create
+	if !l.isCreate {
+		l.logCall(to, uint64(len(input)), func(argOffset, argLen int64) ([]byte, error) {
+			return input[argOffset : argOffset+argLen], nil
+		})
+	} else {
+		l.logRawBytecode(input) // init bytecode
+	}
+
+	if !env.TxContext.IsL1MessageTx {
+		l.sigCount++
+		l.l2TxnsRlpSize += uint64(env.TxContext.TxSize)
+	}
+	l.keccakUsage += computeKeccakRows(uint64(env.TxContext.TxSize))
+	l.keccakUsage += computeKeccakRows(64) // ecrecover per txn
+}
+
+func (l *Logger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+	if err != nil {
+		return
+	}
+
+	l.evmUsage += evmUsagePerOpCode[op]
+	l.stateUsage += stateUsagePerOpCode[op](scope, depth)
+
+	getInputFn := func(inputOffset int64) func(int64, int64) ([]byte, error) {
+		return func(argOffset, argLen int64) ([]byte, error) {
+			input, err := tracers.GetMemoryCopyPadded(scope.Memory, inputOffset+argOffset, argLen)
+			if err != nil {
+				log.Warn("failed to read call input", "err", err)
+			}
+			return input, err
+		}
+	}
+
+	switch op {
+	case vm.EXTCODECOPY:
+		l.logBytecodeAccessAt(scope.Stack.Back(0).Bytes20())
+		l.logCopy(scope.Stack.Back(3).Uint64())
+	case vm.CALLDATACOPY, vm.RETURNDATACOPY, vm.CODECOPY, vm.MCOPY, vm.CREATE, vm.CREATE2:
+		l.logCopy(scope.Stack.Back(2).Uint64())
+	case vm.SHA3:
+		l.keccakUsage += computeKeccakRows(scope.Stack.Back(1).Uint64())
+		l.logCopy(scope.Stack.Back(1).Uint64())
+	case vm.LOG0, vm.LOG1, vm.LOG2, vm.LOG3, vm.LOG4, vm.RETURN, vm.REVERT:
+		l.logCopy(scope.Stack.Back(1).Uint64())
+	case vm.DELEGATECALL, vm.STATICCALL:
+		inputOffset := int64(scope.Stack.Back(2).Uint64())
+		inputLen := int64(scope.Stack.Back(3).Uint64())
+		l.logCall(scope.Stack.Back(1).Bytes20(), uint64(inputLen), getInputFn(inputOffset))
+	case vm.CALL, vm.CALLCODE:
+		inputOffset := int64(scope.Stack.Back(3).Uint64())
+		inputLen := int64(scope.Stack.Back(4).Uint64())
+		l.logCall(scope.Stack.Back(1).Bytes20(), uint64(inputLen), getInputFn(inputOffset))
+	case vm.EXP:
+		const rowsPerExpCall = 8
+		l.expUsage += rowsPerExpCall
+	}
+}
+
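+// CaptureStateAfter handles CREATE/CREATE2 deployments: the address of the newly
+// deployed contract is only pushed onto the stack once the opcode has completed,
+// so the deployed bytecode cannot be logged in CaptureState above.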
+func (l *Logger) CaptureStateAfter(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+	if err != nil {
+		return
+	}
+
+	switch op {
+	case vm.CREATE, vm.CREATE2:
+		l.logBytecodeAccessAt(scope.Stack.Back(0).Bytes20()) // deployed bytecode
+	}
+}
+
+func (l *Logger) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+	switch typ {
+	case vm.CREATE, vm.CREATE2:
+		l.logRawBytecode(input) // init bytecode
+	}
+}
+
+func (l *Logger) CaptureExit(output []byte, gasUsed uint64, err error) {
+
+}
+
+func (l *Logger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
+
+}
+
+func (l *Logger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
+	if l.isCreate && err != nil {
+		l.logRawBytecode(output) // deployed bytecode
+	}
+}
+
+// Error returns an error if the executed txns triggered an overflow.
+// The caller should revert some transactions and close the block.
+func (l *Logger) Error() error {
+	if l.RowConsumption().IsOverflown() {
+		return ErrBlockRowConsumptionOverflow
+	}
+	return nil
+}
+
+// RowConsumption returns the accumulated resource utilization for each subcircuit so far
+func (l *Logger) RowConsumption() types.RowConsumption {
+	return types.RowConsumption{
+		{
+			Name:      "evm",
+			RowNumber: l.evmUsage,
+		}, {
+			Name:      "state",
+			RowNumber: l.stateUsage,
+		}, {
+			Name:      "bytecode",
+			RowNumber: l.bytecodeUsage,
+		}, {
+			Name:      "sig",
+			RowNumber: uint64(rowUsageMax * (float64(l.sigCount) / sigCountMax)),
+		}, {
+			Name: "ecc",
+			RowNumber: max(
+				// multiply by types.RowConsumptionLimit here, confidence factor is 1.0 on the rust side
+				uint64(types.RowConsumptionLimit*(float64(l.ecAddCount)/ecAddCountMax)),
+				uint64(types.RowConsumptionLimit*(float64(l.ecMulCount)/ecMulCountMax)),
+				uint64(types.RowConsumptionLimit*(float64(l.ecPairingCount)/ecPairingCountMax)),
+			),
+		}, {
+			Name:      "copy",
+			RowNumber: l.copyUsage,
+		}, {
+			Name:      "sha256",
+			RowNumber: l.sha256Usage,
+		}, {
+			Name:      "exp",
+			RowNumber: l.expUsage,
+		}, {
+			Name:      "mod_exp",
+			RowNumber: l.modExpUsage,
+		}, {
+			Name:      "keccak",
+			RowNumber: l.keccakUsage + computeKeccakRows(l.l2TxnsRlpSize),
+		},
+	}
+}
+
+// ForceError makes sure to trigger an error on the next step, should only be used in tests
+func (l *Logger) ForceError() {
+	l.evmUsage += types.RowConsumptionLimit + 1
+	l.stateUsage += types.RowConsumptionLimit + 1
+	l.bytecodeUsage += types.RowConsumptionLimit + 1
+}
+
+// evm circuit resource usage per OpCode
+var evmUsagePerOpCode = [256]uint64{
+	2,  // STOP (0)
+	3,  // ADD (1)
+	4,  // MUL (2)
+	3,  // SUB (3)
+	4,  // DIV (4)
+	10, // SDIV (5)
+	4,  // MOD (6)
+	10, // SMOD (7)
+	9,  // ADDMOD (8)
+	10, // MULMOD (9)
+	3,  // EXP (10)
+	2,  // SIGNEXTEND (11)
+	0,  // UNDEFINED (12)
+	0,  // UNDEFINED (13)
+	0,  // UNDEFINED (14)
+	0,  // UNDEFINED (15)
+	3,  // LT (16)
+	3,  // GT (17)
+	3,  // SLT (18)
+	3,  // SGT (19)
+	3,  // EQ (20)
+	1,  // ISZERO (21)
+	4,  // AND (22)
+	4,  // OR (23)
+	4,  // XOR (24)
+	4,  // NOT (25)
+	2,  // BYTE (26)
+	5,  // SHL (27)
+	5,  // SHR (28)
+	5,  // SAR (29)
+	0,  // UNDEFINED (30)
+	0,  // UNDEFINED (31)
+	2,  // SHA3 (32)
+	0,  // UNDEFINED (33)
+	0,  // UNDEFINED (34)
+	0,  // UNDEFINED (35)
+	0,  // UNDEFINED (36)
+	0,  // UNDEFINED (37)
+	0,  // UNDEFINED (38)
+	0,  // UNDEFINED (39)
+	0,  // UNDEFINED (40)
+	0,  // UNDEFINED (41)
+	0,  // UNDEFINED (42)
+	0,  // UNDEFINED (43)
+	0,  // UNDEFINED (44)
+	0,  // UNDEFINED (45)
+	0,  // UNDEFINED (46)
+	0,  // UNDEFINED (47)
+	1,  // ADDRESS (48)
+	2,  // BALANCE (49)
+	1,
// ORIGIN (50) + 1, // CALLER (51) + 1, // CALLVALUE (52) + 8, // CALLDATALOAD (53) + 1, // CALLDATASIZE (54) + 2, // CALLDATACOPY (55) + 2, // CODESIZE (56) + 2, // CODECOPY (57) + 1, // GASPRICE (58) + 2, // EXTCODESIZE (59) + 3, // EXTCODECOPY (60) + 1, // RETURNDATASIZE (61) + 4, // RETURNDATACOPY (62) + 1, // EXTCODEHASH (63) + 3, // BLOCKHASH (64) + 1, // COINBASE (65) + 1, // TIMESTAMP (66) + 1, // NUMBER (67) + 1, // DIFFICULTY (68) + 1, // GASLIMIT (69) + 1, // CHAINID (70) + 1, // SELFBALANCE (71) + 1, // BASEFEE (72) + 0, // UNDEFINED (73) + 0, // UNDEFINED (74) + 0, // UNDEFINED (75) + 0, // UNDEFINED (76) + 0, // UNDEFINED (77) + 0, // UNDEFINED (78) + 0, // UNDEFINED (79) + 1, // POP (80) + 5, // MLOAD (81) + 5, // MSTORE (82) + 5, // MSTORE8 (83) + 2, // SLOAD (84) + 3, // SSTORE (85) + 2, // JUMP (86) + 2, // JUMPI (87) + 1, // PC (88) + 1, // MSIZE (89) + 1, // GAS (90) + 1, // JUMPDEST (91) + 2, // TLOAD (92) + 3, // TSTORE (93) + 2, // MCOPY (94) + 1, // PUSH0 (95) + 1, // PUSH1 (96) + 1, // PUSH2 (97) + 1, // PUSH3 (98) + 1, // PUSH4 (99) + 1, // PUSH5 (100) + 1, // PUSH6 (101) + 1, // PUSH7 (102) + 1, // PUSH8 (103) + 1, // PUSH9 (104) + 1, // PUSH10 (105) + 1, // PUSH11 (106) + 1, // PUSH12 (107) + 1, // PUSH13 (108) + 1, // PUSH14 (109) + 1, // PUSH15 (110) + 1, // PUSH16 (111) + 1, // PUSH17 (112) + 1, // PUSH18 (113) + 1, // PUSH19 (114) + 1, // PUSH20 (115) + 1, // PUSH21 (116) + 1, // PUSH22 (117) + 1, // PUSH23 (118) + 1, // PUSH24 (119) + 1, // PUSH25 (120) + 1, // PUSH26 (121) + 1, // PUSH27 (122) + 1, // PUSH28 (123) + 1, // PUSH29 (124) + 1, // PUSH30 (125) + 1, // PUSH31 (126) + 1, // PUSH32 (127) + 1, // DUP1 (128) + 1, // DUP2 (129) + 1, // DUP3 (130) + 1, // DUP4 (131) + 1, // DUP5 (132) + 1, // DUP6 (133) + 1, // DUP7 (134) + 1, // DUP8 (135) + 1, // DUP9 (136) + 1, // DUP10 (137) + 1, // DUP11 (138) + 1, // DUP12 (139) + 1, // DUP13 (140) + 1, // DUP14 (141) + 1, // DUP15 (142) + 1, // DUP16 (143) + 1, // SWAP1 (144) + 1, // SWAP2 (145) + 1, // SWAP3 (146) + 1, // SWAP4 (147) + 1, // SWAP5 (148) + 1, // SWAP6 (149) + 1, // SWAP7 (150) + 1, // SWAP8 (151) + 1, // SWAP9 (152) + 1, // SWAP10 (153) + 1, // SWAP11 (154) + 1, // SWAP12 (155) + 1, // SWAP13 (156) + 1, // SWAP14 (157) + 1, // SWAP15 (158) + 1, // SWAP16 (159) + 2, // LOG0 (160) + 2, // LOG1 (161) + 2, // LOG2 (162) + 2, // LOG3 (163) + 2, // LOG4 (164) + 0, // UNDEFINED (165) + 0, // UNDEFINED (166) + 0, // UNDEFINED (167) + 0, // UNDEFINED (168) + 0, // UNDEFINED (169) + 0, // UNDEFINED (170) + 0, // UNDEFINED (171) + 0, // UNDEFINED (172) + 0, // UNDEFINED (173) + 0, // UNDEFINED (174) + 0, // UNDEFINED (175) + 0, // UNDEFINED (176) + 0, // UNDEFINED (177) + 0, // UNDEFINED (178) + 0, // UNDEFINED (179) + 0, // UNDEFINED (180) + 0, // UNDEFINED (181) + 0, // UNDEFINED (182) + 0, // UNDEFINED (183) + 0, // UNDEFINED (184) + 0, // UNDEFINED (185) + 0, // UNDEFINED (186) + 0, // UNDEFINED (187) + 0, // UNDEFINED (188) + 0, // UNDEFINED (189) + 0, // UNDEFINED (190) + 0, // UNDEFINED (191) + 0, // UNDEFINED (192) + 0, // UNDEFINED (193) + 0, // UNDEFINED (194) + 0, // UNDEFINED (195) + 0, // UNDEFINED (196) + 0, // UNDEFINED (197) + 0, // UNDEFINED (198) + 0, // UNDEFINED (199) + 0, // UNDEFINED (200) + 0, // UNDEFINED (201) + 0, // UNDEFINED (202) + 0, // UNDEFINED (203) + 0, // UNDEFINED (204) + 0, // UNDEFINED (205) + 0, // UNDEFINED (206) + 0, // UNDEFINED (207) + 0, // UNDEFINED (208) + 0, // UNDEFINED (209) + 0, // UNDEFINED (210) + 0, // UNDEFINED (211) + 0, // UNDEFINED (212) + 0, // 
UNDEFINED (213) + 0, // UNDEFINED (214) + 0, // UNDEFINED (215) + 0, // UNDEFINED (216) + 0, // UNDEFINED (217) + 0, // UNDEFINED (218) + 0, // UNDEFINED (219) + 0, // UNDEFINED (220) + 0, // UNDEFINED (221) + 0, // UNDEFINED (222) + 0, // UNDEFINED (223) + 0, // UNDEFINED (224) + 0, // UNDEFINED (225) + 0, // UNDEFINED (226) + 0, // UNDEFINED (227) + 0, // UNDEFINED (228) + 0, // UNDEFINED (229) + 0, // UNDEFINED (230) + 0, // UNDEFINED (231) + 0, // UNDEFINED (232) + 0, // UNDEFINED (233) + 0, // UNDEFINED (234) + 0, // UNDEFINED (235) + 0, // UNDEFINED (236) + 0, // UNDEFINED (237) + 0, // UNDEFINED (238) + 0, // UNDEFINED (239) + 9, // CREATE (240) + 12, // CALL (241) + 12, // CALLCODE (242) + 4, // RETURN (243) + 12, // DELEGATECALL (244) + 9, // CREATE2 (245) + 0, // UNDEFINED (246) + 0, // UNDEFINED (247) + 0, // UNDEFINED (248) + 0, // UNDEFINED (249) + 12, // STATICCALL (250) + 0, // UNDEFINED (251) + 0, // UNDEFINED (252) + 4, // REVERT (253) + 0, // INVALID (254) + 0, // SELFDESTRUCT (255) +} + +func constantStateUsage(usage uint64) func(*vm.ScopeContext, int) uint64 { + return func(_ *vm.ScopeContext, _ int) uint64 { + return usage + } +} + +func logStateUsage(size uint64) func(*vm.ScopeContext, int) uint64 { + return func(scope *vm.ScopeContext, _ int) uint64 { + return 2*(scope.Stack.Back(1).Uint64()/32) + 7 + 2*size + } +} + +// state circuit resource usage per OpCode +var stateUsagePerOpCode = [256]func(*vm.ScopeContext, int) uint64{ + constantStateUsage(13), // STOP (0) + constantStateUsage(3), // ADD (1) + constantStateUsage(3), // MUL (2) + constantStateUsage(3), // SUB (3) + constantStateUsage(3), // DIV (4) + constantStateUsage(3), // SDIV (5) + constantStateUsage(3), // MOD (6) + constantStateUsage(3), // SMOD (7) + constantStateUsage(4), // ADDMOD (8) + constantStateUsage(4), // MULMOD (9) + constantStateUsage(3), // EXP (10) + constantStateUsage(3), // SIGNEXTEND (11) + constantStateUsage(0), // UNDEFINED (12) + constantStateUsage(0), // UNDEFINED (13) + constantStateUsage(0), // UNDEFINED (14) + constantStateUsage(0), // UNDEFINED (15) + constantStateUsage(3), // LT (16) + constantStateUsage(3), // GT (17) + constantStateUsage(3), // SLT (18) + constantStateUsage(3), // SGT (19) + constantStateUsage(3), // EQ (20) + constantStateUsage(2), // ISZERO (21) + constantStateUsage(3), // AND (22) + constantStateUsage(3), // OR (23) + constantStateUsage(3), // XOR (24) + constantStateUsage(2), // NOT (25) + constantStateUsage(3), // BYTE (26) + constantStateUsage(3), // SHL (27) + constantStateUsage(3), // SHR (28) + constantStateUsage(3), // SAR (29) + constantStateUsage(0), // UNDEFINED (30) + constantStateUsage(0), // UNDEFINED (31) + func(scope *vm.ScopeContext, _ int) uint64 { + // let n = # bytes, then row_consumption = (n/32) + 3 + return scope.Stack.Back(1).Uint64()/32 + 3 + }, // SHA3 (32) + constantStateUsage(0), // UNDEFINED (33) + constantStateUsage(0), // UNDEFINED (34) + constantStateUsage(0), // UNDEFINED (35) + constantStateUsage(0), // UNDEFINED (36) + constantStateUsage(0), // UNDEFINED (37) + constantStateUsage(0), // UNDEFINED (38) + constantStateUsage(0), // UNDEFINED (39) + constantStateUsage(0), // UNDEFINED (40) + constantStateUsage(0), // UNDEFINED (41) + constantStateUsage(0), // UNDEFINED (42) + constantStateUsage(0), // UNDEFINED (43) + constantStateUsage(0), // UNDEFINED (44) + constantStateUsage(0), // UNDEFINED (45) + constantStateUsage(0), // UNDEFINED (46) + constantStateUsage(0), // UNDEFINED (47) + constantStateUsage(2), // ADDRESS (48) + 
constantStateUsage(7),  // BALANCE (49)
+	constantStateUsage(2),  // ORIGIN (50)
+	constantStateUsage(2),  // CALLER (51)
+	constantStateUsage(2),  // CALLVALUE (52)
+	constantStateUsage(7),  // CALLDATALOAD (53)
+	constantStateUsage(2),  // CALLDATASIZE (54)
+	func(scope *vm.ScopeContext, depth int) uint64 {
+		// let n = # bytes in calldata, then row_consumption = (n/32)*2 + (is_root? 5 : 6)
+		constant := uint64(5)
+		if depth != 0 {
+			constant = 6
+		}
+		return 2*(scope.Stack.Back(2).Uint64()/32) + constant
+	}, // CALLDATACOPY (55)
+	constantStateUsage(1), // CODESIZE (56)
+	func(scope *vm.ScopeContext, _ int) uint64 {
+		// let n = # bytes in code, then row_consumption = (n/32) + 3
+		return scope.Stack.Back(2).Uint64()/32 + 3
+	}, // CODECOPY (57)
+	constantStateUsage(2), // GASPRICE (58)
+	constantStateUsage(7), // EXTCODESIZE (59)
+	func(scope *vm.ScopeContext, _ int) uint64 {
+		// let n = # bytes in code, then row_consumption = (n/32) + 3
+		return scope.Stack.Back(3).Uint64()/32 + 3
+	}, // EXTCODECOPY (60)
+	constantStateUsage(2), // RETURNDATASIZE (61)
+	func(scope *vm.ScopeContext, _ int) uint64 {
+		// let n = # of bytes to return, then row_consumption = (n/32)*2 + 6
+		return 2*(scope.Stack.Back(2).Uint64()/32) + 6
+	}, // RETURNDATACOPY (62)
+	constantStateUsage(7),  // EXTCODEHASH (63)
+	constantStateUsage(2),  // BLOCKHASH (64)
+	constantStateUsage(1),  // COINBASE (65)
+	constantStateUsage(1),  // TIMESTAMP (66)
+	constantStateUsage(1),  // NUMBER (67)
+	constantStateUsage(1),  // DIFFICULTY (68)
+	constantStateUsage(1),  // GASLIMIT (69)
+	constantStateUsage(1),  // CHAINID (70)
+	constantStateUsage(3),  // SELFBALANCE (71)
+	constantStateUsage(1),  // BASEFEE (72)
+	constantStateUsage(0),  // UNDEFINED (73)
+	constantStateUsage(0),  // UNDEFINED (74)
+	constantStateUsage(0),  // UNDEFINED (75)
+	constantStateUsage(0),  // UNDEFINED (76)
+	constantStateUsage(0),  // UNDEFINED (77)
+	constantStateUsage(0),  // UNDEFINED (78)
+	constantStateUsage(0),  // UNDEFINED (79)
+	constantStateUsage(1),  // POP (80)
+	constantStateUsage(4),  // MLOAD (81)
+	constantStateUsage(4),  // MSTORE (82)
+	constantStateUsage(3),  // MSTORE8 (83)
+	constantStateUsage(9),  // SLOAD (84)
+	constantStateUsage(11), // SSTORE (85)
+	constantStateUsage(1),  // JUMP (86)
+	constantStateUsage(2),  // JUMPI (87)
+	constantStateUsage(1),  // PC (88)
+	constantStateUsage(1),  // MSIZE (89)
+	constantStateUsage(1),  // GAS (90)
+	constantStateUsage(0),  // JUMPDEST (91)
+	constantStateUsage(5),  // TLOAD (92)
+	constantStateUsage(8),  // TSTORE (93)
+	constantStateUsage(7),  // MCOPY (94)
+	constantStateUsage(1),  // PUSH0 (95)
+	constantStateUsage(1),  // PUSH1 (96)
+	constantStateUsage(1),  // PUSH2 (97)
+	constantStateUsage(1),  // PUSH3 (98)
+	constantStateUsage(1),  // PUSH4 (99)
+	constantStateUsage(1),  // PUSH5 (100)
+	constantStateUsage(1),  // PUSH6 (101)
+	constantStateUsage(1),  // PUSH7 (102)
+	constantStateUsage(1),  // PUSH8 (103)
+	constantStateUsage(1),  // PUSH9 (104)
+	constantStateUsage(1),  // PUSH10 (105)
+	constantStateUsage(1),  // PUSH11 (106)
+	constantStateUsage(1),  // PUSH12 (107)
+	constantStateUsage(1),  // PUSH13 (108)
+	constantStateUsage(1),  // PUSH14 (109)
+	constantStateUsage(1),  // PUSH15 (110)
+	constantStateUsage(1),  // PUSH16 (111)
+	constantStateUsage(1),  // PUSH17 (112)
+	constantStateUsage(1),  // PUSH18 (113)
+	constantStateUsage(1),  // PUSH19 (114)
+	constantStateUsage(1),  // PUSH20 (115)
+	constantStateUsage(1),  // PUSH21 (116)
+	constantStateUsage(1),  // PUSH22 (117)
+	constantStateUsage(1),  // PUSH23 (118)
+
constantStateUsage(1), // PUSH24 (119) + constantStateUsage(1), // PUSH25 (120) + constantStateUsage(1), // PUSH26 (121) + constantStateUsage(1), // PUSH27 (122) + constantStateUsage(1), // PUSH28 (123) + constantStateUsage(1), // PUSH29 (124) + constantStateUsage(1), // PUSH30 (125) + constantStateUsage(1), // PUSH31 (126) + constantStateUsage(1), // PUSH32 (127) + constantStateUsage(2), // DUP1 (128) + constantStateUsage(2), // DUP2 (129) + constantStateUsage(2), // DUP3 (130) + constantStateUsage(2), // DUP4 (131) + constantStateUsage(2), // DUP5 (132) + constantStateUsage(2), // DUP6 (133) + constantStateUsage(2), // DUP7 (134) + constantStateUsage(2), // DUP8 (135) + constantStateUsage(2), // DUP9 (136) + constantStateUsage(2), // DUP10 (137) + constantStateUsage(2), // DUP11 (138) + constantStateUsage(2), // DUP12 (139) + constantStateUsage(2), // DUP13 (140) + constantStateUsage(2), // DUP14 (141) + constantStateUsage(2), // DUP15 (142) + constantStateUsage(2), // DUP16 (143) + constantStateUsage(4), // SWAP1 (144) + constantStateUsage(4), // SWAP2 (145) + constantStateUsage(4), // SWAP3 (146) + constantStateUsage(4), // SWAP4 (147) + constantStateUsage(4), // SWAP5 (148) + constantStateUsage(4), // SWAP6 (149) + constantStateUsage(4), // SWAP7 (150) + constantStateUsage(4), // SWAP8 (151) + constantStateUsage(4), // SWAP9 (152) + constantStateUsage(4), // SWAP10 (153) + constantStateUsage(4), // SWAP11 (154) + constantStateUsage(4), // SWAP12 (155) + constantStateUsage(4), // SWAP13 (156) + constantStateUsage(4), // SWAP14 (157) + constantStateUsage(4), // SWAP15 (158) + constantStateUsage(4), // SWAP16 (159) + logStateUsage(0), // LOG0 (160) + logStateUsage(1), // LOG1 (161) + logStateUsage(2), // LOG2 (162) + logStateUsage(3), // LOG3 (163) + logStateUsage(4), // LOG4 (164) + constantStateUsage(0), // UNDEFINED (165) + constantStateUsage(0), // UNDEFINED (166) + constantStateUsage(0), // UNDEFINED (167) + constantStateUsage(0), // UNDEFINED (168) + constantStateUsage(0), // UNDEFINED (169) + constantStateUsage(0), // UNDEFINED (170) + constantStateUsage(0), // UNDEFINED (171) + constantStateUsage(0), // UNDEFINED (172) + constantStateUsage(0), // UNDEFINED (173) + constantStateUsage(0), // UNDEFINED (174) + constantStateUsage(0), // UNDEFINED (175) + constantStateUsage(0), // UNDEFINED (176) + constantStateUsage(0), // UNDEFINED (177) + constantStateUsage(0), // UNDEFINED (178) + constantStateUsage(0), // UNDEFINED (179) + constantStateUsage(0), // UNDEFINED (180) + constantStateUsage(0), // UNDEFINED (181) + constantStateUsage(0), // UNDEFINED (182) + constantStateUsage(0), // UNDEFINED (183) + constantStateUsage(0), // UNDEFINED (184) + constantStateUsage(0), // UNDEFINED (185) + constantStateUsage(0), // UNDEFINED (186) + constantStateUsage(0), // UNDEFINED (187) + constantStateUsage(0), // UNDEFINED (188) + constantStateUsage(0), // UNDEFINED (189) + constantStateUsage(0), // UNDEFINED (190) + constantStateUsage(0), // UNDEFINED (191) + constantStateUsage(0), // UNDEFINED (192) + constantStateUsage(0), // UNDEFINED (193) + constantStateUsage(0), // UNDEFINED (194) + constantStateUsage(0), // UNDEFINED (195) + constantStateUsage(0), // UNDEFINED (196) + constantStateUsage(0), // UNDEFINED (197) + constantStateUsage(0), // UNDEFINED (198) + constantStateUsage(0), // UNDEFINED (199) + constantStateUsage(0), // UNDEFINED (200) + constantStateUsage(0), // UNDEFINED (201) + constantStateUsage(0), // UNDEFINED (202) + constantStateUsage(0), // UNDEFINED (203) + 
constantStateUsage(0), // UNDEFINED (204) + constantStateUsage(0), // UNDEFINED (205) + constantStateUsage(0), // UNDEFINED (206) + constantStateUsage(0), // UNDEFINED (207) + constantStateUsage(0), // UNDEFINED (208) + constantStateUsage(0), // UNDEFINED (209) + constantStateUsage(0), // UNDEFINED (210) + constantStateUsage(0), // UNDEFINED (211) + constantStateUsage(0), // UNDEFINED (212) + constantStateUsage(0), // UNDEFINED (213) + constantStateUsage(0), // UNDEFINED (214) + constantStateUsage(0), // UNDEFINED (215) + constantStateUsage(0), // UNDEFINED (216) + constantStateUsage(0), // UNDEFINED (217) + constantStateUsage(0), // UNDEFINED (218) + constantStateUsage(0), // UNDEFINED (219) + constantStateUsage(0), // UNDEFINED (220) + constantStateUsage(0), // UNDEFINED (221) + constantStateUsage(0), // UNDEFINED (222) + constantStateUsage(0), // UNDEFINED (223) + constantStateUsage(0), // UNDEFINED (224) + constantStateUsage(0), // UNDEFINED (225) + constantStateUsage(0), // UNDEFINED (226) + constantStateUsage(0), // UNDEFINED (227) + constantStateUsage(0), // UNDEFINED (228) + constantStateUsage(0), // UNDEFINED (229) + constantStateUsage(0), // UNDEFINED (230) + constantStateUsage(0), // UNDEFINED (231) + constantStateUsage(0), // UNDEFINED (232) + constantStateUsage(0), // UNDEFINED (233) + constantStateUsage(0), // UNDEFINED (234) + constantStateUsage(0), // UNDEFINED (235) + constantStateUsage(0), // UNDEFINED (236) + constantStateUsage(0), // UNDEFINED (237) + constantStateUsage(0), // UNDEFINED (238) + constantStateUsage(0), // UNDEFINED (239) + constantStateUsage(42), // CREATE (240) + constantStateUsage(26), // CALL (241) + constantStateUsage(22), // CALLCODE (242) + constantStateUsage(273), // RETURN (243) + constantStateUsage(23), // DELEGATECALL (244) + constantStateUsage(43), // CREATE2 (245) + constantStateUsage(0), // UNDEFINED (246) + constantStateUsage(0), // UNDEFINED (247) + constantStateUsage(0), // UNDEFINED (248) + constantStateUsage(0), // UNDEFINED (249) + constantStateUsage(21), // STATICCALL (250) + constantStateUsage(0), // UNDEFINED (251) + constantStateUsage(0), // UNDEFINED (252) + constantStateUsage(274), // REVERT (253) + constantStateUsage(0), // INVALID (254) + constantStateUsage(0), // SELFDESTRUCT (255) +}
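
Not part of the patch itself: a minimal usage sketch of the counter-based logger during
block building, assuming the caller wires it in as the EVM tracer; all names below are
illustrative and the real miner integration is more involved:

	logger := ccc.NewLogger()
	evm := vm.NewEVM(blockCtx, txCtx, statedb, chainConfig, vm.Config{Debug: true, Tracer: logger})
	// ... apply the candidate txn through the usual state transition ...
	if err := logger.Error(); err != nil {
		// ErrBlockRowConsumptionOverflow: revert the last txn and seal the block early
	}
	rc := logger.RowConsumption() // per-subcircuit row usage accumulated so far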