diff --git a/DEV_CHAIN.md b/DEV_CHAIN.md index d42159f2b64..b77ece8e456 100644 --- a/DEV_CHAIN.md +++ b/DEV_CHAIN.md @@ -24,6 +24,10 @@ On the terminal you can type the following command to start node1. ```bash ./erigon --datadir=dev --chain=dev --private.api.addr=localhost:9090 --mine +``` +Or, you could start the rpcdaemon internally, together with Erigon: +```bash +./erigon --datadir=dev --chain=dev --private.api.addr=localhost:9090 --mine --http.api=eth,erigon,web3,net,debug,trace,txpool,parity,admin --http.corsdomain="*" ``` Argument notes: @@ -32,6 +36,7 @@ On the terminal you can type the following command to start node1. * private.api.addr=localhost:9090 : Tells where Erigon is going to listen for connections. * mine : Add this if you want the node to mine. * dev.period : Add this to specify the timing interval among blocks. Number of seconds MUST be > 0 (if you want empty blocks) otherwise the default value 0 does not allow mining of empty blocks. + * http.api : List of API services to enable over HTTP (RPC) The result will be something like this: @@ -62,11 +67,18 @@ To tell Node 2 where Node 1 is we will use the Enode info of Node 1 we saved bef Open terminal 3 and navigate to erigon/build/bin folder. Paste in the following command the Enode info and run it, be careful to remove the last part ?discport=0. +The node info of the first peer can also be obtained with an admin RPC call: +```bash + curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc": "2.0", "method": "admin_nodeInfo", "params": [], "id":83}' localhost:8545 + ``` + ```bash ./erigon --datadir=dev2 --chain=dev --private.api.addr=localhost:9091 \ --staticpeers="enode://d30d079163d7b69fcb261c0538c0c3faba4fb4429652970e60fa25deb02a789b4811e98b468726ba0be63b9dc925a019f433177eb6b45c23bb78892f786d8f7a@127.0.0.1:53171" \ --nodiscover ``` + +You might face port conflicts if you run both nodes on the same machine. To specify a different port use, for instance, ``--torrent.port 42079``; you might also consider specifying the other port flags: ``--port --http.port --authrpc.port`` To check if the nodes are connected, you can go to the log of both nodes and look for the line diff --git a/Makefile b/Makefile index 159fd90ee20..e7819620718 100644 --- a/Makefile +++ b/Makefile @@ -180,7 +180,6 @@ clean: ## devtools: installs dev tools (and checks for npm installation etc.) devtools: # Notice!
If you adding new binary - add it also to cmd/hack/binary-deps/main.go file - $(GOBUILD) -o $(GOBIN)/go-bindata github.com/kevinburke/go-bindata/go-bindata $(GOBUILD) -o $(GOBIN)/gencodec github.com/fjl/gencodec $(GOBUILD) -o $(GOBIN)/abigen ./cmd/abigen $(GOBUILD) -o $(GOBIN)/codecgen github.com/ugorji/go/codec/codecgen diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 03455c973e3..933b60842b4 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -26,14 +26,14 @@ import ( "time" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" state2 "github.com/ledgerwatch/erigon-lib/state" types2 "github.com/ledgerwatch/erigon-lib/types" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" ethereum "github.com/ledgerwatch/erigon" "github.com/ledgerwatch/erigon/accounts/abi" @@ -42,6 +42,7 @@ import ( "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" @@ -49,6 +50,7 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/event" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/stages" ) @@ -166,11 +168,11 @@ func (b *SimulatedBackend) Rollback() { } func (b *SimulatedBackend) emptyPendingBlock() { - chain, _ := core.GenerateChain(b.m.ChainConfig, b.prependBlock, b.m.Engine, b.m.DB, 1, func(int, *core.BlockGen) {}, false /* intermediateHashes */) - b.pendingBlock = chain.Blocks[0] - b.pendingReceipts = chain.Receipts[0] - b.pendingHeader = chain.Headers[0] - b.gasPool = new(core.GasPool).AddGas(b.pendingHeader.GasLimit).AddDataGas(params.MaxDataGasPerBlock) + blockChain, _ := core.GenerateChain(b.m.ChainConfig, b.prependBlock, b.m.Engine, b.m.DB, 1, func(int, *core.BlockGen) {}) + b.pendingBlock = blockChain.Blocks[0] + b.pendingReceipts = blockChain.Receipts[0] + b.pendingHeader = blockChain.Headers[0] + b.gasPool = new(core.GasPool).AddGas(b.pendingHeader.GasLimit).AddDataGas(chain.MaxDataGasPerBlock) if b.pendingReaderTx != nil { b.pendingReaderTx.Rollback() } @@ -750,7 +752,8 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx types.Transac &b.pendingHeader.Coinbase, b.gasPool, b.pendingState, state.NewNoopWriter(), b.pendingHeader, tx, - &b.pendingHeader.GasUsed, vm.Config{}); err != nil { + &b.pendingHeader.GasUsed, b.pendingHeader.DataGasUsed, + vm.Config{}); err != nil { return err } //fmt.Printf("==== Start producing block %d\n", (b.prependBlock.NumberU64() + 1)) @@ -759,7 +762,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx types.Transac block.AddTxWithChain(b.getHeader, b.m.Engine, tx) } block.AddTxWithChain(b.getHeader, b.m.Engine, tx) - }, false /* intermediateHashes */) + }) if err != nil { return err } @@ -804,7 +807,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error { block.AddTxWithChain(b.getHeader, b.m.Engine, tx) } block.OffsetTime(int64(adjustment.Seconds())) - }, false /* intermediateHashes */) + }) if err != nil { return err } @@ -832,6 
+835,6 @@ func (m callMsg) Data() []byte { return m.CallMsg.Data } func (m callMsg) AccessList() types2.AccessList { return m.CallMsg.AccessList } func (m callMsg) IsFree() bool { return false } -func (m callMsg) DataGas() uint64 { return params.DataGasPerBlob * uint64(len(m.CallMsg.DataHashes)) } +func (m callMsg) DataGas() uint64 { return misc.GetDataGasUsed(len(m.CallMsg.DataHashes)) } func (m callMsg) MaxFeePerDataGas() *uint256.Int { return m.CallMsg.MaxFeePerDataGas } func (m callMsg) DataHashes() []libcommon.Hash { return m.CallMsg.DataHashes } diff --git a/cl/clparams/config.go b/cl/clparams/config.go index 5c4c280a853..d8f3e543925 100644 --- a/cl/clparams/config.go +++ b/cl/clparams/config.go @@ -813,7 +813,8 @@ func gnosisConfig() BeaconChainConfig { cfg.BaseRewardFactor = 25 cfg.SlotsPerEpoch = 16 cfg.EpochsPerSyncCommitteePeriod = 512 - cfg.CapellaForkEpoch = math.MaxUint64 + cfg.CapellaForkEpoch = 648704 + cfg.CapellaForkVersion = 0x03000064 cfg.DenebForkEpoch = math.MaxUint64 cfg.InitializeForkSchedule() return cfg diff --git a/cmd/devnet/README.md b/cmd/devnet/README.md index fe97104cc27..b761abbcfae 100644 --- a/cmd/devnet/README.md +++ b/cmd/devnet/README.md @@ -1,4 +1,130 @@ # Devnet This is an automated tool run on the devnet that simulates p2p connection between nodes and ultimately tests operations on them. -See [DEV_CHAIN](https://github.com/ledgerwatch/erigon/blob/devel/DEV_CHAIN.md) for a manual version. \ No newline at end of file +See [DEV_CHAIN](https://github.com/ledgerwatch/erigon/blob/devel/DEV_CHAIN.md) for a manual version. + +The devnet code performs 3 main functions: + +* It runs a series of internal Erigon nodes which will connect to each other to form an internal P2P network +* It allows for the specification of a series of scenarios which will be run against the nodes on that internal network +* It can optionally run a `support` connection which allows the nodes on the network to be connected to the Erigon diagnostic system + +The specification of both nodes and scenarios for the devnet is done by specifying configuration objects. These objects are currently built in code using go `structs` but are capable of being read as configuration. + +## Devnet runtime start-up + +The devnet runs as a single `go` process which can be started with the following arguments: + +| Arg | Required | Default | Description | +| --- | -------- | ------- | ----------- | +| datadir | Y | | The data directory for the devnet; it contains all the devnet nodes' data and logs | +| chain | N | dev | The devnet chain to run. Currently supported: dev or bor-devnet | +| bor.withoutheimdall | N | false | Bor specific - tells the devnet to run without a heimdall service. With this flag only a single validator is supported on the devnet | +| metrics | N | false | Enable metrics collection and reporting from devnet nodes | +| metrics.node | N | 0 | At the moment only one node on the network can produce metrics.
This value specifies the index of the node in the cluster to attach to | +| metrics.port | N | 6060 | The network port of the node to connect to for gathering metrics | +| diagnostics.url | N | | URL of the diagnostics system provided by the support team, including the unique session PIN; if this is specified the devnet will start a `support` tunnel and connect to the diagnostics platform to provide metrics from the specified node on the devnet | +| insecure | N | false | Used if `diagnostics.url` is set to allow communication with the diagnostics system using self-signed TLS certificates | + +## Network Configuration + +Network configurations are currently specified in code in `main.go` in the `selectNetwork` function. This contains a series of `structs` with the following structure, for example: + +```go + return &devnet.Network{ + DataDir: dataDir, + Chain: networkname.DevChainName, + Logger: logger, + BasePrivateApiAddr: "localhost:10090", + BaseRPCAddr: "localhost:8545", + Nodes: []devnet.Node{ + args.Miner{ + Node: args.Node{ + ConsoleVerbosity: "0", + DirVerbosity: "5", + }, + AccountSlots: 200, + }, + args.NonMiner{ + Node: args.Node{ + ConsoleVerbosity: "0", + DirVerbosity: "5", + }, + }, + }, + }, nil +``` + +Base IPs and addresses are iterated for each node in the network to ensure that when the network starts there are no port clashes, as the entire network operates in a single process and hence shares a common host. Individual nodes will be configured with a default set of command line arguments dependent on type. To see the default arguments per node type, look at the `args/node.go` file where these are specified as tags on the struct members. + +## Scenario Configuration + +Scenarios are similarly specified in code in `main.go` in the `action` function. This is the initial configuration: + +```go + scenarios.Scenario{ + Name: "all", + Steps: []*scenarios.Step{ + {Text: "InitSubscriptions", Args: []any{[]requests.SubMethod{requests.Methods.ETHNewHeads}}}, + {Text: "PingErigonRpc"}, + {Text: "CheckTxPoolContent", Args: []any{0, 0, 0}}, + {Text: "SendTxWithDynamicFee", Args: []any{recipientAddress, services.DevAddress, sendValue}}, + {Text: "AwaitBlocks", Args: []any{2 * time.Second}}, + }, + }) +``` + +Scenarios are created as groups of steps, which in turn are created by registering a `step` handler. To see an example of this, take a look at the `commands/ping.go` file, which adds a ping RPC method (see `PingErigonRpc` above). + +This illustrates the registration process. The `init` function in the file registers the method with the `scenarios` package, which uses the function name as the default step name. Others can be added with additional string arguments to the `StepHandler` call, where they will be treated as regular expressions to be matched when processing scenario steps (a combined sketch of registering and using a custom step is shown below). + +```go +func init() { + scenarios.MustRegisterStepHandlers( + scenarios.StepHandler(PingErigonRpc), + ) +} +``` +Each step method will be called with a `context.Context` as its initial argument. This context provides access to the underlying devnet, so the step handler can use it for processing. + +```go +func PingErigonRpc(ctx context.Context) error { + ... +} +``` +The devnet currently supports the following context methods: + +```go +func Logger(ctx go_context.Context) log.Logger +``` + +Fetches the devnet logger, which can be used for logging step processing.
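+
+As an illustration, here is a minimal sketch of how an additional step might be registered and then use the context logger described above. This is not part of the current code: the step name `LogChainInfo` is hypothetical, and it assumes the context helpers live in the `devnet` package; adjust the imports to the actual package layout.
+
+```go
+package commands
+
+import (
+	"context"
+
+	"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
+	"github.com/ledgerwatch/erigon/cmd/devnet/scenarios"
+)
+
+func init() {
+	// Register the step so that scenarios can refer to it by its function name.
+	scenarios.MustRegisterStepHandlers(
+		scenarios.StepHandler(LogChainInfo),
+	)
+}
+
+// LogChainInfo is a hypothetical step handler: it fetches the devnet logger
+// from the context, writes a log line, and returns nil on success.
+func LogChainInfo(ctx context.Context) error {
+	logger := devnet.Logger(ctx)
+	logger.Info("devnet step executed")
+	return nil
+}
+```
+
+A step written this way could then be referenced from a scenario as `{Text: "LogChainInfo"}`.
+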
+ +```go +func SelectNode(ctx go_context.Context, selector ...interface{}) +``` + +This method selects a node on the network. The selector argument can be either an `int` index or an implementation of the `network.NodeSelector` interface. If no selector is specified, either the `current node` will be returned or a node will be selected at random from the network. + +```go +func SelectMiner(ctx go_context.Context, selector ...interface{}) +``` + +This method selects a mining node on the network. The selector argument can be either an `int` index or an implementation of the `network.NodeSelector` interface. If no selector is specified, either the `current node` will be returned or a miner will be selected at random from the network. + +```go +func SelectNonMiner(ctx go_context.Context, selector ...interface{}) +``` + +This method selects a non-mining node on the network. The selector argument can be either an `int` index or an implementation of the `network.NodeSelector` interface. If no selector is specified, either the `current node` will be returned or a non-miner will be selected at random from the network. + +```go +func WithCurrentNode(ctx go_context.Context, selector interface{}) Context +``` +This method sets the `current node` on the network. It can be called to create a context with a fixed node, which can be passed to subsequent step functions so that they will operate on a defined network node. + +```go +func CurrentNode(ctx go_context.Context) Node +``` + +This method returns the current node from the network context. diff --git a/cmd/devnet/args/node.go b/cmd/devnet/args/node.go index a7a9defb5b7..b2fa84b12b9 100644 --- a/cmd/devnet/args/node.go +++ b/cmd/devnet/args/node.go @@ -12,29 +12,33 @@ import ( type Node struct { requests.RequestGenerator `arg:"-"` - BuildDir string `arg:"positional" default:"./build/bin/devnet"` - DataDir string `arg:"--datadir" default:"./dev"` - Chain string `arg:"--chain" default:"dev"` - Port int `arg:"--port"` - AllowedPorts string `arg:"--p2p.allowed-ports"` - NAT string `arg:"--nat" default:"none"` - ConsoleVerbosity string `arg:"--log.console.verbosity" default:"0"` - DirVerbosity string `arg:"--log.dir.verbosity"` - LogDirPath string `arg:"--log.dir.path"` - LogDirPrefix string `arg:"--log.dir.prefix"` - P2PProtocol string `arg:"--p2p.protocol" default:"68"` - Downloader string `arg:"--no-downloader" default:"true"` - WS string `arg:"--ws" flag:"" default:"true"` - PrivateApiAddr string `arg:"--private.api.addr" default:"localhost:9090"` - HttpPort int `arg:"--http.port" default:"8545"` - HttpVHosts string `arg:"--http.vhosts"` - AuthRpcPort int `arg:"--authrpc.port" default:"8551"` - AuthRpcVHosts string `arg:"--authrpc.vhosts"` - WSPort int `arg:"-" default:"8546"` // flag not defined - GRPCPort int `arg:"-" default:"8547"` // flag not defined - TCPPort int `arg:"-" default:"8548"` // flag not defined - StaticPeers string `arg:"--staticpeers"` - WithoutHeimdall bool `arg:"--bor.withoutheimdall" flag:"" default:"false"` + BuildDir string `arg:"positional" default:"./build/bin/devnet" json:"builddir"` + DataDir string `arg:"--datadir" default:"./dev" json:"datadir"` + Chain string `arg:"--chain" default:"dev" json:"chain"` + Port int `arg:"--port" json:"port,omitempty"` + AllowedPorts string `arg:"--p2p.allowed-ports" json:"p2p.allowed-ports,omitempty"` + NAT string `arg:"--nat" default:"none" json:"nat"` + ConsoleVerbosity string `arg:"--log.console.verbosity" default:"0" json:"log.console.verbosity"` + DirVerbosity string 
`arg:"--log.dir.verbosity" json:"log.dir.verbosity,omitempty"` + LogDirPath string `arg:"--log.dir.path" json:"log.dir.path,omitempty"` + LogDirPrefix string `arg:"--log.dir.prefix" json:"log.dir.prefix,omitempty"` + P2PProtocol string `arg:"--p2p.protocol" default:"68" json:"p2p.protocol"` + Snapshots bool `arg:"--snapshots" flag:"" default:"false" json:"snapshots,omitempty"` + Downloader string `arg:"--no-downloader" default:"true" json:"no-downloader"` + WS string `arg:"--ws" flag:"" default:"true" json:"ws"` + PrivateApiAddr string `arg:"--private.api.addr" default:"localhost:9090" json:"private.api.addr"` + HttpPort int `arg:"--http.port" default:"8545" json:"http.port"` + HttpVHosts string `arg:"--http.vhosts" json:"http.vhosts"` + AuthRpcPort int `arg:"--authrpc.port" default:"8551" json:"authrpc.port"` + AuthRpcVHosts string `arg:"--authrpc.vhosts" json:"authrpc.vhosts"` + WSPort int `arg:"-" default:"8546" json:"-"` // flag not defined + GRPCPort int `arg:"-" default:"8547" json:"-"` // flag not defined + TCPPort int `arg:"-" default:"8548" json:"-"` // flag not defined + Metrics bool `arg:"--metrics" flag:"" default:"false" json:"metrics"` + MetricsPort int `arg:"--metrics.port" json:"metrics.port,omitempty"` + MetricsAddr string `arg:"--metrics.addr" json:"metrics.addr,omitempty"` + StaticPeers string `arg:"--staticpeers" json:"staticpeers,omitempty"` + WithoutHeimdall bool `arg:"--bor.withoutheimdall" flag:"" default:"false" json:"bor.withoutheimdall,omitempty"` } func (node *Node) configure(base Node, nodeNumber int) error { @@ -47,6 +51,12 @@ func (node *Node) configure(base Node, nodeNumber int) error { node.StaticPeers = base.StaticPeers + node.Metrics = base.Metrics + node.MetricsPort = base.MetricsPort + node.MetricsAddr = base.MetricsAddr + + node.Snapshots = base.Snapshots + var err error node.PrivateApiAddr, _, err = portFromBase(base.PrivateApiAddr, nodeNumber, 1) @@ -68,12 +78,12 @@ func (node *Node) configure(base Node, nodeNumber int) error { type Miner struct { Node - Mine bool `arg:"--mine" flag:"true"` - DevPeriod int `arg:"--dev.period"` - BorPeriod int `arg:"--bor.period"` - BorMinBlockSize int `arg:"--bor.minblocksize"` - HttpApi string `arg:"--http.api" default:"admin,eth,erigon,web3,net,debug,trace,txpool,parity,ots"` - AccountSlots int `arg:"--txpool.accountslots" default:"16"` + Mine bool `arg:"--mine" flag:"true" json:"mine"` + DevPeriod int `arg:"--dev.period" json:"dev.period"` + BorPeriod int `arg:"--bor.period" json:"bor.period"` + BorMinBlockSize int `arg:"--bor.minblocksize" json:"bor.minblocksize"` + HttpApi string `arg:"--http.api" default:"admin,eth,erigon,web3,net,debug,trace,txpool,parity,ots" json:"http.api"` + AccountSlots int `arg:"--txpool.accountslots" default:"16" json:"txpool.accountslots"` } func (m Miner) Configure(baseNode Node, nodeNumber int) (int, interface{}, error) { @@ -99,9 +109,9 @@ func (n Miner) IsMiner() bool { type NonMiner struct { Node - HttpApi string `arg:"--http.api" default:"admin,eth,debug,net,trace,web3,erigon,txpool"` - TorrentPort string `arg:"--torrent.port" default:"42070"` - NoDiscover string `arg:"--nodiscover" flag:"" default:"true"` + HttpApi string `arg:"--http.api" default:"admin,eth,debug,net,trace,web3,erigon,txpool" json:"http.api"` + TorrentPort string `arg:"--torrent.port" default:"42070" json:"torrent.port"` + NoDiscover string `arg:"--nodiscover" flag:"" default:"true" json:"nodiscover"` } func (n NonMiner) Configure(baseNode Node, nodeNumber int) (int, interface{}, error) { diff --git 
a/cmd/devnet/devnet/network.go b/cmd/devnet/devnet/network.go index 8a12d602a30..97188f8d816 100644 --- a/cmd/devnet/devnet/network.go +++ b/cmd/devnet/devnet/network.go @@ -20,6 +20,7 @@ import ( erigonapp "github.com/ledgerwatch/erigon/turbo/app" erigoncli "github.com/ledgerwatch/erigon/turbo/cli" "github.com/ledgerwatch/log/v3" + "github.com/urfave/cli/v2" ) type Network struct { @@ -28,13 +29,14 @@ type Network struct { Logger log.Logger BasePrivateApiAddr string BaseRPCAddr string + Snapshots bool Nodes []Node wg sync.WaitGroup peers []string } // Start starts the process for two erigon nodes running on the dev chain -func (nw *Network) Start() error { +func (nw *Network) Start(ctx *cli.Context) error { type configurable interface { Configure(baseNode args.Node, nodeNumber int) (int, interface{}, error) @@ -57,11 +59,23 @@ func (nw *Network) Start() error { Chain: nw.Chain, HttpPort: apiPortNo, PrivateApiAddr: nw.BasePrivateApiAddr, + Snapshots: nw.Snapshots, } + metricsEnabled := ctx.Bool("metrics") + metricsNode := ctx.Int("metrics.node") + for i, node := range nw.Nodes { if configurable, ok := node.(configurable); ok { - nodePort, args, err := configurable.Configure(baseNode, i) + + base := baseNode + + if metricsEnabled && metricsNode == i { + base.Metrics = true + base.MetricsPort = ctx.Int("metrics.port") + } + + nodePort, args, err := configurable.Configure(base, i) if err == nil { node, err = nw.startNode(fmt.Sprintf("http://%s:%d", apiHost, nodePort), args, i) @@ -76,14 +90,23 @@ func (nw *Network) Start() error { // get the enode of the node // - note this has the side effect of waiting for the node to start - if enode, err := getEnode(node); err == nil { - nw.peers = append(nw.peers, enode) - baseNode.StaticPeers = strings.Join(nw.peers, ",") + enode, err := getEnode(node) - // TODO do we need to call AddPeer to the nodes to make them aware of this one - // the current model only works for an appending node network where the peers gossip - // connections - not sure if this is the case ? + if err != nil { + if errors.Is(err, devnetutils.ErrInvalidEnodeString) { + continue + } + + nw.Stop() + return err } + + nw.peers = append(nw.peers, enode) + baseNode.StaticPeers = strings.Join(nw.peers, ",") + + // TODO do we need to call AddPeer to the nodes to make them aware of this one + // the current model only works for an appending node network where the peers gossip + // connections - not sure if this is the case ? 
} } @@ -102,9 +125,11 @@ func (nw *Network) startNode(nodeAddr string, cfg interface{}, nodeNumber int) ( nw.wg.Add(1) node := node{ + sync.Mutex{}, requests.NewRequestGenerator(nodeAddr, nw.Logger), cfg, &nw.wg, + make(chan error), nil, } @@ -126,17 +151,18 @@ func (nw *Network) startNode(nodeAddr string, cfg interface{}, nodeNumber int) ( app := erigonapp.MakeApp(fmt.Sprintf("node-%d", nodeNumber), node.run, erigoncli.DefaultFlags) if err := app.Run(args); err != nil { - _, printErr := fmt.Fprintln(os.Stderr, err) - if printErr != nil { - nw.Logger.Warn("Error writing app run error to stderr", "err", printErr) - } + nw.Logger.Warn("App run returned error", "node", fmt.Sprintf("node-%d", nodeNumber), "err", err) } }() - return node, nil + if err = <-node.startErr; err != nil { + return nil, err + } + + return &node, nil } -// getEnode returns the enode of the mining node +// getEnode returns the enode of the netowrk node func getEnode(n Node) (string, error) { reqCount := 0 @@ -144,6 +170,12 @@ func getEnode(n Node) (string, error) { nodeInfo, err := n.AdminNodeInfo() if err != nil { + if r, ok := n.(*node); ok { + if !r.running() { + return "", err + } + } + if reqCount < 10 { var urlErr *url.Error if errors.As(err, &urlErr) { @@ -151,7 +183,7 @@ func getEnode(n Node) (string, error) { if errors.As(urlErr.Err, &opErr) { var callErr *os.SyscallError if errors.As(opErr.Err, &callErr) { - if callErr.Syscall == "connectex" { + if strings.HasPrefix(callErr.Syscall, "connect") { reqCount++ time.Sleep(time.Duration(devnetutils.RandomInt(5)) * time.Second) continue @@ -181,14 +213,21 @@ func (nw *Network) Run(ctx go_context.Context, scenario scenarios.Scenario) erro func (nw *Network) Stop() { type stoppable interface { Stop() + running() bool } - for _, n := range nw.Nodes { - if stoppable, ok := n.(stoppable); ok { - stoppable.Stop() + for i, n := range nw.Nodes { + if stoppable, ok := n.(stoppable); ok && stoppable.running() { + nw.Logger.Info("Stopping", "node", i) + go stoppable.Stop() } } + nw.Logger.Info("Waiting for nodes to stop") + nw.Wait() +} + +func (nw *Network) Wait() { nw.wg.Wait() } diff --git a/cmd/devnet/devnet/node.go b/cmd/devnet/devnet/node.go index d14761fdc3a..450e5f99b40 100644 --- a/cmd/devnet/devnet/node.go +++ b/cmd/devnet/devnet/node.go @@ -4,6 +4,7 @@ import ( go_context "context" "sync" + "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/cmd/devnet/args" "github.com/ledgerwatch/erigon/cmd/devnet/requests" "github.com/ledgerwatch/erigon/params" @@ -29,19 +30,40 @@ func (f NodeSelectorFunc) Test(ctx go_context.Context, node Node) bool { } type node struct { + sync.Mutex requests.RequestGenerator - args interface{} - wg *sync.WaitGroup - ethNode *enode.ErigonNode + args interface{} + wg *sync.WaitGroup + startErr chan error + ethNode *enode.ErigonNode } func (n *node) Stop() { + var toClose *enode.ErigonNode + + n.Lock() if n.ethNode != nil { - toClose := n.ethNode + toClose = n.ethNode n.ethNode = nil + } + n.Unlock() + + if toClose != nil { toClose.Close() } + n.done() +} + +func (n *node) running() bool { + n.Lock() + defer n.Unlock() + return n.startErr == nil && n.ethNode != nil +} + +func (n *node) done() { + n.Lock() + defer n.Unlock() if n.wg != nil { wg := n.wg n.wg = nil @@ -49,7 +71,7 @@ func (n *node) Stop() { } } -func (n node) IsMiner() bool { +func (n *node) IsMiner() bool { _, isMiner := n.args.(args.Miner) return isMiner } @@ -59,6 +81,17 @@ func (n *node) run(ctx *cli.Context) error { var logger log.Logger var err error + defer n.done() + 
defer func() { + n.Lock() + if n.startErr != nil { + close(n.startErr) + n.startErr = nil + } + n.ethNode = nil + n.Unlock() + }() + if logger, err = debug.Setup(ctx, false /* rootLogger */); err != nil { return err } @@ -68,8 +101,22 @@ func (n *node) run(ctx *cli.Context) error { nodeCfg := enode.NewNodConfigUrfave(ctx, logger) ethCfg := enode.NewEthConfigUrfave(ctx, nodeCfg, logger) + // These are set to prevent disk and page size churn which can be excessive + // when running multiple nodes + // MdbxGrowthStep impacts disk usage, MdbxDBSizeLimit impacts page file usage + nodeCfg.MdbxGrowthStep = 32 * datasize.MB + nodeCfg.MdbxDBSizeLimit = 512 * datasize.MB + n.ethNode, err = enode.New(nodeCfg, ethCfg, logger) + n.Lock() + if n.startErr != nil { + n.startErr <- err + close(n.startErr) + n.startErr = nil + } + n.Unlock() + if err != nil { logger.Error("Node startup", "err", err) return err diff --git a/cmd/devnet/devnetutils/utils.go b/cmd/devnet/devnetutils/utils.go index 960e0b5abaf..ad1633416ba 100644 --- a/cmd/devnet/devnetutils/utils.go +++ b/cmd/devnet/devnetutils/utils.go @@ -3,6 +3,7 @@ package devnetutils import ( "crypto/rand" "encoding/binary" + "errors" "fmt" "net" "os" @@ -19,29 +20,28 @@ import ( "github.com/ledgerwatch/log/v3" ) +var ErrInvalidEnodeString = errors.New("invalid enode string") + // ClearDevDB cleans up the dev folder used for the operations func ClearDevDB(dataDir string, logger log.Logger) error { logger.Info("Deleting nodes' data folders") - nodeNumber := 0 - for { + for nodeNumber := 0; nodeNumber < 100; nodeNumber++ { // Arbitrary number nodeDataDir := filepath.Join(dataDir, fmt.Sprintf("%d", nodeNumber)) fileInfo, err := os.Stat(nodeDataDir) if err != nil { if os.IsNotExist(err) { - break + continue } return err } - if fileInfo.IsDir() { - if err := os.RemoveAll(nodeDataDir); err != nil { - return err - } - logger.Info("SUCCESS => Deleted", "datadir", nodeDataDir) - } else { - break + if !fileInfo.IsDir() { + continue + } + if err := os.RemoveAll(nodeDataDir); err != nil { + return err } - nodeNumber++ + logger.Info("SUCCESS => Deleted", "datadir", nodeDataDir) } return nil } @@ -56,7 +56,7 @@ func HexToInt(hexStr string) uint64 { // UniqueIDFromEnode returns the unique ID from a node's enode, removing the `?discport=0` part func UniqueIDFromEnode(enode string) (string, error) { if len(enode) == 0 { - return "", fmt.Errorf("invalid enode string") + return "", ErrInvalidEnodeString } // iterate through characters in the string until we reach '?' 
@@ -73,14 +73,14 @@ func UniqueIDFromEnode(enode string) (string, error) { } if ati == 0 { - return "", fmt.Errorf("invalid enode string") + return "", ErrInvalidEnodeString } if _, apiPort, err := net.SplitHostPort(enode[ati+1 : i]); err != nil { - return "", fmt.Errorf("invalid enode string") + return "", ErrInvalidEnodeString } else { if _, err := strconv.Atoi(apiPort); err != nil { - return "", fmt.Errorf("invalid enode string") + return "", ErrInvalidEnodeString } } diff --git a/cmd/devnet/main.go b/cmd/devnet/main.go index 28db249ac8c..8f88a82ccde 100644 --- a/cmd/devnet/main.go +++ b/cmd/devnet/main.go @@ -4,12 +4,16 @@ import ( "context" "fmt" "os" + "os/signal" "path/filepath" + dbg "runtime/debug" + "syscall" "time" _ "github.com/ledgerwatch/erigon/cmd/devnet/commands" + "github.com/ledgerwatch/erigon-lib/common/metrics" "github.com/ledgerwatch/erigon/cmd/devnet/args" "github.com/ledgerwatch/erigon/cmd/devnet/devnet" "github.com/ledgerwatch/erigon/cmd/devnet/devnetutils" @@ -21,6 +25,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils/flags" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/app" "github.com/ledgerwatch/erigon/turbo/debug" "github.com/ledgerwatch/erigon/turbo/logging" "github.com/urfave/cli/v2" @@ -44,6 +49,38 @@ var ( Name: "bor.withoutheimdall", Usage: "Run without Heimdall service", } + + MetricsEnabledFlag = cli.BoolFlag{ + Name: "metrics", + Usage: "Enable metrics collection and reporting", + } + + MetricsNodeFlag = cli.IntFlag{ + Name: "metrics.node", + Usage: "Which node of the cluster to attach to", + Value: 0, + } + + MetricsPortFlag = cli.IntFlag{ + Name: "metrics.port", + Usage: "Metrics HTTP server listening port", + Value: metrics.DefaultConfig.Port, + } + + DiagnosticsURLFlag = cli.StringFlag{ + Name: "diagnostics.url", + Usage: "URL of the diagnostics system provided by the support team, include unique session PIN", + } + + insecureFlag = cli.BoolFlag{ + Name: "insecure", + Usage: "Allows communication with diagnostics system using self-signed TLS certificates", + } + + metricsURLsFlag = cli.StringSliceFlag{ + Name: "metrics.urls", + Usage: "internal flag", + } ) type PanicHandler struct { @@ -68,6 +105,12 @@ func main() { &DataDirFlag, &ChainFlag, &WithoutHeimdallFlag, + &MetricsEnabledFlag, + &MetricsNodeFlag, + &MetricsPortFlag, + &DiagnosticsURLFlag, + &insecureFlag, + &metricsURLsFlag, } app.After = func(ctx *cli.Context) error { @@ -109,12 +152,43 @@ func action(ctx *cli.Context) error { return err } + metrics := ctx.Bool("metrics") + + if metrics { + // TODO should get this from the network as once we have multiple nodes we'll need to iterate the + // nodes and create a series of urls - for the moment only one is supported + ctx.Set("metrics.urls", fmt.Sprintf("http://localhost:%d/debug/metrics/", ctx.Int("metrics.port"))) + } + // start the network with each node in a go routine logger.Info("Starting Network") - if err := network.Start(); err != nil { + if err := network.Start(ctx); err != nil { return fmt.Errorf("Network start failed: %w", err) } + go func() { + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT) + + switch s := <-signalCh; s { + case syscall.SIGTERM: + logger.Info("Stopping network") + network.Stop() + + case syscall.SIGINT: + logger.Info("Terminating network") + os.Exit(-int(syscall.SIGINT)) + } + }() + + diagnosticsUrl := ctx.String("diagnostics.url") + + if metrics && len(diagnosticsUrl) > 0 { + go func() { + app.ConnectDiagnostics(ctx, logger) + 
}() + } + runCtx := devnet.WithCliContext(context.Background(), ctx) if ctx.String(ChainFlag.Name) == networkname.DevChainName { @@ -137,8 +211,13 @@ func action(ctx *cli.Context) error { }, }) - logger.Info("Stopping Network") - network.Stop() + if metrics && len(diagnosticsUrl) > 0 { + logger.Info("Waiting") + network.Wait() + } else { + logger.Info("Stopping Network") + network.Stop() + } return nil } @@ -156,6 +235,7 @@ func selectNetwork(ctx *cli.Context, logger log.Logger) (*devnet.Network, error) Logger: logger, BasePrivateApiAddr: "localhost:10090", BaseRPCAddr: "localhost:8545", + //Snapshots: true, Nodes: []devnet.Node{ args.Miner{ Node: args.Node{ diff --git a/cmd/devnet/requests/request_generator.go b/cmd/devnet/requests/request_generator.go index c51cb49bf36..1d54b30f998 100644 --- a/cmd/devnet/requests/request_generator.go +++ b/cmd/devnet/requests/request_generator.go @@ -175,7 +175,7 @@ func (req *requestGenerator) PingErigonRpc() CallResult { func NewRequestGenerator(target string, logger log.Logger) RequestGenerator { return &requestGenerator{ client: &http.Client{ - Timeout: time.Second * 600, + Timeout: time.Second * 10, }, reqID: 1, logger: logger, diff --git a/cmd/erigon-el/backend/backend.go b/cmd/erigon-el/backend/backend.go index 87b69d5e2ed..fa314a1c279 100644 --- a/cmd/erigon-el/backend/backend.go +++ b/cmd/erigon-el/backend/backend.go @@ -173,7 +173,7 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) ( } // Assemble the Ethereum object - chainKv, err := node.OpenDatabase(stack.Config(), kv.ChainDB, logger) + chainKv, err := node.OpenDatabase(stack.Config(), kv.ChainDB, "", false, logger) if err != nil { return nil, err } @@ -421,8 +421,8 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) ( } else { consensusConfig = &config.Ethash } - backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, - config.HeimdallgRPCAddress, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), false /* readonly */, logger) + backend.engine = ethconsensusconfig.CreateConsensusEngine(stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, + config.HeimdallgRPCAddress, config.HeimdallURL, config.WithoutHeimdall, false /* readonly */, logger) backend.forkValidator = engineapi.NewForkValidator(currentBlockNumber, inMemoryExecution, tmpdir, backend.blockReader) if err != nil { diff --git a/cmd/erigon/main.go b/cmd/erigon/main.go index 7a28293cfb4..0fd23f8709e 100644 --- a/cmd/erigon/main.go +++ b/cmd/erigon/main.go @@ -8,6 +8,7 @@ import ( "reflect" "strings" + "github.com/VictoriaMetrics/metrics" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/log/v3" "github.com/pelletier/go-toml" @@ -58,7 +59,10 @@ func runErigon(cliCtx *cli.Context) error { } // initializing the node and providing the current git commit there + logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit) + erigonInfoGauge := metrics.GetOrCreateCounter(fmt.Sprintf(`erigon_info{version="%s",commit="%s"}`, params.Version, params.GitCommit)) + erigonInfoGauge.Set(1) nodeCfg := node.NewNodConfigUrfave(cliCtx, logger) ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg, logger) diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index f51c5ac753b..5d7f5dafd6e 100644 --- a/cmd/integration/commands/stages.go +++ 
b/cmd/integration/commands/stages.go @@ -12,6 +12,7 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/core/rawdb/blockio" + "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/secp256k1" @@ -1603,7 +1604,7 @@ func overrideStorageMode(db kv.RwDB, logger log.Logger) error { }) } -func initConsensusEngine(cc *chain2.Config, datadir string, db kv.RwDB, logger log.Logger) (engine consensus.Engine) { +func initConsensusEngine(cc *chain2.Config, dir string, db kv.RwDB, logger log.Logger) (engine consensus.Engine) { config := ethconfig.Defaults var consensusConfig interface{} @@ -1617,6 +1618,6 @@ func initConsensusEngine(cc *chain2.Config, datadir string, db kv.RwDB, logger l } else { consensusConfig = &config.Ethash } - return ethconsensusconfig.CreateConsensusEngine(cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, - HeimdallgRPCAddress, HeimdallURL, config.WithoutHeimdall, datadir, db.ReadOnly(), logger) + return ethconsensusconfig.CreateConsensusEngine(&nodecfg.Config{Dirs: datadir.New(dir)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, + HeimdallgRPCAddress, HeimdallURL, config.WithoutHeimdall, db.ReadOnly(), logger) } diff --git a/cmd/integration/commands/state_domains.go b/cmd/integration/commands/state_domains.go index d30efbc702d..2c1602eea24 100644 --- a/cmd/integration/commands/state_domains.go +++ b/cmd/integration/commands/state_domains.go @@ -39,7 +39,6 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/params" erigoncli "github.com/ledgerwatch/erigon/turbo/cli" "github.com/ledgerwatch/erigon/turbo/debug" "github.com/ledgerwatch/erigon/turbo/services" @@ -511,8 +510,9 @@ func (b *blockProcessor) applyBlock( header := block.Header() b.vmConfig.Debug = true - gp := new(core.GasPool).AddGas(block.GasLimit()).AddDataGas(params.MaxDataGasPerBlock) + gp := new(core.GasPool).AddGas(block.GasLimit()).AddDataGas(chain2.MaxDataGasPerBlock) usedGas := new(uint64) + usedDataGas := new(uint64) var receipts types.Receipts rules := b.chainConfig.Rules(block.NumberU64(), block.Time()) @@ -545,7 +545,7 @@ func (b *blockProcessor) applyBlock( ibs.SetTxContext(tx.Hash(), block.Hash(), i) ct := exec3.NewCallTracer() b.vmConfig.Tracer = ct - receipt, _, err := core.ApplyTransaction(b.chainConfig, getHashFn, b.engine, nil, gp, ibs, b.writer, header, tx, usedGas, b.vmConfig) + receipt, _, err := core.ApplyTransaction(b.chainConfig, getHashFn, b.engine, nil, gp, ibs, b.writer, header, tx, usedGas, usedDataGas, b.vmConfig) if err != nil { return nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/pics/state.go b/cmd/pics/state.go index 81c40e87fdc..f3bf0ea2534 100644 --- a/cmd/pics/state.go +++ b/cmd/pics/state.go @@ -410,7 +410,7 @@ func initialState1() error { block.AddTx(tx) } contractBackend.Commit() - }, true) + }) if err != nil { return err } diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md index 491e8735a28..5bbf457bfea 100644 --- a/cmd/rpcdaemon/README.md +++ b/cmd/rpcdaemon/README.md @@ -329,6 +329,7 @@ The following table shows the current implementation status of Erigon's RPC daem | bor_getSignersAtHash | Yes | Bor only | | bor_getCurrentProposer | Yes | Bor only | | bor_getCurrentValidators | Yes | Bor only | +| bor_getSnapshotProposerSequence | Yes 
| Bor only | | bor_getRootHash | Yes | Bor only | ### GraphQL diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 39f744a2f94..522615871ee 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -104,6 +104,7 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { rootCmd.PersistentFlags().StringVar(&cfg.GRPCListenAddress, "grpc.addr", nodecfg.DefaultGRPCHost, "GRPC server listening interface") rootCmd.PersistentFlags().IntVar(&cfg.GRPCPort, "grpc.port", nodecfg.DefaultGRPCPort, "GRPC server listening port") rootCmd.PersistentFlags().BoolVar(&cfg.GRPCHealthCheckEnabled, "grpc.healthcheck", false, "Enable GRPC health check") + rootCmd.PersistentFlags().Float64Var(ðconfig.Defaults.RPCTxFeeCap, utils.RPCGlobalTxFeeCapFlag.Name, utils.RPCGlobalTxFeeCapFlag.Value, utils.RPCGlobalTxFeeCapFlag.Usage) rootCmd.PersistentFlags().BoolVar(&cfg.TCPServerEnabled, "tcp", false, "Enable TCP server") rootCmd.PersistentFlags().StringVar(&cfg.TCPListenAddress, "tcp.addr", nodecfg.DefaultTCPHost, "TCP server listening interface") diff --git a/cmd/rpcdaemon/commands/bor_api.go b/cmd/rpcdaemon/commands/bor_api.go index 4cf6f295ab2..5db34d8370a 100644 --- a/cmd/rpcdaemon/commands/bor_api.go +++ b/cmd/rpcdaemon/commands/bor_api.go @@ -18,6 +18,7 @@ type BorAPI interface { GetSignersAtHash(hash common.Hash) ([]common.Address, error) GetCurrentProposer() (common.Address, error) GetCurrentValidators() ([]*valset.Validator, error) + GetSnapshotProposerSequence(blockNrOrHash *rpc.BlockNumberOrHash) (BlockSigners, error) GetRootHash(start uint64, end uint64) (string, error) } diff --git a/cmd/rpcdaemon/commands/bor_helper.go b/cmd/rpcdaemon/commands/bor_helper.go index dccdc363621..2d50122708b 100644 --- a/cmd/rpcdaemon/commands/bor_helper.go +++ b/cmd/rpcdaemon/commands/bor_helper.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "sort" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" @@ -160,3 +161,16 @@ func author(api *BorImpl, tx kv.Tx, header *types.Header) (common.Address, error config, _ := api.chainConfig(tx) return ecrecover(header, config.Bor) } + +func rankMapDifficulties(values map[common.Address]uint64) []difficultiesKV { + ss := make([]difficultiesKV, 0, len(values)) + for k, v := range values { + ss = append(ss, difficultiesKV{k, v}) + } + + sort.Slice(ss, func(i, j int) bool { + return ss[i].Difficulty > ss[j].Difficulty + }) + + return ss +} diff --git a/cmd/rpcdaemon/commands/bor_snapshot.go b/cmd/rpcdaemon/commands/bor_snapshot.go index 1bde2086118..27287735562 100644 --- a/cmd/rpcdaemon/commands/bor_snapshot.go +++ b/cmd/rpcdaemon/commands/bor_snapshot.go @@ -193,6 +193,98 @@ func (api *BorImpl) GetCurrentValidators() ([]*valset.Validator, error) { return snap.ValidatorSet.Validators, nil } +type BlockSigners struct { + Signers []difficultiesKV + Diff int + Author common.Address +} + +type difficultiesKV struct { + Signer common.Address + Difficulty uint64 +} + +func (api *BorImpl) GetSnapshotProposerSequence(blockNrOrHash *rpc.BlockNumberOrHash) (BlockSigners, error) { + // init chain db + ctx := context.Background() + tx, err := api.db.BeginRo(ctx) + if err != nil { + return BlockSigners{}, err + } + defer tx.Rollback() + + // Retrieve the requested block number (or current if none requested) + var header *types.Header + if blockNrOrHash == nil { + header = rawdb.ReadCurrentHeader(tx) + } else { + if blockNr, ok := blockNrOrHash.Number(); ok { + if blockNr == rpc.LatestBlockNumber { + header = 
rawdb.ReadCurrentHeader(tx) + } else { + header, err = getHeaderByNumber(ctx, blockNr, api, tx) + } + } else { + if blockHash, ok := blockNrOrHash.Hash(); ok { + header, err = getHeaderByHash(ctx, api, tx, blockHash) + } + } + } + + // Ensure we have an actually valid block + if header == nil || err != nil { + return BlockSigners{}, errUnknownBlock + } + + // init consensus db + borTx, err := api.borDb.BeginRo(ctx) + if err != nil { + return BlockSigners{}, err + } + defer borTx.Rollback() + + parent, err := getHeaderByNumber(ctx, rpc.BlockNumber(int64(header.Number.Uint64()-1)), api, tx) + if parent == nil || err != nil { + return BlockSigners{}, errUnknownBlock + } + snap, err := snapshot(ctx, api, tx, borTx, parent) + + var difficulties = make(map[common.Address]uint64) + + if err != nil { + return BlockSigners{}, err + } + + proposer := snap.ValidatorSet.GetProposer().Address + proposerIndex, _ := snap.ValidatorSet.GetByAddress(proposer) + + signers := snap.signers() + for i := 0; i < len(signers); i++ { + tempIndex := i + if tempIndex < proposerIndex { + tempIndex = tempIndex + len(signers) + } + + difficulties[signers[i]] = uint64(len(signers) - (tempIndex - proposerIndex)) + } + + rankedDifficulties := rankMapDifficulties(difficulties) + + author, err := author(api, tx, header) + if err != nil { + return BlockSigners{}, err + } + + diff := int(difficulties[author]) + blockSigners := BlockSigners{ + Signers: rankedDifficulties, + Diff: diff, + Author: author, + } + + return blockSigners, nil +} + // GetRootHash returns the merkle root of the start to end block headers func (api *BorImpl) GetRootHash(start, end uint64) (string, error) { length := end - start + 1 diff --git a/cmd/rpcdaemon/commands/call_traces_test.go b/cmd/rpcdaemon/commands/call_traces_test.go index 7e45325bbc6..c1288d4b0e9 100644 --- a/cmd/rpcdaemon/commands/call_traces_test.go +++ b/cmd/rpcdaemon/commands/call_traces_test.go @@ -44,7 +44,7 @@ func TestCallTraceOneByOne(t *testing.T) { m := stages.Mock(t) chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { gen.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate chain: %v", err) } @@ -79,7 +79,7 @@ func TestCallTraceUnwind(t *testing.T) { var err error chainA, err = core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { gen.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate chainA: %v", err) } @@ -89,7 +89,7 @@ func TestCallTraceUnwind(t *testing.T) { } else { gen.SetCoinbase(common.Address{2}) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate chainB: %v", err) } @@ -151,7 +151,7 @@ func TestFilterNoAddresses(t *testing.T) { m := stages.Mock(t) chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { gen.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate chain: %v", err) } @@ -202,7 +202,7 @@ func TestFilterAddressIntersection(t *testing.T) { t.Fatal(err) } block.AddTx(txn) - }, false /* intermediateHashes */) + }) require.NoError(t, err, "generate chain") err = m.InsertChain(chain, nil) diff --git a/cmd/rpcdaemon/commands/erigon_receipts_test.go b/cmd/rpcdaemon/commands/erigon_receipts_test.go index 29c5df7e928..8e237839148 100644 --- a/cmd/rpcdaemon/commands/erigon_receipts_test.go +++ 
b/cmd/rpcdaemon/commands/erigon_receipts_test.go @@ -213,7 +213,7 @@ func mockWithGenerator(t *testing.T, blocks int, generator func(int, *core.Block Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, }, testKey, false) if blocks > 0 { - chain, _ := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, blocks, generator, true) + chain, _ := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, blocks, generator) err := m.InsertChain(chain, nil) require.NoError(t, err) } diff --git a/cmd/rpcdaemon/commands/eth_api_test.go b/cmd/rpcdaemon/commands/eth_api_test.go index 7b4d8d283a9..dbc6dc8a4d4 100644 --- a/cmd/rpcdaemon/commands/eth_api_test.go +++ b/cmd/rpcdaemon/commands/eth_api_test.go @@ -120,7 +120,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { - }, true) + }) if err != nil { t.Fatal(err) } @@ -141,7 +141,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_BlockNotFoundError(t addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { - }, true) + }) if err != nil { t.Fatal(err) } diff --git a/cmd/rpcdaemon/commands/eth_call_test.go b/cmd/rpcdaemon/commands/eth_call_test.go index 3679758a1ab..6023e071e1b 100644 --- a/cmd/rpcdaemon/commands/eth_call_test.go +++ b/cmd/rpcdaemon/commands/eth_call_test.go @@ -519,7 +519,7 @@ func chainWithDeployedContract(t *testing.T) (*stages.MockSentry, libcommon.Addr assert.NoError(t, err) block.AddTx(txn) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index 0ddc1cac756..8a05c99ef94 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -9,6 +9,8 @@ import ( "github.com/RoaringBitmap/roaring" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" @@ -17,7 +19,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" - "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/consensus" @@ -30,7 +31,6 @@ import ( "github.com/ledgerwatch/erigon/core/vm/evmtypes" "github.com/ledgerwatch/erigon/eth/filters" "github.com/ledgerwatch/erigon/ethdb/cbor" - "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" @@ -49,7 +49,8 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *chai } usedGas := new(uint64) - gp := new(core.GasPool).AddGas(block.GasLimit()).AddDataGas(params.MaxDataGasPerBlock) + usedDataGas := new(uint64) + gp := new(core.GasPool).AddGas(block.GasLimit()).AddDataGas(chain.MaxDataGasPerBlock) noopWriter := state.NewNoopWriter() @@ -65,7 +66,7 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *chai header := block.Header() for i, txn := range block.Transactions() { ibs.SetTxContext(txn.Hash(), block.Hash(), i) 
- receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, noopWriter, header, txn, usedGas, usedDataGas, vm.Config{}) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index 959200d364c..79501c1249a 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -24,7 +24,7 @@ func TestEthSubscribe(t *testing.T) { m, require := stages.Mock(t), require.New(t) chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 7, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) require.NoError(err) b, err := rlp.EncodeToBytes(ð.BlockHeadersPacket66{ @@ -53,7 +53,7 @@ func TestEthSubscribe(t *testing.T) { highestSeenHeader := chain.TopBlock.NumberU64() hook := stages.NewHook(m.Ctx, m.Notifications, m.Sync, m.BlockReader, m.ChainConfig, m.Log, m.UpdateHead) - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, logger, m.BlockReader, hook); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, logger, m.BlockReader, hook); err != nil { t.Fatal(err) } diff --git a/cmd/rpcdaemon/commands/eth_system_test.go b/cmd/rpcdaemon/commands/eth_system_test.go index 281595859aa..c5db8900752 100644 --- a/cmd/rpcdaemon/commands/eth_system_test.go +++ b/cmd/rpcdaemon/commands/eth_system_test.go @@ -76,7 +76,7 @@ func createGasPriceTestKV(t *testing.T, chainSize int) *stages.MockSentry { t.Fatalf("failed to create tx: %v", txErr) } b.AddTx(tx) - }, false) + }) if err != nil { t.Error(err) } diff --git a/cmd/rpcdaemon/commands/send_transaction_test.go b/cmd/rpcdaemon/commands/send_transaction_test.go index a1d40d7c20d..125a6d4c796 100644 --- a/cmd/rpcdaemon/commands/send_transaction_test.go +++ b/cmd/rpcdaemon/commands/send_transaction_test.go @@ -40,7 +40,7 @@ func TestSendRawTransaction(t *testing.T) { chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { b.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) + }) require.NoError(err) { // Do 1 step to start txPool @@ -64,10 +64,10 @@ func TestSendRawTransaction(t *testing.T) { for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } - m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed + m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed initialCycle := stages.MockInsertAsInitialCycle - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, logger, m.BlockReader, nil); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, logger, m.BlockReader, nil); err != nil { t.Fatal(err) } } diff --git a/cmd/rpcdaemon/commands/txpool_api_test.go b/cmd/rpcdaemon/commands/txpool_api_test.go index cf6353ada70..94b7090c7a3 100644 --- a/cmd/rpcdaemon/commands/txpool_api_test.go +++ b/cmd/rpcdaemon/commands/txpool_api_test.go @@ -26,7 +26,7 @@ func TestTxPoolContent(t *testing.T) { m, require := stages.MockWithTxPool(t), require.New(t) chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { 
b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) require.NoError(err) err = m.InsertChain(chain, nil) require.NoError(err) diff --git a/cmd/rpcdaemon/rpcdaemontest/test_util.go b/cmd/rpcdaemon/rpcdaemontest/test_util.go index f2d7c1e119b..e5a46b5d062 100644 --- a/cmd/rpcdaemon/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon/rpcdaemontest/test_util.go @@ -91,7 +91,7 @@ func CreateTestSentry(t *testing.T) (*stages.MockSentry, *core.ChainPack, []*cor // Generate empty chain to have some orphaned blocks for tests orphanedChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 5, func(i int, block *core.BlockGen) { - }, true) + }) if err != nil { t.Fatal(err) } @@ -276,7 +276,7 @@ func generateChain( block.AddTx(txn) } contractBackend.Commit() - }, true) + }) } type IsMiningMock struct{} @@ -422,7 +422,7 @@ func CreateTestSentryForTraces(t *testing.T) *stages.MockSentry { tx, _ := types.SignTx(types.NewTransaction(0, a2, u256.Num0, 50000, u256.Num1, []byte{0x01, 0x00, 0x01, 0x00}), *types.LatestSignerForChainID(nil), key) b.AddTx(tx) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -528,7 +528,7 @@ func CreateTestSentryForTracesCollision(t *testing.T) *stages.MockSentry { tx, _ = types.SignTx(types.NewTransaction(2, bb, u256.Num0, 100000, u256.Num1, nil), *types.LatestSignerForChainID(nil), key) b.AddTx(tx) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } diff --git a/cmd/sentinel/sentinel/handlers/handlers.go b/cmd/sentinel/sentinel/handlers/handlers.go index 79c560e49bf..00b868be508 100644 --- a/cmd/sentinel/sentinel/handlers/handlers.go +++ b/cmd/sentinel/sentinel/handlers/handlers.go @@ -94,7 +94,7 @@ func (c *ConsensusHandlers) wrapStreamHandler(name string, fn func(s network.Str err = fn(s) if err != nil { l["err"] = err - log.Error("[pubsubhandler] stream handler", l) + log.Trace("[pubsubhandler] stream handler", l) // TODO: maybe we should log this _ = s.Reset() return diff --git a/cmd/state/commands/erigon4.go b/cmd/state/commands/erigon4.go index 30481b9a3aa..b391ccb9572 100644 --- a/cmd/state/commands/erigon4.go +++ b/cmd/state/commands/erigon4.go @@ -15,17 +15,14 @@ import ( "github.com/VictoriaMetrics/metrics" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" - "github.com/ledgerwatch/erigon-lib/commitment" - chain2 "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/commitment" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/dbg" - "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" libstate "github.com/ledgerwatch/erigon-lib/state" @@ -40,9 +37,11 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/ethconsensusconfig" + "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/debug" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" ) var ( @@ -389,8 +388,9 @@ func processBlock23(startTxNum uint64, trace bool, txNumStart uint64, rw *StateR header := block.Header() vmConfig.Debug = true - gp := 
new(core.GasPool).AddGas(block.GasLimit()).AddDataGas(params.MaxDataGasPerBlock) + gp := new(core.GasPool).AddGas(block.GasLimit()).AddDataGas(chain2.MaxDataGasPerBlock) usedGas := new(uint64) + usedDataGas := new(uint64) var receipts types.Receipts rules := chainConfig.Rules(block.NumberU64(), block.Time()) txNum := txNumStart @@ -424,7 +424,7 @@ func processBlock23(startTxNum uint64, trace bool, txNumStart uint64, rw *StateR ibs.SetTxContext(tx.Hash(), block.Hash(), i) ct := exec3.NewCallTracer() vmConfig.Tracer = ct - receipt, _, err := core.ApplyTransaction(chainConfig, getHashFn, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig) + receipt, _, err := core.ApplyTransaction(chainConfig, getHashFn, engine, nil, gp, ibs, ww, header, tx, usedGas, usedDataGas, vmConfig) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } @@ -617,6 +617,6 @@ func initConsensusEngine(cc *chain2.Config, snapshots *freezeblocks.RoSnapshots, } else { consensusConfig = &config.Ethash } - return ethconsensusconfig.CreateConsensusEngine(cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, - config.HeimdallURL, config.WithoutHeimdall, datadirCli, true /* readonly */, logger) + return ethconsensusconfig.CreateConsensusEngine(&nodecfg.Config{Dirs: datadir.New(datadirCli)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, + config.HeimdallURL, config.WithoutHeimdall, true /* readonly */, logger) } diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index 7644eda0009..e08c47b3b62 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -13,17 +13,14 @@ import ( "time" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" + chain2 "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/consensus" @@ -35,6 +32,9 @@ import ( "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" ) var ( @@ -710,8 +710,9 @@ func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter sta chainConfig *chain2.Config, getHeader func(hash libcommon.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config, trace bool, logger log.Logger) (types.Receipts, error) { header := block.Header() vmConfig.TraceJumpDest = true - gp := new(core.GasPool).AddGas(block.GasLimit()).AddDataGas(params.MaxDataGasPerBlock) + gp := new(core.GasPool).AddGas(block.GasLimit()).AddDataGas(chain2.MaxDataGasPerBlock) usedGas := new(uint64) + usedDataGas := new(uint64) var receipts types.Receipts if chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 { misc.ApplyDAOHardFork(ibs) @@ -720,7 +721,7 @@ func runBlock(engine 
consensus.Engine, ibs *state.IntraBlockState, txnWriter sta rules := chainConfig.Rules(block.NumberU64(), block.Time()) for i, tx := range block.Transactions() { ibs.SetTxContext(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, txnWriter, header, tx, usedGas, usedDataGas, vmConfig) if err != nil { return nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/exec3/state.go b/cmd/state/exec3/state.go index 6a179b71b2d..7dbb7a74a59 100644 --- a/cmd/state/exec3/state.go +++ b/cmd/state/exec3/state.go @@ -156,8 +156,8 @@ func (rw *Worker) RunTxTaskNoLock(txTask *exec22.TxTask) { } // Block initialisation //fmt.Printf("txNum=%d, blockNum=%d, initialisation of the block\n", txTask.TxNum, txTask.BlockNum) - syscall := func(contract libcommon.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, false /* constCall */) + syscall := func(contract libcommon.Address, data []byte, ibs *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) { + return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, constCall /* constCall */) } rw.engine.Initialize(rw.chainConfig, rw.chain, header, ibs, txTask.Txs, txTask.Uncles, syscall) case txTask.Final: diff --git a/cmd/state/exec3/state_recon.go b/cmd/state/exec3/state_recon.go index 7e450796153..6efcd766f06 100644 --- a/cmd/state/exec3/state_recon.go +++ b/cmd/state/exec3/state_recon.go @@ -322,8 +322,8 @@ func (rw *ReconWorker) runTxTask(txTask *exec22.TxTask) error { } } else if txTask.TxIndex == -1 { // Block initialisation - syscall := func(contract libcommon.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, rw.chainConfig, ibs, txTask.Header, rw.engine, false /* constCall */) + syscall := func(contract libcommon.Address, data []byte, ibState *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) { + return core.SysCallContract(contract, data, rw.chainConfig, ibState, header, rw.engine, constCall /* constCall */) } rw.engine.Initialize(rw.chainConfig, rw.chain, txTask.Header, ibs, txTask.Txs, txTask.Uncles, syscall) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c11689b1cf6..c7e811bedb1 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -38,7 +38,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/metrics" "github.com/ledgerwatch/erigon-lib/direct" downloadercfg2 "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" - "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" "github.com/ledgerwatch/erigon/cl/clparams" @@ -710,7 +709,7 @@ var ( DbPageSizeFlag = cli.StringFlag{ Name: "db.pagesize", Usage: "DB is splitted to 'pages' of fixed size. Can't change DB creation. Must be power of 2 and '256b <= pagesize <= 64kb'. Default: equal to OperationSystem's pageSize. Bigger pageSize causing: 1. More writes to disk during commit 2. Smaller b-tree high 3. Less fragmentation 4. Less overhead on 'free-pages list' maintainance (a bit faster Put/Commit) 5. 
If expecting DB-size > 8Tb then set pageSize >= 8Kb", - Value: datasize.ByteSize(kv.DefaultPageSize()).String(), + Value: "8KB", } DbSizeLimitFlag = cli.StringFlag{ Name: "db.size.limit", diff --git a/consensus/aura/aura.go b/consensus/aura/aura.go index 445265c391d..6adbeae827e 100644 --- a/consensus/aura/aura.go +++ b/consensus/aura/aura.go @@ -17,36 +17,26 @@ package aura import ( - "bytes" - "container/list" - "context" "errors" "fmt" "math/big" - "sort" "sync" "sync/atomic" "time" - lru "github.com/hashicorp/golang-lru/v2" "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/secp256k1" "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/accounts/abi" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/aura/contracts" "github.com/ledgerwatch/erigon/consensus/clique" "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" ) @@ -61,41 +51,6 @@ Not implemented features from OS: Repo with solidity sources: https://github.com/poanetwork/posdao-contracts */ -type StepDurationInfo struct { - TransitionStep uint64 - TransitionTimestamp uint64 - StepDuration uint64 -} - -// EpochTransitionProof - Holds 2 proofs inside: ValidatorSetProof and FinalityProof -type EpochTransitionProof struct { - SignalNumber uint64 - SetProof []byte - FinalityProof []byte -} - -// ValidatorSetProof - validator set proof -type ValidatorSetProof struct { - Header *types.Header - Receipts types.Receipts -} - -// FirstValidatorSetProof state-dependent proofs for the safe contract: -// only "first" proofs are such. -type FirstValidatorSetProof struct { // TODO: whaaat? here is no state! - ContractAddress libcommon.Address - Header *types.Header -} - -type EpochTransition struct { - /// Block hash at which the transition occurred. - BlockHash libcommon.Hash - /// Block number at which the transition occurred. - BlockNumber uint64 - /// "transition/epoch" proof from the engine combined with a finality proof. - ProofRlp []byte -} - type Step struct { calibrate bool // whether calibration is enabled. inner atomic.Uint64 @@ -137,11 +92,6 @@ func (s *Step) optCalibrate() bool { return true } -type PermissionedStep struct { - inner *Step - canPropose atomic.Bool -} - type ReceivedStepHashes map[uint64]map[libcommon.Address]libcommon.Hash //BTreeMap<(u64, Address), H256> // nolint @@ -291,36 +241,6 @@ type AuRa struct { certifierLock sync.RWMutex } -type GasLimitOverride struct { - cache *lru.Cache[libcommon.Hash, *uint256.Int] -} - -func NewGasLimitOverride() *GasLimitOverride { - // The number of recent block hashes for which the gas limit override is memoized. 
- const GasLimitOverrideCacheCapacity = 10 - - cache, err := lru.New[libcommon.Hash, *uint256.Int](GasLimitOverrideCacheCapacity) - if err != nil { - panic("error creating prefetching cache for blocks") - } - return &GasLimitOverride{cache: cache} -} - -func (pb *GasLimitOverride) Pop(hash libcommon.Hash) *uint256.Int { - if val, ok := pb.cache.Get(hash); ok && val != nil { - pb.cache.Remove(hash) - return val - } - return nil -} - -func (pb *GasLimitOverride) Add(hash libcommon.Hash, b *uint256.Int) { - if b == nil { - return - } - pb.cache.ContainsOrAdd(hash, b) -} - func NewAuRa(spec *chain.AuRaConfig, db kv.RwDB) (*AuRa, error) { auraParams, err := FromJson(spec) if err != nil { @@ -400,54 +320,6 @@ func NewAuRa(spec *chain.AuRaConfig, db kv.RwDB) (*AuRa, error) { return c, nil } -type epochReader interface { - GetEpoch(blockHash libcommon.Hash, blockN uint64) (transitionProof []byte, err error) - GetPendingEpoch(blockHash libcommon.Hash, blockN uint64) (transitionProof []byte, err error) - FindBeforeOrEqualNumber(number uint64) (blockNum uint64, blockHash libcommon.Hash, transitionProof []byte, err error) -} -type epochWriter interface { - epochReader - PutEpoch(blockHash libcommon.Hash, blockN uint64, transitionProof []byte) (err error) - PutPendingEpoch(blockHash libcommon.Hash, blockN uint64, transitionProof []byte) (err error) -} - -type NonTransactionalEpochReader struct { - db kv.RwDB -} - -func newEpochReader(db kv.RwDB) *NonTransactionalEpochReader { - return &NonTransactionalEpochReader{db: db} -} - -func (cr *NonTransactionalEpochReader) GetEpoch(hash libcommon.Hash, number uint64) (v []byte, err error) { - return v, cr.db.View(context.Background(), func(tx kv.Tx) error { - v, err = rawdb.ReadEpoch(tx, number, hash) - return err - }) -} -func (cr *NonTransactionalEpochReader) PutEpoch(hash libcommon.Hash, number uint64, proof []byte) error { - return cr.db.UpdateNosync(context.Background(), func(tx kv.RwTx) error { - return rawdb.WriteEpoch(tx, number, hash, proof) - }) -} -func (cr *NonTransactionalEpochReader) GetPendingEpoch(hash libcommon.Hash, number uint64) (v []byte, err error) { - return v, cr.db.View(context.Background(), func(tx kv.Tx) error { - v, err = rawdb.ReadPendingEpoch(tx, number, hash) - return err - }) -} -func (cr *NonTransactionalEpochReader) PutPendingEpoch(hash libcommon.Hash, number uint64, proof []byte) error { - return cr.db.UpdateNosync(context.Background(), func(tx kv.RwTx) error { - return rawdb.WritePendingEpoch(tx, number, hash, proof) - }) -} -func (cr *NonTransactionalEpochReader) FindBeforeOrEqualNumber(number uint64) (blockNum uint64, blockHash libcommon.Hash, transitionProof []byte, err error) { - return blockNum, blockHash, transitionProof, cr.db.View(context.Background(), func(tx kv.Tx) error { - blockNum, blockHash, transitionProof, err = rawdb.FindEpochBeforeOrEqualNumber(tx, number) - return err - }) -} - // A helper accumulator function mapping a step duration and a step duration transition timestamp // to the corresponding step number and the correct starting second of the step. 
func nextStepTimeDuration(info StepDurationInfo, time uint64) (uint64, uint64, bool) { @@ -496,10 +368,6 @@ func (c *AuRa) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Hea return ethash.VerifyHeaderBasics(chain, header, parent, true /*checkTimestamp*/, c.HasGasLimitContract() /*skipGasLimit*/) } -func (c *AuRa) HasGasLimitContract() bool { - return len(c.cfg.BlockRewardContractTransitions) != 0 -} - // nolint func (c *AuRa) hasReceivedStepHashes(step uint64, author libcommon.Address, newHash libcommon.Hash) bool { /* @@ -763,12 +631,19 @@ func (c *AuRa) Prepare(chain consensus.ChainHeaderReader, header *types.Header, //return nil } -func (c *AuRa) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SystemCall) { +func (c *AuRa) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscallCustom consensus.SysCallCustom) { blockNum := header.Number.Uint64() + + //Check block gas limit from smart contract, if applicable + c.verifyGasLimitOverride(config, chain, header, state, syscallCustom) + for address, rewrittenCode := range c.cfg.RewriteBytecode[blockNum] { state.SetCode(address, rewrittenCode) } + syscall := func(addr libcommon.Address, data []byte) ([]byte, error) { + return syscallCustom(addr, data, state, header, true) + } c.certifierLock.Lock() if c.cfg.Registrar != nil && c.certifier == nil && config.IsLondon(blockNum) { c.certifier = getCertifier(*c.cfg.Registrar, syscall) @@ -1185,17 +1060,6 @@ func (c *AuRa) CalcDifficulty(chain consensus.ChainHeaderReader, time, parentTim currentStep := c.step.inner.inner.Load() currentEmptyStepsLen := 0 return calculateScore(parentStep, currentStep, uint64(currentEmptyStepsLen)).ToBig() - - /* TODO: do I need gasLimit override logic here ? 
- if let Some(gas_limit) = self.gas_limit_override(header) { - trace!(target: "engine", "Setting gas limit to {} for block {}.", gas_limit, header.number()); - let parent_gas_limit = *parent.gas_limit(); - header.set_gas_limit(gas_limit); - if parent_gas_limit != gas_limit { - info!(target: "engine", "Block gas limit was changed from {} to {}.", parent_gas_limit, gas_limit); - } - } - */ } // calculateScore - analog of PoW difficulty: @@ -1328,71 +1192,6 @@ func (c *AuRa) CalculateRewards(_ *chain.Config, header *types.Header, _ []*type return []consensus.Reward{r}, nil } -func callBlockRewardAbi(contractAddr libcommon.Address, syscall consensus.SystemCall, beneficiaries []libcommon.Address, rewardKind []consensus.RewardKind) ([]libcommon.Address, []*uint256.Int) { - castedKind := make([]uint16, len(rewardKind)) - for i := range rewardKind { - castedKind[i] = uint16(rewardKind[i]) - } - packed, err := blockRewardAbi().Pack("reward", beneficiaries, castedKind) - if err != nil { - panic(err) - } - out, err := syscall(contractAddr, packed) - if err != nil { - panic(err) - } - if len(out) == 0 { - return nil, nil - } - res, err := blockRewardAbi().Unpack("reward", out) - if err != nil { - panic(err) - } - beneficiariesRes := res[0].([]libcommon.Address) - rewardsBig := res[1].([]*big.Int) - rewardsU256 := make([]*uint256.Int, len(rewardsBig)) - for i := 0; i < len(rewardsBig); i++ { - var overflow bool - rewardsU256[i], overflow = uint256.FromBig(rewardsBig[i]) - if overflow { - panic("Overflow in callBlockRewardAbi") - } - } - return beneficiariesRes, rewardsU256 -} - -func blockRewardAbi() abi.ABI { - a, err := abi.JSON(bytes.NewReader(contracts.BlockReward)) - if err != nil { - panic(err) - } - return a -} - -func certifierAbi() abi.ABI { - a, err := abi.JSON(bytes.NewReader(contracts.Certifier)) - if err != nil { - panic(err) - } - return a -} - -func registrarAbi() abi.ABI { - a, err := abi.JSON(bytes.NewReader(contracts.Registrar)) - if err != nil { - panic(err) - } - return a -} - -func withdrawalAbi() abi.ABI { - a, err := abi.JSON(bytes.NewReader(contracts.Withdrawal)) - if err != nil { - panic(err) - } - return a -} - // See https://github.com/gnosischain/specs/blob/master/execution/withdrawals.md func (c *AuRa) ExecuteSystemWithdrawals(withdrawals []*types.Withdrawal, syscall consensus.SystemCall) error { if c.cfg.WithdrawalContractAddress == nil { @@ -1419,40 +1218,6 @@ func (c *AuRa) ExecuteSystemWithdrawals(withdrawals []*types.Withdrawal, syscall return err } -func getCertifier(registrar libcommon.Address, syscall consensus.SystemCall) *libcommon.Address { - hashedKey, err := common.HashData([]byte("service_transaction_checker")) - if err != nil { - panic(err) - } - packed, err := registrarAbi().Pack("getAddress", hashedKey, "A") - if err != nil { - panic(err) - } - out, err := syscall(registrar, packed) - if err != nil { - panic(err) - } - if len(out) == 0 { - return nil - } - res, err := registrarAbi().Unpack("getAddress", out) - if err != nil { - panic(err) - } - certifier := res[0].(libcommon.Address) - return &certifier -} - -// An empty step message that is included in a seal, the only difference is that it doesn't include -// the `parent_hash` in order to save space. The included signature is of the original empty step -// message, which can be reconstructed by using the parent hash of the block in which this sealed -// empty message is included. 
-// nolint -type SealedEmptyStep struct { - signature []byte // H520 - step uint64 -} - /* // extracts the empty steps from the header seal. should only be called when there are 3 fields in the seal // (i.e. header.number() >= self.empty_steps_transition). @@ -1487,300 +1252,3 @@ func headerEmptyStepsRaw(header *types.Header) []byte { return header.Seal[2] } */ - -// A message broadcast by authorities when it's their turn to seal a block but there are no -// transactions. Other authorities accumulate these messages and later include them in the seal as -// proof. -// -// An empty step message is created _instead of_ a block if there are no pending transactions. -// It cannot itself be a parent, and `parent_hash` always points to the most recent block. E.g.: -// - Validator A creates block `bA`. -// - Validator B has no pending transactions, so it signs an empty step message `mB` -// instead whose hash points to block `bA`. -// - Validator C also has no pending transactions, so it also signs an empty step message `mC` -// instead whose hash points to block `bA`. -// - Validator D creates block `bD`. The parent is block `bA`, and the header includes `mB` and `mC`. -type EmptyStep struct { - // The signature of the other two fields, by the message's author. - signature []byte // H520 - // This message's step number. - step uint64 - // The hash of the most recent block. - parentHash libcommon.Hash // H256 -} - -func (s *EmptyStep) Less(other *EmptyStep) bool { - if s.step < other.step { - return true - } - if bytes.Compare(s.parentHash[:], other.parentHash[:]) < 0 { - return true - } - if bytes.Compare(s.signature, other.signature) < 0 { - return true - } - return false -} -func (s *EmptyStep) LessOrEqual(other *EmptyStep) bool { - if s.step <= other.step { - return true - } - if bytes.Compare(s.parentHash[:], other.parentHash[:]) <= 0 { - return true - } - if bytes.Compare(s.signature, other.signature) <= 0 { - return true - } - return false -} - -// Returns `true` if the message has a valid signature by the expected proposer in the message's step. 
-func (s *EmptyStep) verify(validators ValidatorSet) (bool, error) { //nolint - //sRlp, err := EmptyStepRlp(s.step, s.parentHash) - //if err != nil { - // return false, err - //} - //message := crypto.Keccak256(sRlp) - - /* - let correct_proposer = step_proposer(validators, &self.parent_hash, self.step); - - publickey::verify_address(&correct_proposer, &self.signature.into(), &message) - .map_err(|e| e.into()) - */ - return true, nil -} - -// nolint -func (s *EmptyStep) author() (libcommon.Address, error) { - sRlp, err := EmptyStepRlp(s.step, s.parentHash) - if err != nil { - return libcommon.Address{}, err - } - message := crypto.Keccak256(sRlp) - public, err := secp256k1.RecoverPubkey(message, s.signature) - if err != nil { - return libcommon.Address{}, err - } - ecdsa, err := crypto.UnmarshalPubkeyStd(public) - if err != nil { - return libcommon.Address{}, err - } - return crypto.PubkeyToAddress(*ecdsa), nil -} - -type EmptyStepSet struct { - lock sync.Mutex - list []*EmptyStep -} - -func (s *EmptyStepSet) Less(i, j int) bool { return s.list[i].Less(s.list[j]) } -func (s *EmptyStepSet) Swap(i, j int) { s.list[i], s.list[j] = s.list[j], s.list[i] } -func (s *EmptyStepSet) Len() int { return len(s.list) } - -func (s *EmptyStepSet) Sort() { - s.lock.Lock() - defer s.lock.Unlock() - sort.Stable(s) -} - -func (s *EmptyStepSet) ForEach(f func(int, *EmptyStep)) { - s.lock.Lock() - defer s.lock.Unlock() - for i, el := range s.list { - f(i, el) - } -} - -func EmptyStepFullRlp(signature []byte, emptyStepRlp []byte) ([]byte, error) { - type A struct { - s []byte - r []byte - } - - return rlp.EncodeToBytes(A{s: signature, r: emptyStepRlp}) -} - -func EmptyStepRlp(step uint64, parentHash libcommon.Hash) ([]byte, error) { - type A struct { - s uint64 - h libcommon.Hash - } - return rlp.EncodeToBytes(A{s: step, h: parentHash}) -} - -// nolint -type unAssembledHeader struct { - hash libcommon.Hash - number uint64 - signers []libcommon.Address -} -type unAssembledHeaders struct { - l *list.List -} - -func (u unAssembledHeaders) PushBack(header *unAssembledHeader) { u.l.PushBack(header) } -func (u unAssembledHeaders) PushFront(header *unAssembledHeader) { u.l.PushFront(header) } -func (u unAssembledHeaders) Pop() *unAssembledHeader { - e := u.l.Front() - if e == nil { - return nil - } - u.l.Remove(e) - return e.Value.(*unAssembledHeader) -} -func (u unAssembledHeaders) Front() *unAssembledHeader { - e := u.l.Front() - if e == nil { - return nil - } - return e.Value.(*unAssembledHeader) -} - -// RollingFinality checker for authority round consensus. -// Stores a chain of unfinalized hashes that can be pushed onto. -// nolint -type RollingFinality struct { - headers unAssembledHeaders //nolint - signers *SimpleList - signCount map[libcommon.Address]uint - lastPushed *libcommon.Hash // Option, -} - -// NewRollingFinality creates a blank finality checker under the given validator set. -func NewRollingFinality(signers []libcommon.Address) *RollingFinality { - return &RollingFinality{ - signers: NewSimpleList(signers), - headers: unAssembledHeaders{l: list.New()}, - signCount: map[libcommon.Address]uint{}, - } -} - -// Clears the finality status, but keeps the validator set. 
-func (f *RollingFinality) print(num uint64) { - if num > DEBUG_LOG_FROM { - h := f.headers - fmt.Printf("finality_heads: %d\n", num) - i := 0 - for e := h.l.Front(); e != nil; e = e.Next() { - i++ - a := e.Value.(*unAssembledHeader) - fmt.Printf("\t%d,%x\n", a.number, a.signers[0]) - } - if i == 0 { - fmt.Printf("\tempty\n") - } - } -} - -func (f *RollingFinality) clear() { - f.headers = unAssembledHeaders{l: list.New()} - f.signCount = map[libcommon.Address]uint{} - f.lastPushed = nil -} - -// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent) -// -// Fails if `signer` isn't a member of the active validator set. -// Returns a list of all newly finalized headers. -func (f *RollingFinality) push(head libcommon.Hash, num uint64, signers []libcommon.Address) (newlyFinalized []unAssembledHeader, err error) { - for i := range signers { - if !f.hasSigner(signers[i]) { - return nil, fmt.Errorf("unknown validator") - } - } - - f.addSigners(signers) - f.headers.PushBack(&unAssembledHeader{hash: head, number: num, signers: signers}) - - for f.isFinalized() { - e := f.headers.Pop() - if e == nil { - panic("headers length always greater than sign count length") - } - f.removeSigners(e.signers) - newlyFinalized = append(newlyFinalized, *e) - } - f.lastPushed = &head - return newlyFinalized, nil -} - -// isFinalized returns whether the first entry in `self.headers` is finalized. -func (f *RollingFinality) isFinalized() bool { - e := f.headers.Front() - if e == nil { - return false - } - return len(f.signCount)*2 > len(f.signers.validators) -} -func (f *RollingFinality) hasSigner(signer libcommon.Address) bool { - for j := range f.signers.validators { - if f.signers.validators[j] == signer { - return true - - } - } - return false -} -func (f *RollingFinality) addSigners(signers []libcommon.Address) bool { - for i := range signers { - count, ok := f.signCount[signers[i]] - if ok { - f.signCount[signers[i]] = count + 1 - } else { - f.signCount[signers[i]] = 1 - } - } - return false -} -func (f *RollingFinality) removeSigners(signers []libcommon.Address) { - for i := range signers { - count, ok := f.signCount[signers[i]] - if !ok { - panic("all hashes in `header` should have entries in `sign_count` for their signers") - //continue - } - if count <= 1 { - delete(f.signCount, signers[i]) - } else { - f.signCount[signers[i]] = count - 1 - } - } -} -func (f *RollingFinality) buildAncestrySubChain(get func(hash libcommon.Hash) ([]libcommon.Address, libcommon.Hash, libcommon.Hash, uint64, bool), parentHash, epochTransitionHash libcommon.Hash) error { // starts from chainHeadParentHash - f.clear() - - for { - signers, blockHash, newParentHash, blockNum, ok := get(parentHash) - if !ok { - return nil - } - if blockHash == epochTransitionHash { - return nil - } - for i := range signers { - if !f.hasSigner(signers[i]) { - return fmt.Errorf("unknown validator: blockNum=%d", blockNum) - } - } - if f.lastPushed == nil { - copyHash := parentHash - f.lastPushed = ©Hash - } - f.addSigners(signers) - f.headers.PushFront(&unAssembledHeader{hash: blockHash, number: blockNum, signers: signers}) - // break when we've got our first finalized block. 
-		if f.isFinalized() {
-			e := f.headers.Pop()
-			if e == nil {
-				panic("we just pushed a block")
-			}
-			f.removeSigners(e.signers)
-			//log.Info("[aura] finality encountered already finalized block", "hash", e.hash.String(), "number", e.number)
-			break
-		}
-
-		parentHash = newParentHash
-	}
-	return nil
-}
diff --git a/consensus/aura/aura_test.go b/consensus/aura/aura_test.go
index 1862ac9360b..d83ee16c2a2 100644
--- a/consensus/aura/aura_test.go
+++ b/consensus/aura/aura_test.go
@@ -1,6 +1,8 @@
 package aura_test
 
 import (
+	"math/big"
+	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/require"
@@ -8,8 +10,10 @@ import (
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv/memdb"
 
+	"github.com/ledgerwatch/erigon/accounts/abi"
 	"github.com/ledgerwatch/erigon/consensus/aura"
 	"github.com/ledgerwatch/erigon/core"
+	"github.com/ledgerwatch/erigon/core/state"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/turbo/stages"
 	"github.com/ledgerwatch/erigon/turbo/trie"
@@ -56,3 +60,61 @@ func TestEmptyBlock(t *testing.T) {
 	err = m.InsertChain(chain, nil)
 	require.NoError(err)
 }
+
+func TestAuRaSkipGasLimit(t *testing.T) {
+	require := require.New(t)
+	genesis := core.GnosisGenesisBlock()
+	genesis.Config.TerminalTotalDifficultyPassed = false
+	genesis.Config.Aura.BlockGasLimitContractTransitions = map[uint64]libcommon.Address{0: libcommon.HexToAddress("0x4000000000000000000000000000000000000001")}
+
+	chainConfig := genesis.Config
+	auraDB := memdb.NewTestDB(t)
+	engine, err := aura.NewAuRa(chainConfig.Aura, auraDB)
+	require.NoError(err)
+	m := stages.MockWithGenesisEngine(t, genesis, engine, false)
+
+	difficulty, _ := new(big.Int).SetString("340282366920938463463374607431768211454", 10)
+	//Populate a sample valid header for a Pre-merge block
+	// - actually sampled from 5000th block in chiado
+	validPreMergeHeader := &types.Header{
+		ParentHash:  libcommon.HexToHash("0x102482332de853f2f8967263e77e71d4fddf68fd5d84b750b2ddb7e501052097"),
+		UncleHash:   libcommon.HexToHash("0x0"),
+		Coinbase:    libcommon.HexToAddress("0x14747a698Ec1227e6753026C08B29b4d5D3bC484"),
+		Root:        libcommon.HexToHash("0x0"),
+		TxHash:      libcommon.HexToHash("0x0"),
+		ReceiptHash: libcommon.HexToHash("0x0"),
+		Bloom:       types.BytesToBloom(nil),
+		Difficulty:  difficulty,
+		Number:      big.NewInt(5000),
+		GasLimit:    12500000,
+		GasUsed:     0,
+		Time:        1664049551,
+		Extra:       []byte{},
+		Nonce:       [8]byte{0, 0, 0, 0, 0, 0, 0, 0},
+	}
+
+	syscallCustom := func(libcommon.Address, []byte, *state.IntraBlockState, *types.Header, bool) ([]byte, error) {
+		//Packing as constructor gives the same effect as unpacking the returned value
+		json := `[{"inputs": [{"internalType": "uint256","name": "blockGasLimit","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]`
+		fakeAbi, err := abi.JSON(strings.NewReader(json))
+		require.NoError(err)
+
+		fakeVal, err := fakeAbi.Pack("", big.NewInt(12500000))
+		return fakeVal, err
+	}
+	require.NotPanics(func() {
+ m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, invalidPostMergeHeader, nil, nil, nil, syscallCustom) + }) +} diff --git a/consensus/aura/contract_abi.go b/consensus/aura/contract_abi.go new file mode 100644 index 00000000000..7f806f515d8 --- /dev/null +++ b/consensus/aura/contract_abi.go @@ -0,0 +1,136 @@ +package aura + +import ( + "bytes" + "math/big" + + "github.com/holiman/uint256" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + + "github.com/ledgerwatch/erigon/accounts/abi" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/aura/contracts" +) + +func callBlockRewardAbi(contractAddr libcommon.Address, syscall consensus.SystemCall, beneficiaries []libcommon.Address, rewardKind []consensus.RewardKind) ([]libcommon.Address, []*uint256.Int) { + castedKind := make([]uint16, len(rewardKind)) + for i := range rewardKind { + castedKind[i] = uint16(rewardKind[i]) + } + packed, err := blockRewardAbi().Pack("reward", beneficiaries, castedKind) + if err != nil { + panic(err) + } + out, err := syscall(contractAddr, packed) + if err != nil { + panic(err) + } + if len(out) == 0 { + return nil, nil + } + res, err := blockRewardAbi().Unpack("reward", out) + if err != nil { + panic(err) + } + beneficiariesRes := res[0].([]libcommon.Address) + rewardsBig := res[1].([]*big.Int) + rewardsU256 := make([]*uint256.Int, len(rewardsBig)) + for i := 0; i < len(rewardsBig); i++ { + var overflow bool + rewardsU256[i], overflow = uint256.FromBig(rewardsBig[i]) + if overflow { + panic("Overflow in callBlockRewardAbi") + } + } + return beneficiariesRes, rewardsU256 +} + +func callBlockGasLimitAbi(contractAddr libcommon.Address, syscall consensus.SystemCall) *uint256.Int { + packed, err := blockGasLimitAbi().Pack("blockGasLimit") + if err != nil { + panic(err) + } + out, err := syscall(contractAddr, packed) + if err != nil { + panic(err) + } + if len(out) == 0 { + return uint256.NewInt(0) + } + res, err := blockGasLimitAbi().Unpack("blockGasLimit", out) + if err != nil { + panic(err) + } + + val, overflow := uint256.FromBig(res[0].(*big.Int)) + if overflow { + panic("Overflow casting bigInt value to uint256") + } + return val +} + +func blockGasLimitAbi() abi.ABI { + a, err := abi.JSON(bytes.NewReader(contracts.BlockGasLimit)) + if err != nil { + panic(err) + } + return a +} + +func blockRewardAbi() abi.ABI { + a, err := abi.JSON(bytes.NewReader(contracts.BlockReward)) + if err != nil { + panic(err) + } + return a +} + +func certifierAbi() abi.ABI { + a, err := abi.JSON(bytes.NewReader(contracts.Certifier)) + if err != nil { + panic(err) + } + return a +} + +func registrarAbi() abi.ABI { + a, err := abi.JSON(bytes.NewReader(contracts.Registrar)) + if err != nil { + panic(err) + } + return a +} + +func withdrawalAbi() abi.ABI { + a, err := abi.JSON(bytes.NewReader(contracts.Withdrawal)) + if err != nil { + panic(err) + } + return a +} + +func getCertifier(registrar libcommon.Address, syscall consensus.SystemCall) *libcommon.Address { + hashedKey, err := common.HashData([]byte("service_transaction_checker")) + if err != nil { + panic(err) + } + packed, err := registrarAbi().Pack("getAddress", hashedKey, "A") + if err != nil { + panic(err) + } + out, err := syscall(registrar, packed) + if err != nil { + panic(err) + } + if len(out) == 0 { + return nil + } + res, err := registrarAbi().Unpack("getAddress", out) + if err != nil { + panic(err) + } + certifier := res[0].(libcommon.Address) + return &certifier +} diff --git 
a/consensus/aura/contracts/embed.go b/consensus/aura/contracts/embed.go index 5d540dccbd2..0a87b2d0b81 100644 --- a/consensus/aura/contracts/embed.go +++ b/consensus/aura/contracts/embed.go @@ -15,3 +15,6 @@ var Registrar []byte //go:embed withdrawal.json var Withdrawal []byte + +//go:embed block_gas_limit.json +var BlockGasLimit []byte diff --git a/consensus/aura/empty_step.go b/consensus/aura/empty_step.go new file mode 100644 index 00000000000..c2f5173e432 --- /dev/null +++ b/consensus/aura/empty_step.go @@ -0,0 +1,135 @@ +package aura + +import ( + "bytes" + "sort" + "sync" + + "github.com/ledgerwatch/secp256k1" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/rlp" +) + +// A message broadcast by authorities when it's their turn to seal a block but there are no +// transactions. Other authorities accumulate these messages and later include them in the seal as +// proof. +// +// An empty step message is created _instead of_ a block if there are no pending transactions. +// It cannot itself be a parent, and `parent_hash` always points to the most recent block. E.g.: +// - Validator A creates block `bA`. +// - Validator B has no pending transactions, so it signs an empty step message `mB` +// instead whose hash points to block `bA`. +// - Validator C also has no pending transactions, so it also signs an empty step message `mC` +// instead whose hash points to block `bA`. +// - Validator D creates block `bD`. The parent is block `bA`, and the header includes `mB` and `mC`. +type EmptyStep struct { + // The signature of the other two fields, by the message's author. + signature []byte // H520 + // This message's step number. + step uint64 + // The hash of the most recent block. + parentHash libcommon.Hash // H256 +} + +func (s *EmptyStep) Less(other *EmptyStep) bool { + if s.step < other.step { + return true + } + if bytes.Compare(s.parentHash[:], other.parentHash[:]) < 0 { + return true + } + if bytes.Compare(s.signature, other.signature) < 0 { + return true + } + return false +} +func (s *EmptyStep) LessOrEqual(other *EmptyStep) bool { + if s.step <= other.step { + return true + } + if bytes.Compare(s.parentHash[:], other.parentHash[:]) <= 0 { + return true + } + if bytes.Compare(s.signature, other.signature) <= 0 { + return true + } + return false +} + +// Returns `true` if the message has a valid signature by the expected proposer in the message's step. 
+func (s *EmptyStep) verify(validators ValidatorSet) (bool, error) { //nolint + //sRlp, err := EmptyStepRlp(s.step, s.parentHash) + //if err != nil { + // return false, err + //} + //message := crypto.Keccak256(sRlp) + + /* + let correct_proposer = step_proposer(validators, &self.parent_hash, self.step); + + publickey::verify_address(&correct_proposer, &self.signature.into(), &message) + .map_err(|e| e.into()) + */ + return true, nil +} + +// nolint +func (s *EmptyStep) author() (libcommon.Address, error) { + sRlp, err := EmptyStepRlp(s.step, s.parentHash) + if err != nil { + return libcommon.Address{}, err + } + message := crypto.Keccak256(sRlp) + public, err := secp256k1.RecoverPubkey(message, s.signature) + if err != nil { + return libcommon.Address{}, err + } + ecdsa, err := crypto.UnmarshalPubkeyStd(public) + if err != nil { + return libcommon.Address{}, err + } + return crypto.PubkeyToAddress(*ecdsa), nil +} + +type EmptyStepSet struct { + lock sync.Mutex + list []*EmptyStep +} + +func (s *EmptyStepSet) Less(i, j int) bool { return s.list[i].Less(s.list[j]) } +func (s *EmptyStepSet) Swap(i, j int) { s.list[i], s.list[j] = s.list[j], s.list[i] } +func (s *EmptyStepSet) Len() int { return len(s.list) } + +func (s *EmptyStepSet) Sort() { + s.lock.Lock() + defer s.lock.Unlock() + sort.Stable(s) +} + +func (s *EmptyStepSet) ForEach(f func(int, *EmptyStep)) { + s.lock.Lock() + defer s.lock.Unlock() + for i, el := range s.list { + f(i, el) + } +} + +func EmptyStepFullRlp(signature []byte, emptyStepRlp []byte) ([]byte, error) { + type A struct { + s []byte + r []byte + } + + return rlp.EncodeToBytes(A{s: signature, r: emptyStepRlp}) +} + +func EmptyStepRlp(step uint64, parentHash libcommon.Hash) ([]byte, error) { + type A struct { + s uint64 + h libcommon.Hash + } + return rlp.EncodeToBytes(A{s: step, h: parentHash}) +} diff --git a/consensus/aura/epoch.go b/consensus/aura/epoch.go new file mode 100644 index 00000000000..c663fc171fb --- /dev/null +++ b/consensus/aura/epoch.go @@ -0,0 +1,47 @@ +package aura + +import ( + "context" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + + "github.com/ledgerwatch/erigon/core/rawdb" +) + +type NonTransactionalEpochReader struct { + db kv.RwDB +} + +func newEpochReader(db kv.RwDB) *NonTransactionalEpochReader { + return &NonTransactionalEpochReader{db: db} +} + +func (cr *NonTransactionalEpochReader) GetEpoch(hash libcommon.Hash, number uint64) (v []byte, err error) { + return v, cr.db.View(context.Background(), func(tx kv.Tx) error { + v, err = rawdb.ReadEpoch(tx, number, hash) + return err + }) +} +func (cr *NonTransactionalEpochReader) PutEpoch(hash libcommon.Hash, number uint64, proof []byte) error { + return cr.db.UpdateNosync(context.Background(), func(tx kv.RwTx) error { + return rawdb.WriteEpoch(tx, number, hash, proof) + }) +} +func (cr *NonTransactionalEpochReader) GetPendingEpoch(hash libcommon.Hash, number uint64) (v []byte, err error) { + return v, cr.db.View(context.Background(), func(tx kv.Tx) error { + v, err = rawdb.ReadPendingEpoch(tx, number, hash) + return err + }) +} +func (cr *NonTransactionalEpochReader) PutPendingEpoch(hash libcommon.Hash, number uint64, proof []byte) error { + return cr.db.UpdateNosync(context.Background(), func(tx kv.RwTx) error { + return rawdb.WritePendingEpoch(tx, number, hash, proof) + }) +} +func (cr *NonTransactionalEpochReader) FindBeforeOrEqualNumber(number uint64) (blockNum uint64, blockHash libcommon.Hash, transitionProof []byte, err error) { + return 
blockNum, blockHash, transitionProof, cr.db.View(context.Background(), func(tx kv.Tx) error { + blockNum, blockHash, transitionProof, err = rawdb.FindEpochBeforeOrEqualNumber(tx, number) + return err + }) +} diff --git a/consensus/aura/gaslimit_override.go b/consensus/aura/gaslimit_override.go new file mode 100644 index 00000000000..a2df8f11fd5 --- /dev/null +++ b/consensus/aura/gaslimit_override.go @@ -0,0 +1,76 @@ +package aura + +import ( + lru "github.com/hashicorp/golang-lru/v2" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/chain" + + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" +) + +type GasLimitOverride struct { + cache *lru.Cache[libcommon.Hash, *uint256.Int] +} + +func NewGasLimitOverride() *GasLimitOverride { + // The number of recent block hashes for which the gas limit override is memoized. + const GasLimitOverrideCacheCapacity = 10 + + cache, err := lru.New[libcommon.Hash, *uint256.Int](GasLimitOverrideCacheCapacity) + if err != nil { + panic("error creating prefetching cache for blocks") + } + return &GasLimitOverride{cache: cache} +} + +func (pb *GasLimitOverride) Pop(hash libcommon.Hash) *uint256.Int { + if val, ok := pb.cache.Get(hash); ok && val != nil { + pb.cache.Remove(hash) + return val + } + return nil +} + +func (pb *GasLimitOverride) Add(hash libcommon.Hash, b *uint256.Int) { + if b == nil { + return + } + pb.cache.ContainsOrAdd(hash, b) +} + +func (c *AuRa) HasGasLimitContract() bool { + return len(c.cfg.BlockGasLimitContractTransitions) != 0 +} + +func (c *AuRa) GetBlockGasLimitFromContract(_ *chain.Config, syscall consensus.SystemCall) uint64 { + // var blockLimitContract + addr, ok := c.cfg.BlockGasLimitContractTransitions[0] + if !ok { + return 0 + } + gasLimit := callBlockGasLimitAbi(addr, syscall) + return gasLimit.Uint64() +} + +func (c *AuRa) verifyGasLimitOverride(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState, syscallCustom consensus.SysCallCustom) { + //IsPoSHeader check is necessary as merge.go calls Initialize on AuRa indiscriminately + gasLimitOverride := c.HasGasLimitContract() && !misc.IsPoSHeader(header) + if gasLimitOverride { + syscallPrevHeader := func(addr libcommon.Address, data []byte) ([]byte, error) { + return syscallCustom(addr, data, state, chain.GetHeaderByHash(header.ParentHash), true) + } + blockGasLimit := c.GetBlockGasLimitFromContract(config, syscallPrevHeader) + + if blockGasLimit > 0 { + if header.GasLimit != blockGasLimit { + panic("Block gas limit doesn't match BlockGasLimitContract with AuRa") + } + } + } +} diff --git a/consensus/aura/rolling_finality.go b/consensus/aura/rolling_finality.go new file mode 100644 index 00000000000..e5a3eac27ac --- /dev/null +++ b/consensus/aura/rolling_finality.go @@ -0,0 +1,172 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package aura + +import ( + "container/list" + "fmt" + + libcommon "github.com/ledgerwatch/erigon-lib/common" +) + +// RollingFinality checker for authority round consensus. +// Stores a chain of unfinalized hashes that can be pushed onto. +// nolint +type RollingFinality struct { + headers unAssembledHeaders //nolint + signers *SimpleList + signCount map[libcommon.Address]uint + lastPushed *libcommon.Hash // Option, +} + +// NewRollingFinality creates a blank finality checker under the given validator set. +func NewRollingFinality(signers []libcommon.Address) *RollingFinality { + return &RollingFinality{ + signers: NewSimpleList(signers), + headers: unAssembledHeaders{l: list.New()}, + signCount: map[libcommon.Address]uint{}, + } +} + +// Clears the finality status, but keeps the validator set. +func (f *RollingFinality) print(num uint64) { + if num > DEBUG_LOG_FROM { + h := f.headers + fmt.Printf("finality_heads: %d\n", num) + i := 0 + for e := h.l.Front(); e != nil; e = e.Next() { + i++ + a := e.Value.(*unAssembledHeader) + fmt.Printf("\t%d,%x\n", a.number, a.signers[0]) + } + if i == 0 { + fmt.Printf("\tempty\n") + } + } +} + +func (f *RollingFinality) clear() { + f.headers = unAssembledHeaders{l: list.New()} + f.signCount = map[libcommon.Address]uint{} + f.lastPushed = nil +} + +// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent) +// +// Fails if `signer` isn't a member of the active validator set. +// Returns a list of all newly finalized headers. +func (f *RollingFinality) push(head libcommon.Hash, num uint64, signers []libcommon.Address) (newlyFinalized []unAssembledHeader, err error) { + for i := range signers { + if !f.hasSigner(signers[i]) { + return nil, fmt.Errorf("unknown validator") + } + } + + f.addSigners(signers) + f.headers.PushBack(&unAssembledHeader{hash: head, number: num, signers: signers}) + + for f.isFinalized() { + e := f.headers.Pop() + if e == nil { + panic("headers length always greater than sign count length") + } + f.removeSigners(e.signers) + newlyFinalized = append(newlyFinalized, *e) + } + f.lastPushed = &head + return newlyFinalized, nil +} + +// isFinalized returns whether the first entry in `self.headers` is finalized. 
+func (f *RollingFinality) isFinalized() bool { + e := f.headers.Front() + if e == nil { + return false + } + return len(f.signCount)*2 > len(f.signers.validators) +} +func (f *RollingFinality) hasSigner(signer libcommon.Address) bool { + for j := range f.signers.validators { + if f.signers.validators[j] == signer { + return true + + } + } + return false +} +func (f *RollingFinality) addSigners(signers []libcommon.Address) bool { + for i := range signers { + count, ok := f.signCount[signers[i]] + if ok { + f.signCount[signers[i]] = count + 1 + } else { + f.signCount[signers[i]] = 1 + } + } + return false +} +func (f *RollingFinality) removeSigners(signers []libcommon.Address) { + for i := range signers { + count, ok := f.signCount[signers[i]] + if !ok { + panic("all hashes in `header` should have entries in `sign_count` for their signers") + //continue + } + if count <= 1 { + delete(f.signCount, signers[i]) + } else { + f.signCount[signers[i]] = count - 1 + } + } +} +func (f *RollingFinality) buildAncestrySubChain(get func(hash libcommon.Hash) ([]libcommon.Address, libcommon.Hash, libcommon.Hash, uint64, bool), parentHash, epochTransitionHash libcommon.Hash) error { // starts from chainHeadParentHash + f.clear() + + for { + signers, blockHash, newParentHash, blockNum, ok := get(parentHash) + if !ok { + return nil + } + if blockHash == epochTransitionHash { + return nil + } + for i := range signers { + if !f.hasSigner(signers[i]) { + return fmt.Errorf("unknown validator: blockNum=%d", blockNum) + } + } + if f.lastPushed == nil { + copyHash := parentHash + f.lastPushed = ©Hash + } + f.addSigners(signers) + f.headers.PushFront(&unAssembledHeader{hash: blockHash, number: blockNum, signers: signers}) + // break when we've got our first finalized block. + if f.isFinalized() { + e := f.headers.Pop() + if e == nil { + panic("we just pushed a block") + } + f.removeSigners(e.signers) + //log.Info("[aura] finality encountered already finalized block", "hash", e.hash.String(), "number", e.number) + break + } + + parentHash = newParentHash + } + return nil +} diff --git a/consensus/aura/types.go b/consensus/aura/types.go new file mode 100644 index 00000000000..62ab6ea4bc5 --- /dev/null +++ b/consensus/aura/types.go @@ -0,0 +1,69 @@ +package aura + +import ( + "sync/atomic" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/types" +) + +type StepDurationInfo struct { + TransitionStep uint64 + TransitionTimestamp uint64 + StepDuration uint64 +} + +// EpochTransitionProof - Holds 2 proofs inside: ValidatorSetProof and FinalityProof +type EpochTransitionProof struct { + SignalNumber uint64 + SetProof []byte + FinalityProof []byte +} + +// ValidatorSetProof - validator set proof +type ValidatorSetProof struct { + Header *types.Header + Receipts types.Receipts +} + +// FirstValidatorSetProof state-dependent proofs for the safe contract: +// only "first" proofs are such. +type FirstValidatorSetProof struct { // TODO: whaaat? here is no state! + ContractAddress libcommon.Address + Header *types.Header +} + +type EpochTransition struct { + /// Block hash at which the transition occurred. + BlockHash libcommon.Hash + /// Block number at which the transition occurred. + BlockNumber uint64 + /// "transition/epoch" proof from the engine combined with a finality proof. 
+ ProofRlp []byte +} + +type epochReader interface { + GetEpoch(blockHash libcommon.Hash, blockN uint64) (transitionProof []byte, err error) + GetPendingEpoch(blockHash libcommon.Hash, blockN uint64) (transitionProof []byte, err error) + FindBeforeOrEqualNumber(number uint64) (blockNum uint64, blockHash libcommon.Hash, transitionProof []byte, err error) +} +type epochWriter interface { + epochReader + PutEpoch(blockHash libcommon.Hash, blockN uint64, transitionProof []byte) (err error) + PutPendingEpoch(blockHash libcommon.Hash, blockN uint64, transitionProof []byte) (err error) +} + +type PermissionedStep struct { + inner *Step + canPropose atomic.Bool +} + +// An empty step message that is included in a seal, the only difference is that it doesn't include +// the `parent_hash` in order to save space. The included signature is of the original empty step +// message, which can be reconstructed by using the parent hash of the block in which this sealed +// empty message is included. +// nolint +type SealedEmptyStep struct { + signature []byte // H520 + step uint64 +} diff --git a/consensus/aura/unassemble.go b/consensus/aura/unassemble.go new file mode 100644 index 00000000000..170f075306b --- /dev/null +++ b/consensus/aura/unassemble.go @@ -0,0 +1,34 @@ +package aura + +import ( + "container/list" + libcommon "github.com/ledgerwatch/erigon-lib/common" +) + +// nolint +type unAssembledHeader struct { + hash libcommon.Hash + number uint64 + signers []libcommon.Address +} +type unAssembledHeaders struct { + l *list.List +} + +func (u unAssembledHeaders) PushBack(header *unAssembledHeader) { u.l.PushBack(header) } +func (u unAssembledHeaders) PushFront(header *unAssembledHeader) { u.l.PushFront(header) } +func (u unAssembledHeaders) Pop() *unAssembledHeader { + e := u.l.Front() + if e == nil { + return nil + } + u.l.Remove(e) + return e.Value.(*unAssembledHeader) +} +func (u unAssembledHeaders) Front() *unAssembledHeader { + e := u.l.Front() + if e == nil { + return nil + } + return e.Value.(*unAssembledHeader) +} diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 27e64d7494e..ac77aec419c 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -3,7 +3,6 @@ package bor import ( "bytes" "context" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -476,11 +475,7 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t return ErrInvalidTimestamp } - // Retrieve the snapshot needed to verify this header and cache it - snap, err := c.snapshot(chain, number-1, header.ParentHash, parents) - if err != nil { - return err - } + sprintLength := c.config.CalculateSprint(number) // Verify the validator list match the local contract // @@ -490,7 +485,7 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t // contract data and span data won't match for that. Skip validating // for 0th span. TODO: Remove `number > zerothSpanEnd` check // once we start fetching validator data from contract. 
- if number > zerothSpanEnd && isSprintStart(number+1, c.config.CalculateSprint(number)) { + if number > zerothSpanEnd && isSprintStart(number+1, sprintLength) { producerSet, err := c.spanner.GetCurrentProducers(number+1, c.authorizedSigner.Load().signer, c.getSpanForBlock) if err != nil { @@ -515,9 +510,14 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t } } } + snap, err := c.snapshot(chain, number-1, header.ParentHash, parents) + if err != nil { + return err + } // verify the validator list in the last sprint block - if isSprintStart(number, c.config.CalculateSprint(number)) { + if isSprintStart(number, sprintLength) { + // Retrieve the snapshot needed to verify this header and cache it parentValidatorBytes := parent.Extra[extraVanity : len(parent.Extra)-extraSeal] validatorsBytes := make([]byte, len(snap.ValidatorSet.Validators)*validatorHeaderBytesLength) @@ -534,7 +534,7 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t } // All basic checks passed, verify the seal and return - return c.verifySeal(chain, header, parents) + return c.verifySeal(chain, header, parents, snap) } // snapshot retrieves the authorization snapshot at a given point in time. @@ -661,14 +661,18 @@ func (c *Bor) VerifyUncles(_ consensus.ChainReader, _ *types.Header, uncles []*t // VerifySeal implements consensus.Engine, checking whether the signature contained // in the header satisfies the consensus protocol requirements. func (c *Bor) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { - return c.verifySeal(chain, header, nil) + snap, err := c.snapshot(chain, header.Number.Uint64()-1, header.ParentHash, nil) + if err != nil { + return err + } + return c.verifySeal(chain, header, nil, snap) } // verifySeal checks whether the signature contained in the header satisfies the // consensus protocol requirements. The method accepts an optional list of parent // headers that aren't yet part of the local blockchain to generate the snapshots // from. -func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error { +func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header, snap *Snapshot) error { // Verifying the genesis block is not supported number := header.Number.Uint64() if number == 0 { @@ -680,12 +684,6 @@ func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header return err } - // Retrieve the snapshot needed to verify this header and cache it - snap, err := c.snapshot(chain, number-1, header.ParentHash, parents) - if err != nil { - return err - } - if !snap.ValidatorSet.HasAddress(signer) { // Check the UnauthorizedSignerError.Error() msg to see why we pass number-1 return &UnauthorizedSignerError{number - 1, signer.Bytes()} @@ -819,8 +817,7 @@ func (c *Bor) Finalize(config *chain.Config, header *types.Header, state *state. 
if c.HeimdallClient != nil { // commit states - _, err = c.CommitStates(state, header, cx, syscall) - if err != nil { + if err = c.CommitStates(state, header, cx, syscall); err != nil { c.logger.Error("Error while committing states", "err", err) return nil, types.Receipts{}, err } @@ -896,8 +893,7 @@ func (c *Bor) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Heade if c.HeimdallClient != nil { // commit states - _, err = c.CommitStates(state, header, cx, syscall) - if err != nil { + if err = c.CommitStates(state, header, cx, syscall); err != nil { c.logger.Error("Error while committing states", "err", err) return nil, nil, types.Receipts{}, err } @@ -929,7 +925,7 @@ func (c *Bor) GenerateSeal(chain consensus.ChainHeaderReader, currnt, parent *ty } func (c *Bor) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, - state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SystemCall) { + state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SysCallCustom) { } // Authorize injects a private key into the consensus engine to mint new blocks @@ -1106,9 +1102,10 @@ func (c *Bor) needToCommitSpan(currentSpan *span.Span, headerNumber uint64) bool if currentSpan.EndBlock == 0 { return true } + sprintLength := c.config.CalculateSprint(headerNumber) // if current block is first block of last sprint in current span - if currentSpan.EndBlock > c.config.CalculateSprint(headerNumber) && currentSpan.EndBlock-c.config.CalculateSprint(headerNumber)+1 == headerNumber { + if currentSpan.EndBlock > sprintLength && currentSpan.EndBlock-sprintLength+1 == headerNumber { return true } @@ -1195,7 +1192,7 @@ func (c *Bor) CommitStates( header *types.Header, chain statefull.ChainContext, syscall consensus.SystemCall, -) ([]*types.StateSyncData, error) { +) error { fetchStart := time.Now() number := header.Number.Uint64() @@ -1211,7 +1208,7 @@ func (c *Bor) CommitStates( // the incoming chain. 
lastStateIDBig, err = c.GenesisContractsClient.LastStateId(syscall) if err != nil { - return nil, err + return err } if c.config.IsIndore(number) { @@ -1224,7 +1221,7 @@ func (c *Bor) CommitStates( lastStateID := lastStateIDBig.Uint64() from = lastStateID + 1 - c.logger.Debug( + c.logger.Info( "Fetching state updates from Heimdall", "fromID", from, "to", to.Format(time.RFC3339), @@ -1232,7 +1229,7 @@ func (c *Bor) CommitStates( eventRecords, err := c.HeimdallClient.StateSyncEvents(c.execCtx, lastStateID+1, to.Unix()) if err != nil { - return nil, err + return err } if c.config.OverrideStateSyncRecords != nil { @@ -1244,7 +1241,6 @@ func (c *Bor) CommitStates( fetchTime := time.Since(fetchStart) processStart := time.Now() chainID := c.chainConfig.ChainID.String() - stateSyncs := make([]*types.StateSyncData, 0, len(eventRecords)) for _, eventRecord := range eventRecords { if eventRecord.ID <= lastStateID { @@ -1256,16 +1252,8 @@ func (c *Bor) CommitStates( break } - stateData := types.StateSyncData{ - ID: eventRecord.ID, - Contract: eventRecord.Contract, - Data: hex.EncodeToString(eventRecord.Data), - TxHash: eventRecord.TxHash, - } - stateSyncs = append(stateSyncs, &stateData) - if err := c.GenesisContractsClient.CommitState(eventRecord, syscall); err != nil { - return nil, err + return err } lastStateID++ @@ -1273,9 +1261,9 @@ func (c *Bor) CommitStates( processTime := time.Since(processStart) - c.logger.Debug("StateSyncData", "number", number, "lastStateID", lastStateID, "total records", len(eventRecords), "fetch time", int(fetchTime.Milliseconds()), "process time", int(processTime.Milliseconds())) + c.logger.Info("StateSyncData", "number", number, "lastStateID", lastStateID, "total records", len(eventRecords), "fetch time", int(fetchTime.Milliseconds()), "process time", int(processTime.Milliseconds())) - return stateSyncs, nil + return nil } func validateEventRecord(eventRecord *clerk.EventRecordWithTime, number uint64, to time.Time, lastStateID uint64, chainID string) error { diff --git a/consensus/bor/contract/client.go b/consensus/bor/contract/client.go index a8c482a9d1d..5e3cf3c49b8 100644 --- a/consensus/bor/contract/client.go +++ b/consensus/bor/contract/client.go @@ -74,7 +74,7 @@ func (gc *GenesisContractsClient) CommitState(event *clerk.EventRecordWithTime, return err } - gc.logger.Debug("→ committing new state", "eventRecord", event.String()) + gc.logger.Info("→ committing new state", "eventRecord", event.String()) _, err = syscall(libcommon.HexToAddress(gc.StateReceiverContract), data) return err diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 88515977752..7d705ea666c 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -366,7 +366,7 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header } func (c *Clique) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, - state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SystemCall) { + state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SysCallCustom) { } func (c *Clique) CalculateRewards(config *chain.Config, header *types.Header, uncles []*types.Header, syscall consensus.SystemCall, diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go index 43bb77bd5a5..793cdff7530 100644 --- a/consensus/clique/clique_test.go +++ b/consensus/clique/clique_test.go @@ -86,7 +86,7 @@ func TestReimportMirroredState(t 
*testing.T) { } block.AddTxWithChain(getHeader, engine, tx) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go index a5c99a0e685..4b560c0b2aa 100644 --- a/consensus/clique/snapshot_test.go +++ b/consensus/clique/snapshot_test.go @@ -437,7 +437,7 @@ func TestClique(t *testing.T) { copy(nonce[:], clique.NonceAuthVote) gen.SetNonce(nonce) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } diff --git a/consensus/consensus.go b/consensus/consensus.go index 30aa5e12181..7512ae2b19d 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -65,6 +65,9 @@ type ChainReader interface { } type SystemCall func(contract libcommon.Address, data []byte) ([]byte, error) + +// Use more options to call contract +type SysCallCustom func(contract libcommon.Address, data []byte, ibs *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) type Call func(contract libcommon.Address, data []byte) ([]byte, error) // RewardKind - The kind of block reward. @@ -129,7 +132,7 @@ type EngineWriter interface { // Initialize runs any pre-transaction state modifications (e.g. epoch start) Initialize(config *chain.Config, chain ChainHeaderReader, header *types.Header, - state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall SystemCall) + state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall SysCallCustom) // Finalize runs any post-transaction state modifications (e.g. block rewards) // but does not assemble the block. diff --git a/consensus/db/db.go b/consensus/db/db.go deleted file mode 100644 index 118770100d7..00000000000 --- a/consensus/db/db.go +++ /dev/null @@ -1,21 +0,0 @@ -package db - -import ( - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/log/v3" -) - -func OpenDatabase(path string, inMem bool, readonly bool) kv.RwDB { - opts := mdbx.NewMDBX(log.Root()).Label(kv.ConsensusDB) - if readonly { - opts = opts.Readonly() - } - if inMem { - opts = opts.InMem("") - } else { - opts = opts.Path(path) - } - - return opts.MustOpen() -} diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index fd84537704b..4aaa258e27a 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -552,7 +552,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.H } func (ethash *Ethash) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, - state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SystemCall) { + state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SysCallCustom) { } // Finalize implements consensus.Engine, accumulating the block and uncle rewards, diff --git a/consensus/merge/merge.go b/consensus/merge/merge.go index ac23d0007ce..d988c2d79a5 100644 --- a/consensus/merge/merge.go +++ b/consensus/merge/merge.go @@ -10,7 +10,6 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/aura" "github.com/ledgerwatch/erigon/consensus/misc" @@ -71,7 +70,7 @@ func (s *Merge) Type() chain.ConsensusName { // proof-of-stake verified author of the block. 
// This is thread-safe (only access the header.Coinbase or the underlying engine's thread-safe method) func (s *Merge) Author(header *types.Header) (libcommon.Address, error) { - if !IsPoSHeader(header) { + if !misc.IsPoSHeader(header) { return s.eth1Engine.Author(header) } return header.Coinbase, nil @@ -98,7 +97,7 @@ func (s *Merge) VerifyHeader(chain consensus.ChainHeaderReader, header *types.He // VerifyUncles implements consensus.Engine, always returning an error for any // uncles as this consensus mechanism doesn't permit uncles. func (s *Merge) VerifyUncles(chain consensus.ChainReader, header *types.Header, uncles []*types.Header) error { - if !IsPoSHeader(header) { + if !misc.IsPoSHeader(header) { return s.eth1Engine.VerifyUncles(chain, header, uncles) } if len(uncles) > 0 { @@ -124,7 +123,7 @@ func (s *Merge) Prepare(chain consensus.ChainHeaderReader, header *types.Header, func (s *Merge) CalculateRewards(config *chain.Config, header *types.Header, uncles []*types.Header, syscall consensus.SystemCall, ) ([]consensus.Reward, error) { _, isAura := s.eth1Engine.(*aura.AuRa) - if !IsPoSHeader(header) || isAura { + if !misc.IsPoSHeader(header) || isAura { return s.eth1Engine.CalculateRewards(config, header, uncles, syscall) } return []consensus.Reward{}, nil @@ -134,7 +133,7 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainHeaderReader, syscall consensus.SystemCall, ) (types.Transactions, types.Receipts, error) { - if !IsPoSHeader(header) { + if !misc.IsPoSHeader(header) { return s.eth1Engine.Finalize(config, header, state, txs, uncles, r, withdrawals, chain, syscall) } @@ -166,17 +165,13 @@ func (s *Merge) FinalizeAndAssemble(config *chain.Config, header *types.Header, txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainHeaderReader, syscall consensus.SystemCall, call consensus.Call, ) (*types.Block, types.Transactions, types.Receipts, error) { - if !IsPoSHeader(header) { + if !misc.IsPoSHeader(header) { return s.eth1Engine.FinalizeAndAssemble(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, call) } outTxs, outReceipts, err := s.Finalize(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall) if err != nil { return nil, nil, nil, err } - if config.IsCancun(header.Time) { - dataGasUsed := uint64(misc.CountBlobs(txs)) * params.DataGasPerBlob - header.DataGasUsed = &dataGasUsed - } return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals), outTxs, outReceipts, nil } @@ -233,11 +228,7 @@ func (s *Merge) verifyHeader(chain consensus.ChainHeaderReader, header, parent * return errInvalidUncleHash } - skipGasLimit := false - if auraEngine, ok := s.eth1Engine.(*aura.AuRa); ok { - skipGasLimit = auraEngine.HasGasLimitContract() - } - if err := misc.VerifyEip1559Header(chain.Config(), parent, header, skipGasLimit); err != nil { + if err := misc.VerifyEip1559Header(chain.Config(), parent, header, false); err != nil { return err } @@ -265,7 +256,7 @@ func (s *Merge) verifyHeader(chain consensus.ChainHeaderReader, header, parent * } func (s *Merge) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { - if !IsPoSHeader(block.Header()) { + if !misc.IsPoSHeader(block.Header()) { return s.eth1Engine.Seal(chain, block, results, stop) } return nil @@ 
-279,7 +270,7 @@ func (s *Merge) IsServiceTransaction(sender libcommon.Address, syscall consensus return s.eth1Engine.IsServiceTransaction(sender, syscall) } -func (s *Merge) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SystemCall) { +func (s *Merge) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SysCallCustom) { s.eth1Engine.Initialize(config, chain, header, state, txs, uncles, syscall) } @@ -291,16 +282,6 @@ func (s *Merge) Close() error { return s.eth1Engine.Close() } -// IsPoSHeader reports the header belongs to the PoS-stage with some special fields. -// This function is not suitable for a part of APIs like Prepare or CalcDifficulty -// because the header difficulty is not set yet. -func IsPoSHeader(header *types.Header) bool { - if header.Difficulty == nil { - panic("IsPoSHeader called with invalid difficulty") - } - return header.Difficulty.Cmp(ProofOfStakeDifficulty) == 0 -} - // IsTTDReached checks if the TotalTerminalDifficulty has been surpassed on the `parentHash` block. // It depends on the parentHash already being stored in the database. // If the total difficulty is not stored in the database a ErrUnknownAncestorTD error is returned. diff --git a/consensus/misc/eip4844.go b/consensus/misc/eip4844.go index 7dc6042b160..f41e30cc5ec 100644 --- a/consensus/misc/eip4844.go +++ b/consensus/misc/eip4844.go @@ -20,6 +20,7 @@ import ( "fmt" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon/core/types" @@ -36,10 +37,10 @@ func CalcExcessDataGas(parent *types.Header) uint64 { dataGasUsed = *parent.DataGasUsed } - if excessDataGas+dataGasUsed < params.TargetDataGasPerBlock { + if excessDataGas+dataGasUsed < chain.TargetDataGasPerBlock { return 0 } - return excessDataGas + dataGasUsed - params.TargetDataGasPerBlock + return excessDataGas + dataGasUsed - chain.TargetDataGasPerBlock } // FakeExponential approximates factor * e ** (num / denom) using a taylor expansion @@ -70,15 +71,6 @@ func FakeExponential(factor, denom *uint256.Int, excessDataGas uint64) (*uint256 return output.Div(output, denom), nil } -// CountBlobs returns the number of blob transactions in txs -func CountBlobs(txs []types.Transaction) int { - var count int - for _, tx := range txs { - count += len(tx.GetDataHashes()) - } - return count -} - // VerifyEip4844Header verifies that the header is not malformed func VerifyEip4844Header(config *chain.Config, parent, header *types.Header) error { if header.DataGasUsed == nil { @@ -96,5 +88,5 @@ func GetDataGasPrice(excessDataGas uint64) (*uint256.Int, error) { } func GetDataGasUsed(numBlobs int) uint64 { - return uint64(numBlobs) * params.DataGasPerBlob + return uint64(numBlobs) * chain.DataGasPerBlob } diff --git a/consensus/misc/pos.go b/consensus/misc/pos.go new file mode 100644 index 00000000000..ce6bdc5585f --- /dev/null +++ b/consensus/misc/pos.go @@ -0,0 +1,22 @@ +package misc + +import ( + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/types" +) + +// Constants for The Merge as specified by EIP-3675: Upgrade consensus to Proof-of-Stake +var ( + ProofOfStakeDifficulty = libcommon.Big0 // PoS block's difficulty is always 0 + ProofOfStakeNonce = types.BlockNonce{} // PoS block's have all-zero 
nonces +) + +// IsPoSHeader reports the header belongs to the PoS-stage with some special fields. +// This function is not suitable for a part of APIs like Prepare or CalcDifficulty +// because the header difficulty is not set yet. +func IsPoSHeader(header *types.Header) bool { + if header.Difficulty == nil { + panic("IsPoSHeader called with invalid difficulty") + } + return header.Difficulty.Cmp(ProofOfStakeDifficulty) == 0 +} diff --git a/core/block_validator_test.go b/core/block_validator_test.go index c82625b3dac..a175b496b94 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -38,7 +38,7 @@ func TestHeaderVerification(t *testing.T) { ) m := stages.MockWithGenesisEngine(t, gspec, engine, false) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 8, nil, false /* intermediateHashes */) + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 8, nil) if err != nil { t.Fatalf("genetate chain: %v", err) } @@ -78,7 +78,7 @@ func TestHeaderWithSealVerification(t *testing.T) { ) m := stages.MockWithGenesisEngine(t, gspec, engine, false) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 8, nil, false /* intermediateHashes */) + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 8, nil) if err != nil { t.Fatalf("genetate chain: %v", err) } diff --git a/core/blockchain.go b/core/blockchain.go index bb3c8c72be1..d165f7e0ce8 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -36,7 +36,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" - "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" ) @@ -86,8 +85,9 @@ func ExecuteBlockEphemerally( header := block.Header() usedGas := new(uint64) + usedDataGas := new(uint64) gp := new(GasPool) - gp.AddGas(block.GasLimit()).AddDataGas(params.MaxDataGasPerBlock) + gp.AddGas(block.GasLimit()).AddDataGas(chain.MaxDataGasPerBlock) var ( rejectedTxs []*RejectedTx @@ -117,7 +117,7 @@ func ExecuteBlockEphemerally( vmConfig.Tracer = tracer writeTrace = true } - receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig) + receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, usedDataGas, *vmConfig) if writeTrace { if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { ftracer.Flush(tx) @@ -147,6 +147,10 @@ func ExecuteBlockEphemerally( return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) } + if header.DataGasUsed != nil && *usedDataGas != *header.DataGasUsed { + return nil, fmt.Errorf("data gas used by execution: %d, in header: %d", *usedDataGas, *header.DataGasUsed) + } + var bloom types.Bloom if !vmConfig.NoReceipts { bloom = types.CreateBloom(receipts) @@ -172,130 +176,26 @@ func ExecuteBlockEphemerally( Rejected: rejectedTxs, } - return execRs, nil -} - -// ExecuteBlockEphemerallyBor runs a block from provided stateReader and -// writes the result to the provided stateWriter -func ExecuteBlockEphemerallyBor( - chainConfig *chain.Config, vmConfig *vm.Config, - blockHashFunc func(n uint64) libcommon.Hash, - engine consensus.Engine, block *types.Block, - stateReader state.StateReader, stateWriter state.WriterWithChangeSets, - chainReader consensus.ChainHeaderReader, getTracer func(txIndex int, txHash libcommon.Hash) (vm.EVMLogger, error), -) 
(*EphemeralExecResult, error) { - - defer BlockExecutionTimer.UpdateDuration(time.Now()) - block.Uncles() - ibs := state.New(stateReader) - header := block.Header() - - usedGas := new(uint64) - gp := new(GasPool) - gp.AddGas(block.GasLimit()).AddDataGas(params.MaxDataGasPerBlock) - - var ( - rejectedTxs []*RejectedTx - includedTxs types.Transactions - receipts types.Receipts - ) - - if !vmConfig.ReadOnly { - if err := InitializeBlockExecution(engine, chainReader, block.Header(), block.Transactions(), block.Uncles(), chainConfig, ibs); err != nil { - return nil, err + if chainConfig.Bor != nil { + var logs []*types.Log + for _, receipt := range receipts { + logs = append(logs, receipt.Logs...) } - } - if chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 { - misc.ApplyDAOHardFork(ibs) - } - noop := state.NewNoopWriter() - //fmt.Printf("====txs processing start: %d====\n", block.NumberU64()) - for i, tx := range block.Transactions() { - ibs.SetTxContext(tx.Hash(), block.Hash(), i) - writeTrace := false - if vmConfig.Debug && vmConfig.Tracer == nil { - tracer, err := getTracer(i, tx.Hash()) - if err != nil { - return nil, fmt.Errorf("could not obtain tracer: %w", err) - } - vmConfig.Tracer = tracer - writeTrace = true - } + stateSyncReceipt := &types.Receipt{} + if chainConfig.Consensus == chain.BorConsensus && len(blockLogs) > 0 { + slices.SortStableFunc(blockLogs, func(i, j *types.Log) bool { return i.Index < j.Index }) - receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig) - if writeTrace { - if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { - ftracer.Flush(tx) - } + if len(blockLogs) > len(logs) { + stateSyncReceipt.Logs = blockLogs[len(logs):] // get state-sync logs from `state.Logs()` - vmConfig.Tracer = nil - } - if err != nil { - if !vmConfig.StatelessExec { - return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) - } - rejectedTxs = append(rejectedTxs, &RejectedTx{i, err.Error()}) - } else { - includedTxs = append(includedTxs, tx) - if !vmConfig.NoReceipts { - receipts = append(receipts, receipt) + // fill the state sync with the correct information + types.DeriveFieldsForBorReceipt(stateSyncReceipt, block.Hash(), block.NumberU64(), receipts) + stateSyncReceipt.Status = types.ReceiptStatusSuccessful } } - } - - receiptSha := types.DeriveSha(receipts) - if !vmConfig.StatelessExec && chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts && receiptSha != block.ReceiptHash() { - return nil, fmt.Errorf("mismatched receipt headers for block %d (%s != %s)", block.NumberU64(), receiptSha.Hex(), block.ReceiptHash().Hex()) - } - - if !vmConfig.StatelessExec && *usedGas != header.GasUsed { - return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) - } - - var bloom types.Bloom - if !vmConfig.NoReceipts { - bloom = types.CreateBloom(receipts) - if !vmConfig.StatelessExec && bloom != header.Bloom { - return nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", bloom, header.Bloom) - } - } - if !vmConfig.ReadOnly { - txs := block.Transactions() - if _, _, _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, block.Withdrawals(), chainReader, false); err != nil { - return nil, err - } - } - - var logs []*types.Log - for _, receipt := range receipts { - logs = append(logs, 
receipt.Logs...) - } - - blockLogs := ibs.Logs() - stateSyncReceipt := &types.Receipt{} - if chainConfig.Consensus == chain.BorConsensus && len(blockLogs) > 0 { - slices.SortStableFunc(blockLogs, func(i, j *types.Log) bool { return i.Index < j.Index }) - - if len(blockLogs) > len(logs) { - stateSyncReceipt.Logs = blockLogs[len(logs):] // get state-sync logs from `state.Logs()` - - // fill the state sync with the correct information - types.DeriveFieldsForBorReceipt(stateSyncReceipt, block.Hash(), block.NumberU64(), receipts) - stateSyncReceipt.Status = types.ReceiptStatusSuccessful - } - } - execRs := &EphemeralExecResult{ - TxRoot: types.DeriveSha(includedTxs), - ReceiptRoot: receiptSha, - Bloom: bloom, - LogsHash: rlpHash(blockLogs), - Receipts: receipts, - Difficulty: (*math.HexOrDecimal256)(header.Difficulty), - GasUsed: math.HexOrDecimal64(*usedGas), - Rejected: rejectedTxs, - StateSyncReceipt: stateSyncReceipt, + execRs.StateSyncReceipt = stateSyncReceipt } return execRs, nil @@ -387,6 +287,7 @@ func CallContract(contract libcommon.Address, data []byte, chainConfig chain.Con gp := new(GasPool) gp.AddGas(50_000_000) var gasUsed uint64 + var gasDataUsed uint64 if chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { misc.ApplyDAOHardFork(ibs) } @@ -396,7 +297,7 @@ func CallContract(contract libcommon.Address, data []byte, chainConfig chain.Con return nil, fmt.Errorf("SysCallContract: %w ", err) } vmConfig := vm.Config{NoReceipts: true} - _, result, err = ApplyTransaction(&chainConfig, GetHashFn(header, nil), engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, vmConfig) + _, result, err = ApplyTransaction(&chainConfig, GetHashFn(header, nil), engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, &gasDataUsed, vmConfig) if err != nil { return result, fmt.Errorf("SysCallContract: %w ", err) } @@ -442,8 +343,8 @@ func FinalizeBlockExecution( } func InitializeBlockExecution(engine consensus.Engine, chain consensus.ChainHeaderReader, header *types.Header, txs types.Transactions, uncles []*types.Header, cc *chain.Config, ibs *state.IntraBlockState) error { - engine.Initialize(cc, chain, header, ibs, txs, uncles, func(contract libcommon.Address, data []byte) ([]byte, error) { - return SysCallContract(contract, data, cc, ibs, header, engine, false /* constCall */) + engine.Initialize(cc, chain, header, ibs, txs, uncles, func(contract libcommon.Address, data []byte, ibState *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) { + return SysCallContract(contract, data, cc, ibState, header, engine, constCall) }) noop := state.NewNoopWriter() ibs.FinalizeTx(cc.Rules(header.Number.Uint64(), header.Time), noop) diff --git a/core/chain_makers.go b/core/chain_makers.go index ec257eb198b..45ce0ea7532 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -18,6 +18,7 @@ package core import ( "context" + "encoding/binary" "fmt" "math/big" @@ -37,7 +38,6 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/trie" ) @@ -58,6 +58,8 @@ type BlockGen struct { config *chain.Config engine consensus.Engine + + beforeAddTx func() } // SetCoinbase sets the coinbase of the generated block. @@ -114,11 +116,14 @@ func (b *BlockGen) AddFailedTx(tx types.Transaction) { // added. 
If contract code relies on the BLOCKHASH instruction, // the block in chain will be returned. func (b *BlockGen) AddTxWithChain(getHeader func(hash libcommon.Hash, number uint64) *types.Header, engine consensus.Engine, tx types.Transaction) { + if b.beforeAddTx != nil { + b.beforeAddTx() + } if b.gasPool == nil { b.SetCoinbase(libcommon.Address{}) } b.ibs.SetTxContext(tx.Hash(), libcommon.Hash{}, len(b.txs)) - receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}) + receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, b.header.DataGasUsed, vm.Config{}) if err != nil { panic(err) } @@ -131,7 +136,7 @@ func (b *BlockGen) AddFailedTxWithChain(getHeader func(hash libcommon.Hash, numb b.SetCoinbase(libcommon.Address{}) } b.ibs.SetTxContext(tx.Hash(), libcommon.Hash{}, len(b.txs)) - receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}) + receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, b.header.DataGasUsed, vm.Config{}) _ = err // accept failed transactions b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) @@ -297,9 +302,7 @@ func (cp *ChainPack) NumberOfPoWBlocks() int { // Blocks created by GenerateChain do not contain valid proof of work // values. Inserting them into BlockChain requires use of FakePow or // a similar non-validating proof of work implementation. 
-func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.Engine, db kv.RwDB, n int, gen func(int, *BlockGen), - intermediateHashes bool, -) (*ChainPack, error) { +func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.Engine, db kv.RwDB, n int, gen func(int, *BlockGen)) (*ChainPack, error) { if config == nil { config = params.TestChainConfig } @@ -310,12 +313,53 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E return nil, errBegin } defer tx.Rollback() - logger := log.New("generate-chain", config.ChainName) + var stateReader state.StateReader + var stateWriter state.StateWriter + if ethconfig.EnableHistoryV4InTest { + panic("implement me") + //agg := tx.(*temporal.Tx).Agg() + //sd := agg.SharedDomains() + //defer agg.StartUnbufferedWrites().FinishWrites() + //agg.SetTx(tx) + //stateWriter, stateReader = state.WrapStateIO(sd) + //sd.SetTx(tx) + //defer agg.CloseSharedDomains() + //oldTxNum := agg.GetTxNum() + //defer func() { + // agg.SetTxNum(oldTxNum) + //}() + } + txNum := -1 + setBlockNum := func(blockNum uint64) { + if ethconfig.EnableHistoryV4InTest { + panic("implement me") + //stateReader.(*state.StateReaderV4).SetBlockNum(blockNum) + //stateWriter.(*state.StateWriterV4).SetBlockNum(blockNum) + } else { + stateReader = state.NewPlainStateReader(tx) + stateWriter = state.NewPlainStateWriter(tx, nil, parent.NumberU64()+blockNum+1) + } + } + txNumIncrement := func() { + txNum++ + if ethconfig.EnableHistoryV4InTest { + panic("implement me") + //tx.(*temporal.Tx).Agg().SetTxNum(uint64(txNum)) + //stateReader.(*state.StateReaderV4).SetTxNum(uint64(txNum)) + //stateWriter.(*state.StateWriterV4).SetTxNum(uint64(txNum)) + } + } genblock := func(i int, parent *types.Block, ibs *state.IntraBlockState, stateReader state.StateReader, stateWriter state.StateWriter) (*types.Block, types.Receipts, error) { - b := &BlockGen{i: i, chain: blocks, parent: parent, ibs: ibs, stateReader: stateReader, config: config, engine: engine, txs: make([]types.Transaction, 0, 1), receipts: make([]*types.Receipt, 0, 1), uncles: make([]*types.Header, 0, 1)} + txNumIncrement() + + b := &BlockGen{i: i, chain: blocks, parent: parent, ibs: ibs, stateReader: stateReader, config: config, engine: engine, txs: make([]types.Transaction, 0, 1), receipts: make([]*types.Receipt, 0, 1), uncles: make([]*types.Header, 0, 1), + beforeAddTx: func() { + txNumIncrement() + }, + } b.header = makeHeader(chainreader, parent, ibs, b.engine) // Mutate the state and block according to any hard-fork specs if daoBlock := config.DAOForkBlock; daoBlock != nil { @@ -332,6 +376,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E if gen != nil { gen(i, b) } + txNumIncrement() if b.engine != nil { // Finalize and seal the block if _, _, _, err := b.engine.FinalizeAndAssemble(config, b.header, ibs, b.txs, b.uncles, b.receipts, nil, nil, nil, nil); err != nil { @@ -343,7 +388,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E } var err error - b.header.Root, err = hashRoot(tx, b.header) + b.header.Root, err = CalcHashRootForTests(tx, b.header, ethconfig.EnableHistoryV4InTest) if err != nil { return nil, nil, fmt.Errorf("call to CalcTrieRoot: %w", err) } @@ -354,15 +399,8 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E return nil, nil, fmt.Errorf("no engine to generate blocks") } - var txNum uint64 for i := 0; i < n; i++ { - stateReader := 
rpchelper.NewLatestStateReader(tx) - var stateWriter state.StateWriter - if ethconfig.EnableHistoryV4InTest { - panic("implement me on v4") - } else { - stateWriter = state.NewPlainStateWriter(tx, nil, parent.NumberU64()+uint64(i)+1) - } + setBlockNum(uint64(i)) ibs := state.New(stateReader) block, receipt, err := genblock(i, parent, ibs, stateReader, stateWriter) if err != nil { @@ -372,8 +410,6 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E blocks[i] = block receipts[i] = receipt parent = block - //TODO: genblock must call agg.SetTxNum after each txNum??? - txNum += uint64(block.Transactions().Len() + 2) //2 system txsr } tx.Rollback() @@ -381,14 +417,36 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E return &ChainPack{Headers: headers, Blocks: blocks, Receipts: receipts, TopBlock: blocks[n-1]}, nil } -func hashRoot(tx kv.RwTx, header *types.Header) (hashRoot libcommon.Hash, err error) { - if ethconfig.EnableHistoryV4InTest { - if GenerateTrace { - panic("implement me on v4") - } - panic("implement me on v4") +func hashKeyAndAddIncarnation(k []byte, h *common.Hasher) (newK []byte, err error) { + if len(k) == length.Addr { + newK = make([]byte, length.Hash) + } else { + newK = make([]byte, length.Hash*2+length.Incarnation) + } + h.Sha.Reset() + //nolint:errcheck + h.Sha.Write(k[:length.Addr]) + //nolint:errcheck + h.Sha.Read(newK[:length.Hash]) + if len(k) == length.Addr+length.Incarnation+length.Hash { // PlainState storage + copy(newK[length.Hash:], k[length.Addr:length.Addr+length.Incarnation]) + h.Sha.Reset() + //nolint:errcheck + h.Sha.Write(k[length.Addr+length.Incarnation:]) + //nolint:errcheck + h.Sha.Read(newK[length.Hash+length.Incarnation:]) + } else if len(k) == length.Addr+length.Hash { // e4 Domain storage + binary.BigEndian.PutUint64(newK[length.Hash:], 1) + h.Sha.Reset() + //nolint:errcheck + h.Sha.Write(k[len(k)-length.Hash:]) + //nolint:errcheck + h.Sha.Read(newK[length.Hash+length.Incarnation:]) } + return newK, nil +} +func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4 bool) (hashRoot libcommon.Hash, err error) { if err := tx.ClearBucket(kv.HashedAccounts); err != nil { return hashRoot, fmt.Errorf("clear HashedAccounts bucket: %w", err) } @@ -401,6 +459,65 @@ func hashRoot(tx kv.RwTx, header *types.Header) (hashRoot libcommon.Hash, err er if err := tx.ClearBucket(kv.TrieOfStorage); err != nil { return hashRoot, fmt.Errorf("clear TrieOfStorage bucket: %w", err) } + + if histV4 { + if GenerateTrace { + panic("implement me") + } + panic("implement me") + //h := common.NewHasher() + //defer common.ReturnHasherToPool(h) + + //agg := tx.(*temporal.Tx).Agg() + //agg.SetTx(tx) + //it, err := tx.(*temporal.Tx).AggCtx().DomainRangeLatest(tx, kv.AccountsDomain, nil, nil, -1) + //if err != nil { + // return libcommon.Hash{}, err + //} + // + //for it.HasNext() { + // k, v, err := it.Next() + // if err != nil { + // return hashRoot, fmt.Errorf("interate over plain state: %w", err) + // } + // if len(v) > 0 { + // v, err = accounts.ConvertV3toV2(v) + // if err != nil { + // return hashRoot, fmt.Errorf("interate over plain state: %w", err) + // } + // } + // newK, err := hashKeyAndAddIncarnation(k, h) + // if err != nil { + // return hashRoot, fmt.Errorf("clear HashedAccounts bucket: %w", err) + // } + // if err := tx.Put(kv.HashedAccounts, newK, v); err != nil { + // return hashRoot, fmt.Errorf("clear HashedAccounts bucket: %w", err) + // } + //} + // + //it, err = 
tx.(*temporal.Tx).AggCtx().DomainRangeLatest(tx, kv.StorageDomain, nil, nil, -1) + //if err != nil { + // return libcommon.Hash{}, err + //} + //for it.HasNext() { + // k, v, err := it.Next() + // if err != nil { + // return hashRoot, fmt.Errorf("interate over plain state: %w", err) + // } + // newK, err := hashKeyAndAddIncarnation(k, h) + // if err != nil { + // return hashRoot, fmt.Errorf("clear HashedStorage bucket: %w", err) + // } + // if err := tx.Put(kv.HashedStorage, newK, v); err != nil { + // return hashRoot, fmt.Errorf("clear HashedStorage bucket: %w", err) + // } + // + //} + // + //root, err := trie.CalcRoot("GenerateChain", tx) + //return root, err + } + c, err := tx.Cursor(kv.PlainState) if err != nil { return hashRoot, err @@ -411,24 +528,11 @@ func hashRoot(tx kv.RwTx, header *types.Header) (hashRoot libcommon.Hash, err er if err != nil { return hashRoot, fmt.Errorf("interate over plain state: %w", err) } - var newK []byte - if len(k) == length.Addr { - newK = make([]byte, length.Hash) - } else { - newK = make([]byte, length.Hash*2+length.Incarnation) + newK, err := hashKeyAndAddIncarnation(k, h) + if err != nil { + return hashRoot, fmt.Errorf("insert hashed key: %w", err) } - h.Sha.Reset() - //nolint:errcheck - h.Sha.Write(k[:length.Addr]) - //nolint:errcheck - h.Sha.Read(newK[:length.Hash]) if len(k) > length.Addr { - copy(newK[length.Hash:], k[length.Addr:length.Addr+length.Incarnation]) - h.Sha.Reset() - //nolint:errcheck - h.Sha.Write(k[length.Addr+length.Incarnation:]) - //nolint:errcheck - h.Sha.Read(newK[length.Hash+length.Incarnation:]) if err = tx.Put(kv.HashedStorage, newK, common.CopyBytes(v)); err != nil { return hashRoot, fmt.Errorf("insert hashed key: %w", err) } @@ -440,6 +544,7 @@ func hashRoot(tx kv.RwTx, header *types.Header) (hashRoot libcommon.Hash, err er } c.Close() + if GenerateTrace { fmt.Printf("State after %d================\n", header.Number) it, err := tx.Range(kv.HashedAccounts, nil, nil) @@ -500,6 +605,7 @@ func MakeEmptyHeader(parent *types.Header, chainConfig *chain.Config, timestamp if chainConfig.IsCancun(header.Time) { excessDataGas := misc.CalcExcessDataGas(parent) header.ExcessDataGas = &excessDataGas + header.DataGasUsed = new(uint64) } return header diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index f5b7b9c488c..d8894d81661 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -75,7 +75,7 @@ func TestCreation(t *testing.T) { {15049999, 1656586434, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block {15050000, 1656586444, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // First Gray Glacier block {17034869, 1681338443, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last pre-Shanghai block - {17034870, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}}, // First Shanghai block + {17034870, 1681338479, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}}, // First Shanghai block {19000000, 1700000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}}, // Future Shanghai block (mock) }, }, @@ -114,20 +114,22 @@ func TestCreation(t *testing.T) { params.GnosisChainConfig, params.GnosisGenesisHash, []testcase{ - {0, 0, ID{Hash: checksumToBytes(0xf64909b1), Next: 1604400}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium - {1604399, 0, ID{Hash: checksumToBytes(0xf64909b1), Next: 1604400}}, // Last Byzantium block - {1604400, 0, ID{Hash: checksumToBytes(0xfde2d083), Next: 2508800}}, // First Constantinople block - 
{2508799, 0, ID{Hash: checksumToBytes(0xfde2d083), Next: 2508800}}, // Last Constantinople block - {2508800, 0, ID{Hash: checksumToBytes(0xfc1d8f2f), Next: 7298030}}, // First Petersburg block - {7298029, 0, ID{Hash: checksumToBytes(0xfc1d8f2f), Next: 7298030}}, // Last Petersburg block - {7298030, 0, ID{Hash: checksumToBytes(0x54d05e6c), Next: 9186425}}, // First Istanbul block - {9186424, 0, ID{Hash: checksumToBytes(0x54d05e6c), Next: 9186425}}, // Last Istanbul block - {9186425, 0, ID{Hash: checksumToBytes(0xb6e6cd81), Next: 16101500}}, // First POSDAO Activation block - {16101499, 0, ID{Hash: checksumToBytes(0xb6e6cd81), Next: 16101500}}, // Last POSDAO Activation block - {16101500, 0, ID{Hash: checksumToBytes(0x069a83d9), Next: 19040000}}, // First Berlin block - {19039999, 0, ID{Hash: checksumToBytes(0x069a83d9), Next: 19040000}}, // Last Berlin block - {19040000, 0, ID{Hash: checksumToBytes(0x018479d3), Next: 0}}, // First London block - {21735000, 0, ID{Hash: checksumToBytes(0x018479d3), Next: 0}}, // First GIP-31 block + {0, 0, ID{Hash: checksumToBytes(0xf64909b1), Next: 1604400}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium + {1604399, 1547205885, ID{Hash: checksumToBytes(0xf64909b1), Next: 1604400}}, // Last Byzantium block + {1604400, 1547205890, ID{Hash: checksumToBytes(0xfde2d083), Next: 2508800}}, // First Constantinople block + {2508799, 1551879340, ID{Hash: checksumToBytes(0xfde2d083), Next: 2508800}}, // Last Constantinople block + {2508800, 1551879345, ID{Hash: checksumToBytes(0xfc1d8f2f), Next: 7298030}}, // First Petersburg block + {7298029, 1576134775, ID{Hash: checksumToBytes(0xfc1d8f2f), Next: 7298030}}, // Last Petersburg block + {7298030, 1576134780, ID{Hash: checksumToBytes(0x54d05e6c), Next: 9186425}}, // First Istanbul block + {9186424, 1585729685, ID{Hash: checksumToBytes(0x54d05e6c), Next: 9186425}}, // Last Istanbul block + {9186425, 1585729690, ID{Hash: checksumToBytes(0xb6e6cd81), Next: 16101500}}, // First POSDAO Activation block + {16101499, 1621258420, ID{Hash: checksumToBytes(0xb6e6cd81), Next: 16101500}}, // Last POSDAO Activation block + {16101500, 1621258425, ID{Hash: checksumToBytes(0x069a83d9), Next: 19040000}}, // First Berlin block + {19039999, 1636753575, ID{Hash: checksumToBytes(0x069a83d9), Next: 19040000}}, // Last Berlin block + {19040000, 1636753580, ID{Hash: checksumToBytes(0x018479d3), Next: 1690889660}}, // First London block + {21735000, 1650443255, ID{Hash: checksumToBytes(0x018479d3), Next: 1690889660}}, // First GIP-31 block + {29272666, 1690889655, ID{Hash: checksumToBytes(0x018479d3), Next: 1690889660}}, // Last pre-Shanghai block (approx) + {29272667, 1690889660, ID{Hash: checksumToBytes(0x2efe91ba), Next: 0}}, // First Shanghai block (approx) }, }, // Chiado test cases @@ -136,8 +138,8 @@ func TestCreation(t *testing.T) { params.ChiadoGenesisHash, []testcase{ {0, 0, ID{Hash: checksumToBytes(0x50d39d7b), Next: 1684934220}}, - {4101957, 1684934215, ID{Hash: checksumToBytes(0x50d39d7b), Next: 1684934220}}, // Last pre-Shanghai block (approx) - {4101958, 1684934220, ID{Hash: checksumToBytes(0xa15a4252), Next: 0}}, // First Shanghai block (approx) + {4100418, 1684934215, ID{Hash: checksumToBytes(0x50d39d7b), Next: 1684934220}}, // Last pre-Shanghai block + {4100419, 1684934220, ID{Hash: checksumToBytes(0xa15a4252), Next: 0}}, // First Shanghai block }, }, } diff --git a/core/rlp_test.go b/core/rlp_test.go index f73ec1d9e43..8ef1c13ba83 100644 --- a/core/rlp_test.go +++ b/core/rlp_test.go @@ -53,20 +53,19 @@ 
func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir ) // We need to generate as many blocks +1 as uncles - chain, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, uncles+1, - func(n int, b *BlockGen) { - if n == uncles { - // Add transactions and stuff on the last block - for i := 0; i < transactions; i++ { - tx, _ := types.SignTx(types.NewTransaction(uint64(i), aa, - u256.Num0, 50000, u256.Num1, make([]byte, dataSize)), *types.LatestSignerForChainID(nil), key) - b.AddTx(tx) - } - for i := 0; i < uncles; i++ { - b.AddUncle(&types.Header{ParentHash: b.PrevBlock(n - 1 - i).Hash(), Number: big.NewInt(int64(n - i))}) - } + chain, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, uncles+1, func(n int, b *BlockGen) { + if n == uncles { + // Add transactions and stuff on the last block + for i := 0; i < transactions; i++ { + tx, _ := types.SignTx(types.NewTransaction(uint64(i), aa, + u256.Num0, 50000, u256.Num1, make([]byte, dataSize)), *types.LatestSignerForChainID(nil), key) + b.AddTx(tx) } - }, false /* intermediateHashes */) + for i := 0; i < uncles; i++ { + b.AddUncle(&types.Header{ParentHash: b.PrevBlock(n - 1 - i).Hash(), Number: big.NewInt(int64(n - i))}) + } + } + }) block := chain.TopBlock return block } diff --git a/core/state/contracts/poly.sol b/core/state/contracts/poly.sol index d0759e8edf2..f4a65244443 100644 --- a/core/state/contracts/poly.sol +++ b/core/state/contracts/poly.sol @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: LGPL-3.0 pragma solidity >=0.5.0; // solc --allow-paths ., --abi --bin --overwrite --optimize -o core/state/contracts/build core/state/contracts/poly.sol diff --git a/core/state/database_test.go b/core/state/database_test.go index c90f4075bd2..cd21c938fd4 100644 --- a/core/state/database_test.go +++ b/core/state/database_test.go @@ -125,7 +125,7 @@ func TestCreate2Revive(t *testing.T) { block.AddTx(tx) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -330,7 +330,7 @@ func TestCreate2Polymorth(t *testing.T) { block.AddTx(tx) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -493,7 +493,7 @@ func TestReorgOverSelfDestruct(t *testing.T) { block.AddTx(tx) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -516,7 +516,7 @@ func TestReorgOverSelfDestruct(t *testing.T) { block.AddTx(tx) } contractBackendLonger.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate long blocks") } @@ -635,7 +635,7 @@ func TestReorgOverStateChange(t *testing.T) { block.AddTx(tx) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -657,7 +657,7 @@ func TestReorgOverStateChange(t *testing.T) { block.AddTx(tx) } contractBackendLonger.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate longer blocks: %v", err) } @@ -784,7 +784,7 @@ func TestCreateOnExistingStorage(t *testing.T) { block.AddTx(tx) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -918,7 +918,7 @@ func TestEip2200Gas(t *testing.T) { block.AddTx(tx) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1006,7 +1006,7 @@ func TestWrongIncarnation(t *testing.T) 
{ block.AddTx(tx) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1124,7 +1124,7 @@ func TestWrongIncarnation2(t *testing.T) { block.AddTx(tx) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1154,7 +1154,7 @@ func TestWrongIncarnation2(t *testing.T) { block.AddTx(tx) } contractBackendLonger.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate longer blocks: %v", err) } @@ -1408,7 +1408,7 @@ func TestRecreateAndRewind(t *testing.T) { block.AddTx(tx) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err1 != nil { t.Fatalf("generate blocks: %v", err1) } @@ -1466,7 +1466,7 @@ func TestRecreateAndRewind(t *testing.T) { block.AddTx(tx) } contractBackendLonger.Commit() - }, false /* intermediateHashes */) + }) if err1 != nil { t.Fatalf("generate longer blocks: %v", err1) } @@ -1563,12 +1563,12 @@ func TestTxLookupUnwind(t *testing.T) { } block.AddTx(tx) } - }, false) + }) if err != nil { t.Fatal(err) } chain2, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 3, func(i int, block *core.BlockGen) { - }, false) + }) if err != nil { t.Fatal(err) } diff --git a/core/state/rw_v3.go b/core/state/rw_v3.go index a3e51223ed9..756501a8afe 100644 --- a/core/state/rw_v3.go +++ b/core/state/rw_v3.go @@ -19,13 +19,14 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/order" libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/log/v3" + btree2 "github.com/tidwall/btree" + "github.com/ledgerwatch/erigon/cmd/state/exec22" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/state/temporal" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/turbo/shards" - "github.com/ledgerwatch/log/v3" - btree2 "github.com/tidwall/btree" ) const CodeSizeTable = "CodeSize" @@ -601,7 +602,15 @@ func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, txUnwindTo uint64, ag } stateChanges := etl.NewCollector("", "", etl.NewOldestEntryBuffer(etl.BufferOptimalSize), rs.logger) defer stateChanges.Close() - actx := tx.(*temporal.Tx).AggCtx() + + var actx *libstate.AggregatorV3Context + switch ttx := tx.(type) { + case *temporal.Tx: + actx = ttx.AggCtx() + default: + actx = agg.MakeContext() + } + { iter, err := actx.AccountHistoryRange(int(txUnwindTo), -1, order.Asc, -1, tx) if err != nil { diff --git a/core/state/temporal/kv_temporal.go b/core/state/temporal/kv_temporal.go index 935a4b21025..3ebd94952d2 100644 --- a/core/state/temporal/kv_temporal.go +++ b/core/state/temporal/kv_temporal.go @@ -410,24 +410,7 @@ func (tx *Tx) HistoryGet(name kv.History, key []byte, ts uint64) (v []byte, ok b } func (tx *Tx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps iter.U64, err error) { - switch name { - case kv.AccountsHistoryIdx: - timestamps, err = tx.aggCtx.AccountHistoyIdxRange(k, fromTs, toTs, asc, limit, tx) - case kv.StorageHistoryIdx: - timestamps, err = tx.aggCtx.StorageHistoyIdxRange(k, fromTs, toTs, asc, limit, tx) - case kv.CodeHistoryIdx: - timestamps, err = tx.aggCtx.CodeHistoyIdxRange(k, fromTs, toTs, asc, limit, tx) - case kv.LogTopicIdx: - timestamps, err = tx.aggCtx.LogTopicRange(k, fromTs, toTs, asc, limit, tx) - case kv.LogAddrIdx: - timestamps, err = tx.aggCtx.LogAddrRange(k, fromTs, toTs, asc, limit, tx) - case 
kv.TracesFromIdx: - timestamps, err = tx.aggCtx.TraceFromRange(k, fromTs, toTs, asc, limit, tx) - case kv.TracesToIdx: - timestamps, err = tx.aggCtx.TraceToRange(k, fromTs, toTs, asc, limit, tx) - default: - return nil, fmt.Errorf("unexpected history name: %s", name) - } + timestamps, err = tx.aggCtx.IndexRange(name, k, fromTs, toTs, asc, limit, tx.MdbxTx) if err != nil { return nil, err } diff --git a/core/state_processor.go b/core/state_processor.go index c5af69d74a0..f4720dc649b 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -32,7 +32,9 @@ import ( // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. -func applyTransaction(config *chain.Config, engine consensus.EngineReader, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, evm vm.VMInterface, cfg vm.Config) (*types.Receipt, []byte, error) { +func applyTransaction(config *chain.Config, engine consensus.EngineReader, gp *GasPool, ibs *state.IntraBlockState, + stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas, usedDataGas *uint64, + evm vm.VMInterface, cfg vm.Config) (*types.Receipt, []byte, error) { rules := evm.ChainRules() msg, err := tx.AsMessage(*types.MakeSigner(config, header.Number.Uint64(), header.Time), header.BaseFee, rules) if err != nil { @@ -65,6 +67,9 @@ func applyTransaction(config *chain.Config, engine consensus.EngineReader, gp *G return nil, nil, err } *usedGas += result.UsedGas + if usedDataGas != nil { + *usedDataGas += tx.GetDataGas() + } // Set the receipt logs and create the bloom filter. // based on the eip phase, we're passing whether the root touch-delete accounts. @@ -97,7 +102,10 @@ func applyTransaction(config *chain.Config, engine consensus.EngineReader, gp *G // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. 
-func ApplyTransaction(config *chain.Config, blockHashFunc func(n uint64) libcommon.Hash, engine consensus.EngineReader, author *libcommon.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, []byte, error) { +func ApplyTransaction(config *chain.Config, blockHashFunc func(n uint64) libcommon.Hash, engine consensus.EngineReader, + author *libcommon.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, + header *types.Header, tx types.Transaction, usedGas, usedDataGas *uint64, cfg vm.Config, +) (*types.Receipt, []byte, error) { // Create a new context to be used in the EVM environment // Add addresses to access list if applicable @@ -107,5 +115,5 @@ func ApplyTransaction(config *chain.Config, blockHashFunc func(n uint64) libcomm blockContext := NewEVMBlockContext(header, blockHashFunc, engine, author) vmenv := vm.NewEVM(blockContext, evmtypes.TxContext{}, ibs, config, cfg) - return applyTransaction(config, engine, gp, ibs, stateWriter, header, tx, usedGas, vmenv, cfg) + return applyTransaction(config, engine, gp, ibs, stateWriter, header, tx, usedGas, usedDataGas, vmenv, cfg) } diff --git a/core/state_transition.go b/core/state_transition.go index de0e1252726..b2d18929f6f 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -100,10 +100,9 @@ type Message interface { // ExecutionResult includes all output after executing given evm // message no matter the execution itself is successful or not. type ExecutionResult struct { - UsedGas uint64 // Total used gas but include the refunded gas - UsedDataGas uint64 // Total data gas used - Err error // Any error encountered during the execution(listed in core/vm/errors.go) - ReturnData []byte // Returned data from evm(function result or data supplied with revert opcode) + UsedGas uint64 // Total used gas but include the refunded gas + Err error // Any error encountered during the execution(listed in core/vm/errors.go) + ReturnData []byte // Returned data from evm(function result or data supplied with revert opcode) } // Unwrap returns the internal evm error which allows us for further @@ -204,7 +203,6 @@ func (st *StateTransition) buyGas(gasBailout bool) error { // compute data fee for eip-4844 data blobs if any dgval := new(uint256.Int) - var dataGasUsed uint64 if st.evm.ChainRules().IsCancun { if st.evm.Context().ExcessDataGas == nil { return fmt.Errorf("%w: Cancun is active but ExcessDataGas is nil", ErrInternalFailure) @@ -213,10 +211,13 @@ func (st *StateTransition) buyGas(gasBailout bool) error { if err != nil { return err } - _, overflow = dgval.MulOverflow(dataGasPrice, new(uint256.Int).SetUint64(st.dataGasUsed())) + _, overflow = dgval.MulOverflow(dataGasPrice, new(uint256.Int).SetUint64(st.msg.DataGas())) if overflow { return fmt.Errorf("%w: overflow converting datagas: %v", ErrInsufficientFunds, dgval) } + if err := st.gp.SubDataGas(st.msg.DataGas()); err != nil { + return err + } } balanceCheck := mgval @@ -251,10 +252,6 @@ func (st *StateTransition) buyGas(gasBailout bool) error { st.gas += st.msg.Gas() st.initialGas = st.msg.Gas() - if err := st.gp.SubDataGas(dataGasUsed); err != nil { - return err - } - if subBalance { st.state.SubBalance(st.msg.From(), mgval) st.state.SubBalance(st.msg.From(), dgval) @@ -309,7 +306,7 @@ func (st *StateTransition) preCheck(gasBailout bool) error { } } } - if st.dataGasUsed() > 0 && st.evm.ChainRules().IsCancun { + if st.msg.DataGas() > 0 
&& st.evm.ChainRules().IsCancun { if st.evm.Context().ExcessDataGas == nil { return fmt.Errorf("%w: Cancun is active but ExcessDataGas is nil", ErrInternalFailure) } @@ -467,10 +464,9 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*Executi } return &ExecutionResult{ - UsedGas: st.gasUsed(), - UsedDataGas: st.dataGasUsed(), - Err: vmerr, - ReturnData: ret, + UsedGas: st.gasUsed(), + Err: vmerr, + ReturnData: ret, }, nil } @@ -495,7 +491,3 @@ func (st *StateTransition) refundGas(refundQuotient uint64) { func (st *StateTransition) gasUsed() uint64 { return st.initialGas - st.gas } - -func (st *StateTransition) dataGasUsed() uint64 { - return misc.GetDataGasUsed(len(st.msg.DataHashes())) -} diff --git a/core/types/blob_tx.go b/core/types/blob_tx.go index cd5d2e5bdea..aa5c2248e7c 100644 --- a/core/types/blob_tx.go +++ b/core/types/blob_tx.go @@ -12,7 +12,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" types2 "github.com/ledgerwatch/erigon-lib/types" - "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" ) @@ -43,7 +42,7 @@ func (stx BlobTx) GetDataHashes() []libcommon.Hash { } func (stx BlobTx) GetDataGas() uint64 { - return params.DataGasPerBlob * uint64(len(stx.BlobVersionedHashes)) + return chain.DataGasPerBlob * uint64(len(stx.BlobVersionedHashes)) } func (stx BlobTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (Message, error) { @@ -305,11 +304,11 @@ func (stx *BlobTx) DecodeRLP(s *rlp.Stream) error { // decode BlobVersionedHashes stx.BlobVersionedHashes = []libcommon.Hash{} - if err = decodeBlobVersionedHashes(stx.BlobVersionedHashes, s); err != nil { + if err = decodeBlobVersionedHashes(&stx.BlobVersionedHashes, s); err != nil { return err } - // decode y_parity + // decode V if b, err = s.Uint256Bytes(); err != nil { return err } @@ -329,7 +328,7 @@ func (stx *BlobTx) DecodeRLP(s *rlp.Stream) error { return s.ListEnd() } -func decodeBlobVersionedHashes(hashes []libcommon.Hash, s *rlp.Stream) error { +func decodeBlobVersionedHashes(hashes *[]libcommon.Hash, s *rlp.Stream) error { _, err := s.List() if err != nil { return fmt.Errorf("open BlobVersionedHashes: %w", err) @@ -340,7 +339,7 @@ func decodeBlobVersionedHashes(hashes []libcommon.Hash, s *rlp.Stream) error { for b, err = s.Bytes(); err == nil; b, err = s.Bytes() { if len(b) == 32 { copy((_hash)[:], b) - hashes = append(hashes, _hash) + *hashes = append(*hashes, _hash) } else { return fmt.Errorf("wrong size for blobVersionedHashes: %d, %v", len(b), b[0]) } diff --git a/core/types/blob_tx_wrapper.go b/core/types/blob_tx_wrapper.go index 85a72fa8d83..5f4fd6c4e5c 100644 --- a/core/types/blob_tx_wrapper.go +++ b/core/types/blob_tx_wrapper.go @@ -9,10 +9,12 @@ import ( gokzg4844 "github.com/crate-crypto/go-kzg-4844" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" libkzg "github.com/ledgerwatch/erigon-lib/crypto/kzg" types2 "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" ) @@ -40,7 +42,7 @@ type BlobTxWrapper struct { /* Blob methods */ func (b *Blob) payloadSize() int { - size := 1 // 0xb7 + size := 1 // 0xb7..0xbf size += libcommon.BitLenToByteLen(bits.Len(LEN_BLOB)) // params.FieldElementsPerBlob * 32 = 131072 (length encoding size) size += LEN_BLOB // byte_array it self return size @@ -55,11 +57,7 @@ func (li BlobKzgs) copy() BlobKzgs { } func (li BlobKzgs) payloadSize() int { - size := 49 * len(li) - if 
size >= 56 { - size += libcommon.BitLenToByteLen(bits.Len(uint(size))) // BE encoding of the length of hashes - } - return size + return 49 * len(li) } func (li BlobKzgs) encodePayload(w io.Writer, b []byte, payloadSize int) error { @@ -68,15 +66,35 @@ func (li BlobKzgs) encodePayload(w io.Writer, b []byte, payloadSize int) error { return err } - b[0] = 128 + LEN_48 - for _, arr := range li { - if _, err := w.Write(b[:1]); err != nil { + for _, cmtmt := range li { + if err := rlp.EncodeString(cmtmt[:], w, b); err != nil { return err } - if _, err := w.Write(arr[:]); err != nil { - return err + } + return nil +} + +func (li *BlobKzgs) DecodeRLP(s *rlp.Stream) error { + _, err := s.List() + if err != nil { + return fmt.Errorf("open BlobKzgs (Commitments): %w", err) + } + var b []byte + cmtmt := KZGCommitment{} + + for b, err = s.Bytes(); err == nil; b, err = s.Bytes() { + if len(b) == LEN_48 { + copy((cmtmt)[:], b) + *li = append(*li, cmtmt) + } else { + return fmt.Errorf("wrong size for BlobKzgs (Commitments): %d, %v", len(b), b[0]) } } + + if err = s.ListEnd(); err != nil { + return fmt.Errorf("close BlobKzgs (Commitments): %w", err) + } + return nil } @@ -89,11 +107,7 @@ func (li KZGProofs) copy() KZGProofs { } func (li KZGProofs) payloadSize() int { - size := 49 * len(li) - if size >= 56 { - size += libcommon.BitLenToByteLen(bits.Len(uint(size))) // BE encoding of the length of hashes - } - return size + return 49 * len(li) } func (li KZGProofs) encodePayload(w io.Writer, b []byte, payloadSize int) error { @@ -102,15 +116,36 @@ func (li KZGProofs) encodePayload(w io.Writer, b []byte, payloadSize int) error return err } - b[0] = 128 + LEN_48 - for _, arr := range li { - if _, err := w.Write(b[:1]); err != nil { + for _, proof := range li { + if err := rlp.EncodeString(proof[:], w, b); err != nil { return err } - if _, err := w.Write(arr[:]); err != nil { - return err + } + return nil +} + +func (li *KZGProofs) DecodeRLP(s *rlp.Stream) error { + _, err := s.List() + + if err != nil { + return fmt.Errorf("open KZGProofs (Proofs): %w", err) + } + var b []byte + proof := KZGProof{} + + for b, err = s.Bytes(); err == nil; b, err = s.Bytes() { + if len(b) == LEN_48 { + copy((proof)[:], b) + *li = append(*li, proof) + } else { + return fmt.Errorf("wrong size for KZGProofs (Proofs): %d, %v", len(b), b[0]) } } + + if err = s.ListEnd(); err != nil { + return fmt.Errorf("close KZGProofs (Proofs): %w", err) + } + return nil } @@ -123,12 +158,10 @@ func (blobs Blobs) copy() Blobs { } func (blobs Blobs) payloadSize() int { - total := 0 if len(blobs) > 0 { - total = len(blobs) * blobs[0].payloadSize() - total += libcommon.BitLenToByteLen(bits.Len(uint(total))) + return len(blobs) * blobs[0].payloadSize() } - return total + return 0 } func (blobs Blobs) encodePayload(w io.Writer, b []byte, payloadSize int) error { @@ -137,15 +170,36 @@ func (blobs Blobs) encodePayload(w io.Writer, b []byte, payloadSize int) error { return err } - for _, arr := range blobs { - if err := rlp.EncodeStringSizePrefix(LEN_BLOB, w, b); err != nil { + for _, blob := range blobs { + if err := rlp.EncodeString(blob[:], w, b); err != nil { return err } - if _, err := w.Write(arr[:]); err != nil { - return err + } + + return nil +} + +func (blobs *Blobs) DecodeRLP(s *rlp.Stream) error { + _, err := s.List() + if err != nil { + return fmt.Errorf("open Blobs: %w", err) + } + var b []byte + blob := Blob{} + + for b, err = s.Bytes(); err == nil; b, err = s.Bytes() { + if len(b) == LEN_BLOB { + copy((blob)[:], b) + *blobs = 
append(*blobs, blob) + } else { + return fmt.Errorf("wrong size for Blobs: %d, %v", len(b), b[0]) } } + if err = s.ListEnd(); err != nil { + return fmt.Errorf("close Blobs: %w", err) + } + return nil } @@ -293,32 +347,55 @@ func (txw *BlobTxWrapper) IsContractDeploy() bool { return txw.Tx.IsContractDepl func (txw *BlobTxWrapper) Unwrap() Transaction { return &txw.Tx } func (txw BlobTxWrapper) EncodingSize() int { - txSize, commitmentsSize, proofsSize, blobsSize := txw.payloadSize() - payloadSize := txSize + commitmentsSize + proofsSize + blobsSize - envelopeSize := payloadSize + total, _, _, _, _ := txw.payloadSize() + envelopeSize := total // Add envelope size and type size - if payloadSize >= 56 { - envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(payloadSize))) + if total >= 56 { + envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(total))) } envelopeSize += 2 return envelopeSize } -func (txw BlobTxWrapper) payloadSize() (int, int, int, int) { +func (txw BlobTxWrapper) payloadSize() (int, int, int, int, int) { + total := 1 txSize, _, _, _, _ := txw.Tx.payloadSize() + if txSize >= 56 { + total += libcommon.BitLenToByteLen(bits.Len(uint(txSize))) + } + total += txSize + + total++ commitmentsSize := txw.Commitments.payloadSize() - proofsSize := txw.Proofs.payloadSize() + if commitmentsSize >= 56 { + total += libcommon.BitLenToByteLen(bits.Len(uint(commitmentsSize))) + } + total += commitmentsSize + + total++ blobsSize := txw.Blobs.payloadSize() - return txSize, commitmentsSize, proofsSize, blobsSize + if blobsSize >= 56 { + total += libcommon.BitLenToByteLen(bits.Len(uint(blobsSize))) + } + total += blobsSize + + total++ + proofsSize := txw.Proofs.payloadSize() + if proofsSize >= 56 { + total += libcommon.BitLenToByteLen(bits.Len(uint(proofsSize))) + } + total += proofsSize + return total, txSize, commitmentsSize, blobsSize, proofsSize } -func (txw BlobTxWrapper) encodePayload(w io.Writer, b []byte, payloadSize, commitmentsSize, proofsSize, blobsSize int) error { +func (txw BlobTxWrapper) encodePayload(w io.Writer, b []byte, total, txSize, commitmentsSize, blobsSize, proofsSize int) error { // prefix, encode txw payload size - if err := EncodeStructSizePrefix(payloadSize, w, b); err != nil { + if err := EncodeStructSizePrefix(total, w, b); err != nil { return err } txPayloadSize, nonceLen, gasLen, accessListLen, blobHashesLen := txw.Tx.payloadSize() + if err := txw.Tx.encodePayload(w, b, txPayloadSize, nonceLen, gasLen, accessListLen, blobHashesLen); err != nil { return err } @@ -330,20 +407,31 @@ func (txw BlobTxWrapper) encodePayload(w io.Writer, b []byte, payloadSize, commi if err := txw.Blobs.encodePayload(w, b, blobsSize); err != nil { return err } - txw.Proofs.encodePayload(w, b, proofsSize) + if err := txw.Proofs.encodePayload(w, b, proofsSize); err != nil { + return err + } return nil } func (txw *BlobTxWrapper) MarshalBinary(w io.Writer) error { + total, txSize, commitmentsSize, blobsSize, proofsSize := txw.payloadSize() + var b [33]byte + // encode TxType + b[0] = BlobTxType + if _, err := w.Write(b[:1]); err != nil { + return err + } + if err := txw.encodePayload(w, b[:], total, txSize, commitmentsSize, blobsSize, proofsSize); err != nil { + return err + } return nil } func (txw BlobTxWrapper) EncodeRLP(w io.Writer) error { - txSize, commitmentsSize, proofsSize, blobsSize := txw.payloadSize() - payloadSize := txSize + commitmentsSize + proofsSize + blobsSize - envelopeSize := payloadSize - if payloadSize >= 56 { - envelopeSize += 
libcommon.BitLenToByteLen(bits.Len(uint(payloadSize))) + total, txSize, commitmentsSize, proofsSize, blobsSize := txw.payloadSize() + envelopeSize := total + if total >= 56 { + envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(total))) } // size of struct prefix and TxType envelopeSize += 2 @@ -357,13 +445,33 @@ func (txw BlobTxWrapper) EncodeRLP(w io.Writer) error { if _, err := w.Write(b[:1]); err != nil { return err } - if err := txw.encodePayload(w, b[:], payloadSize, commitmentsSize, proofsSize, blobsSize); err != nil { + if err := txw.encodePayload(w, b[:], total, txSize, commitmentsSize, proofsSize, blobsSize); err != nil { return err } return nil } -func (txw BlobTxWrapper) DecodeRLP(s *rlp.Stream) error { - // TODO - return nil +func (txw *BlobTxWrapper) DecodeRLP(s *rlp.Stream) error { + _, err := s.List() + if err != nil { + return err + } + + if err := txw.Tx.DecodeRLP(s); err != nil { + return err + } + + if err := txw.Commitments.DecodeRLP(s); err != nil { + return err + } + + if err := txw.Blobs.DecodeRLP(s); err != nil { + return err + } + + if err := txw.Proofs.DecodeRLP(s); err != nil { + return err + } + + return s.ListEnd() } diff --git a/core/types/transaction.go b/core/types/transaction.go index c041babfcf1..ba16187424d 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -27,16 +27,16 @@ import ( "time" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "github.com/protolambda/ztyp/codec" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" types2 "github.com/ledgerwatch/erigon-lib/types" - "github.com/ledgerwatch/log/v3" - "github.com/protolambda/ztyp/codec" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" ) @@ -589,7 +589,7 @@ func (m *Message) ChangeGas(globalGasCap, desiredGas uint64) { m.gasLimit = gas } -func (m Message) DataGas() uint64 { return params.DataGasPerBlob * uint64(len(m.dataHashes)) } +func (m Message) DataGas() uint64 { return chain.DataGasPerBlob * uint64(len(m.dataHashes)) } func (m Message) MaxFeePerDataGas() *uint256.Int { return &m.maxFeePerDataGas } diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go index 671346706c2..9373e37d50a 100644 --- a/core/types/transaction_marshalling.go +++ b/core/types/transaction_marshalling.go @@ -415,131 +415,117 @@ func (tx *DynamicFeeTransaction) UnmarshalJSON(input []byte) error { } func UnmarshalBlobTxJSON(input []byte) (Transaction, error) { - // var dec txJSON - // if err := json.Unmarshal(input, &dec); err != nil { - // return nil, err - // } - // tx := SignedBlobTx{} - // if dec.AccessList != nil { - // tx.Message.AccessList = AccessListView(*dec.AccessList) - // } else { - // tx.Message.AccessList = AccessListView([]types2.AccessTuple{}) - // } - // if dec.ChainID == nil { - // return nil, errors.New("missing required field 'chainId' in transaction") - // } - // chainID, overflow := uint256.FromBig(dec.ChainID.ToInt()) - // if overflow { - // return nil, errors.New("'chainId' in transaction does not fit in 256 bits") - // } - // tx.Message.ChainID = Uint256View(*chainID) - // if dec.To != nil { - // address := AddressSSZ(*dec.To) - // tx.Message.To = AddressOptionalSSZ{Address: &address} - // } - // if dec.Nonce == nil { - // return nil, errors.New("missing required field 'nonce' in transaction") - // } - // tx.Message.Nonce = 
Uint64View(uint64(*dec.Nonce)) - // tip, overflow := uint256.FromBig(dec.Tip.ToInt()) - // if overflow { - // return nil, errors.New("'tip' in transaction does not fit in 256 bits") - // } - // tx.Message.GasTipCap = Uint256View(*tip) - // feeCap, overflow := uint256.FromBig(dec.FeeCap.ToInt()) - // if overflow { - // return nil, errors.New("'feeCap' in transaction does not fit in 256 bits") - // } - // tx.Message.GasFeeCap = Uint256View(*feeCap) - // if dec.Gas == nil { - // return nil, errors.New("missing required field 'gas' in transaction") - // } - // tx.Message.Gas = Uint64View(uint64(*dec.Gas)) - // if dec.Value == nil { - // return nil, errors.New("missing required field 'value' in transaction") - // } - // value, overflow := uint256.FromBig(dec.Value.ToInt()) - // if overflow { - // return nil, errors.New("'value' in transaction does not fit in 256 bits") - // } - // tx.Message.Value = Uint256View(*value) - // if dec.Data == nil { - // return nil, errors.New("missing required field 'input' in transaction") - // } - // tx.Message.Data = TxDataView(*dec.Data) - - // if dec.MaxFeePerDataGas == nil { - // return nil, errors.New("missing required field 'maxFeePerDataGas' in transaction") - // } - // maxFeePerDataGas, overflow := uint256.FromBig(dec.MaxFeePerDataGas.ToInt()) - // if overflow { - // return nil, errors.New("'maxFeePerDataGas' in transaction does not fit in 256 bits") - // } - // tx.Message.MaxFeePerDataGas = Uint256View(*maxFeePerDataGas) - - // if dec.BlobVersionedHashes != nil { - // tx.Message.BlobVersionedHashes = VersionedHashesView(dec.BlobVersionedHashes) - // } else { - // tx.Message.BlobVersionedHashes = VersionedHashesView([]libcommon.Hash{}) - // } - - // if dec.V == nil { - // return nil, errors.New("missing required field 'v' in transaction") - // } - // var v uint256.Int - // overflow = v.SetFromBig(dec.V.ToInt()) - // if overflow { - // return nil, fmt.Errorf("dec.V higher than 2^256-1") - // } - // if v.Uint64() > 255 { - // return nil, fmt.Errorf("dev.V higher than 2^8 - 1") + var dec txJSON + if err := json.Unmarshal(input, &dec); err != nil { + return nil, err + } + tx := BlobTx{} + if dec.AccessList != nil { + tx.AccessList = *dec.AccessList + } else { + tx.AccessList = []types2.AccessTuple{} + } + if dec.ChainID == nil { + return nil, errors.New("missing required field 'chainId' in transaction") + } + chainID, overflow := uint256.FromBig(dec.ChainID.ToInt()) + if overflow { + return nil, errors.New("'chainId' in transaction does not fit in 256 bits") + } + tx.ChainID = chainID + if dec.To != nil { + tx.To = dec.To + } + if dec.Nonce == nil { + return nil, errors.New("missing required field 'nonce' in transaction") + } + tx.Nonce = uint64(*dec.Nonce) + // if dec.GasPrice == nil { // do we need gasPrice here? 
+ // return nil, errors.New("missing required field 'gasPrice' in transaction") // } + tx.Tip, overflow = uint256.FromBig(dec.Tip.ToInt()) + if overflow { + return nil, errors.New("'tip' in transaction does not fit in 256 bits") + } + tx.FeeCap, overflow = uint256.FromBig(dec.FeeCap.ToInt()) + if overflow { + return nil, errors.New("'feeCap' in transaction does not fit in 256 bits") + } + if dec.Gas == nil { + return nil, errors.New("missing required field 'gas' in transaction") + } + tx.Gas = uint64(*dec.Gas) + if dec.Value == nil { + return nil, errors.New("missing required field 'value' in transaction") + } + tx.Value, overflow = uint256.FromBig(dec.Value.ToInt()) + if overflow { + return nil, errors.New("'value' in transaction does not fit in 256 bits") + } + if dec.Data == nil { + return nil, errors.New("missing required field 'input' in transaction") + } + tx.Data = *dec.Data - // tx.Signature.V = Uint8View(v.Uint64()) + if dec.MaxFeePerDataGas == nil { + return nil, errors.New("missing required field 'maxFeePerDataGas' in transaction") + } - // if dec.R == nil { - // return nil, errors.New("missing required field 'r' in transaction") - // } - // var r uint256.Int - // overflow = r.SetFromBig(dec.R.ToInt()) - // if overflow { - // return nil, fmt.Errorf("dec.R higher than 2^256-1") - // } - // tx.Signature.R = Uint256View(r) + maxFeePerDataGas, overflow := uint256.FromBig(dec.MaxFeePerDataGas.ToInt()) + if overflow { + return nil, errors.New("'maxFeePerDataGas' in transaction does not fit in 256 bits") + } + tx.MaxFeePerDataGas = maxFeePerDataGas - // if dec.S == nil { - // return nil, errors.New("missing required field 's' in transaction") - // } - // var s uint256.Int - // overflow = s.SetFromBig(dec.S.ToInt()) - // if overflow { - // return nil, errors.New("'s' in transaction does not fit in 256 bits") - // } - // tx.Signature.S = Uint256View(s) + if dec.BlobVersionedHashes != nil { + tx.BlobVersionedHashes = dec.BlobVersionedHashes + } else { + tx.BlobVersionedHashes = []libcommon.Hash{} + } - // withSignature := !v.IsZero() || !r.IsZero() || !s.IsZero() - // if withSignature { - // if err := sanityCheckSignature(&v, &r, &s, false); err != nil { - // return nil, err - // } - // } + if dec.V == nil { + return nil, errors.New("missing required field 'v' in transaction") + } + overflow = tx.V.SetFromBig(dec.V.ToInt()) + if overflow { + return nil, fmt.Errorf("dec.V higher than 2^256-1") + } + if dec.R == nil { + return nil, errors.New("missing required field 'r' in transaction") + } + overflow = tx.R.SetFromBig(dec.R.ToInt()) + if overflow { + return nil, fmt.Errorf("dec.R higher than 2^256-1") + } + if dec.S == nil { + return nil, errors.New("missing required field 's' in transaction") + } + overflow = tx.S.SetFromBig(dec.S.ToInt()) + if overflow { + return nil, fmt.Errorf("dec.S higher than 2^256-1") + } - // if len(dec.Blobs) == 0 { - // // if no blobs are specified in the json we assume it is an unwrapped blob tx - // return &tx, nil - // } + withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero() + if withSignature { + if err := sanityCheckSignature(&tx.V, &tx.R, &tx.S, false); err != nil { + return nil, err + } + } - // btx := BlobTxWrapper{ - // Tx: tx, - // Commitments: dec.Commitments, - // Blobs: dec.Blobs, - // Proofs: dec.Proofs, - // } - // err := btx.ValidateBlobTransactionWrapper() - // if err != nil { - // return nil, err - // } - // return &btx, nil + if len(dec.Blobs) == 0 { + // if no blobs are specified in the json we assume it is an unwrapped blob tx 
+ return &tx, nil + } - return nil, nil + btx := BlobTxWrapper{ + Tx: tx, + Commitments: dec.Commitments, + Blobs: dec.Blobs, + Proofs: dec.Proofs, + } + err := btx.ValidateBlobTransactionWrapper() + if err != nil { + return nil, err + } + return &btx, nil } diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 838471f1ff2..f1b34ca4aec 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -561,6 +561,19 @@ func encodeDecodeBinary(tx Transaction) (Transaction, error) { return parsedTx, nil } +func encodeDecodeWrappedBinary(tx *BlobTxWrapper) (*BlobTxWrapper, error) { + var buf bytes.Buffer + var err error + if err = tx.MarshalBinary(&buf); err != nil { + return nil, fmt.Errorf("rlp encoding failed: %w", err) + } + var parsedTx Transaction + if parsedTx, err = UnmarshalWrappedTransactionFromBinary(buf.Bytes()); err != nil { + return nil, fmt.Errorf("rlp decoding failed: %w", err) + } + return parsedTx.(*BlobTxWrapper), nil +} + func assertEqual(orig Transaction, cpy Transaction) error { // compare nonce, price, gaslimit, recipient, amount, payload, V, R, S if want, got := orig.Hash(), cpy.Hash(); want != got { @@ -580,10 +593,39 @@ func assertEqual(orig Transaction, cpy Transaction) error { return nil } +func assertEqualBlobWrapper(orig *BlobTxWrapper, cpy *BlobTxWrapper) error { + // compare commitments, blobs, proofs + if want, got := len(orig.Commitments), len(cpy.Commitments); want != got { + return fmt.Errorf("parsed tx commitments have unequal size: want%v, got %v", want, got) + } + + if want, got := len(orig.Blobs), len(cpy.Blobs); want != got { + return fmt.Errorf("parsed tx blobs have unequal size: want%v, got %v", want, got) + } + + if want, got := len(orig.Proofs), len(cpy.Proofs); want != got { + return fmt.Errorf("parsed tx proofs have unequal size: want%v, got %v", want, got) + } + + if want, got := orig.Commitments, cpy.Commitments; !reflect.DeepEqual(want, got) { + return fmt.Errorf("parsed tx commitments unequal: want%v, got %v", want, got) + } + + if want, got := orig.Blobs, cpy.Blobs; !reflect.DeepEqual(want, got) { + return fmt.Errorf("parsed tx blobs unequal: want%v, got %v", want, got) + } + + if want, got := orig.Proofs, cpy.Proofs; !reflect.DeepEqual(want, got) { + return fmt.Errorf("parsed tx proofs unequal: want%v, got %v", want, got) + } + + return nil +} + const N = 50 var dummyBlobTxs = [N]*BlobTx{} -var addr [20]byte +var dummyBlobWrapperTxs = [N]*BlobTxWrapper{} func randIntInRange(min, max int) int { return (rand.Intn(max-min) + min) @@ -642,9 +684,12 @@ func newRandBlobTx() *BlobTx { To: randAddr(), Value: uint256.NewInt(rand.Uint64()), Data: randData(), - V: *uint256.NewInt(uint64(rand.Intn(2))), - R: *uint256.NewInt(rand.Uint64()), - S: *uint256.NewInt(rand.Uint64()), + // V: *uint256.NewInt(rand.Uint64()), + // R: *uint256.NewInt(rand.Uint64()), + // S: *uint256.NewInt(rand.Uint64()), + V: *uint256.NewInt(0), + R: *uint256.NewInt(rand.Uint64()), + S: *uint256.NewInt(rand.Uint64()), }, ChainID: uint256.NewInt(rand.Uint64()), Tip: uint256.NewInt(rand.Uint64()), @@ -652,7 +697,7 @@ func newRandBlobTx() *BlobTx { AccessList: randAccessList(), }, MaxFeePerDataGas: uint256.NewInt(rand.Uint64()), - BlobVersionedHashes: randHashes(randIntInRange(5, 10)), + BlobVersionedHashes: randHashes(randIntInRange(0, 6)), } return stx } @@ -670,19 +715,92 @@ func printSTX(stx *BlobTx) { fmt.Printf("AccessList: %v\n", stx.AccessList) fmt.Printf("MaxFeePerDataGas: %v\n", stx.MaxFeePerDataGas) 
fmt.Printf("BlobVersionedHashes: %v\n", stx.BlobVersionedHashes) - fmt.Printf("YParity: %v\n", stx.V) + fmt.Printf("V: %v\n", stx.V) fmt.Printf("R: %v\n", stx.R) fmt.Printf("S: %v\n", stx.S) fmt.Println("-----") fmt.Println() } +func printSTXW(txw *BlobTxWrapper) { + fmt.Println("--BlobTxWrapper") + printSTX(&txw.Tx) + fmt.Printf("Commitments LEN: %v\n", txw.Commitments) + fmt.Printf("Proofs LEN: %v\n", txw.Proofs) + fmt.Println("-----") + fmt.Println() +} + +func randByte() byte { + return byte(rand.Intn(256)) +} + +func newRandCommitments(size int) BlobKzgs { + var result BlobKzgs + for i := 0; i < size; i++ { + var arr [LEN_48]byte + for j := 0; j < LEN_48; j++ { + arr[j] = randByte() + } + result = append(result, arr) + } + return result +} + +func newRandProofs(size int) KZGProofs { + var result KZGProofs + for i := 0; i < size; i++ { + var arr [LEN_48]byte + for j := 0; j < LEN_48; j++ { + arr[j] = randByte() + } + result = append(result, arr) + } + return result +} + +func newRandBlobs(size int) Blobs { + var result Blobs + for i := 0; i < size; i++ { + var arr [LEN_BLOB]byte + for j := 0; j < LEN_BLOB; j++ { + arr[j] = randByte() + } + result = append(result, arr) + } + return result +} + +func newRandBlobWrapper() *BlobTxWrapper { + btxw := *newRandBlobTx() + l := len(btxw.BlobVersionedHashes) + return &BlobTxWrapper{ + Tx: btxw, + Commitments: newRandCommitments(l), + Blobs: newRandBlobs(l), + Proofs: newRandProofs(l), + } +} + func populateBlobTxs() { for i := 0; i < N; i++ { dummyBlobTxs[i] = newRandBlobTx() } } +func populateBlobWrapperTxs() { + for i := 0; i < N-1; i++ { + dummyBlobWrapperTxs[i] = newRandBlobWrapper() + } + + dummyBlobWrapperTxs[N-1] = &BlobTxWrapper{ + Tx: *newRandBlobTx(), + Commitments: nil, + Blobs: nil, + Proofs: nil, + } +} + func TestBlobTxEncodeDecode(t *testing.T) { rand.Seed(time.Now().UnixNano()) populateBlobTxs() @@ -693,6 +811,50 @@ func TestBlobTxEncodeDecode(t *testing.T) { if err != nil { t.Fatal(err) } - assertEqual(dummyBlobTxs[i], tx) + if err := assertEqual(dummyBlobTxs[i], tx); err != nil { + t.Fatal(err) + } + + // JSON + tx, err = encodeDecodeJSON(dummyBlobTxs[i]) + if err != nil { + t.Fatal(err) + } + if err = assertEqual(dummyBlobTxs[i], tx); err != nil { + t.Fatal(err) + } + + } +} + +func TestBlobTxWrappedEncodeDecode(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + populateBlobWrapperTxs() + for i := 0; i < N; i++ { + tx, err := encodeDecodeWrappedBinary(dummyBlobWrapperTxs[i]) + if err != nil { + t.Fatal(err) + } + if err := assertEqual(dummyBlobWrapperTxs[i], tx); err != nil { + t.Fatal(err) + } + if err := assertEqualBlobWrapper(dummyBlobWrapperTxs[i], tx); err != nil { + t.Fatal(err) + } + + // JSON + // fails in ValidateBlobTransactionWrapper() + // error during proof verification: invalid infinity point encoding + + // jtx, err := encodeDecodeJSON(dummyBlobWrapperTxs[i]) + // if err != nil { + // t.Fatal(err) + // } + // if err = assertEqual(dummyBlobWrapperTxs[i], jtx); err != nil { + // t.Fatal(err) + // } + // if err := assertEqualBlobWrapper(dummyBlobWrapperTxs[i], jtx.(*BlobTxWrapper)); err != nil { + // t.Fatal(err) + // } } } diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go index a5843dfeac7..8ee4406fcbf 100644 --- a/crypto/crypto_test.go +++ b/crypto/crypto_test.go @@ -20,6 +20,7 @@ import ( "bytes" "crypto/ecdsa" "encoding/hex" + "golang.org/x/crypto/sha3" "os" "reflect" "testing" @@ -52,6 +53,33 @@ func TestKeccak256Hasher(t *testing.T) { checkhash(t, "Sha3-256-array", func(in []byte) []byte { h := 
HashData(hasher, in); return h[:] }, msg, exp) } +func TestKeccak256HasherNew(t *testing.T) { + msg := []byte("abc") + exp, _ := hex.DecodeString("3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532") + hasher := sha3.New256() + hasher.Write(msg) + var h libcommon.Hash + if !bytes.Equal(exp, hasher.Sum(h[:0])) { + t.Fatalf("hash %s mismatch: want: %x have: %x", "new", exp, h[:]) + } +} + +func TestKeccak256HasherMulti(t *testing.T) { + exp1, _ := hex.DecodeString("d341f310fa772d37e6966b84b37ad760811d784729b641630f6a03f729e1e20e") + exp2, _ := hex.DecodeString("6de9c0166df098306abb98b112c0834c29eedee6fcba804c7c4f4568204c9d81") + hasher := NewKeccakState() + d1, _ := hex.DecodeString("1234") + hasher.Write(d1) + d2, _ := hex.DecodeString("cafe") + hasher.Write(d2) + d3, _ := hex.DecodeString("babe") + hasher.Write(d3) + checkhash(t, "multi1", func(in []byte) []byte { var h libcommon.Hash; return hasher.Sum(h[:0]) }, []byte{}, exp1) + d4, _ := hex.DecodeString("5678") + hasher.Write(d4) + checkhash(t, "multi2", func(in []byte) []byte { var h libcommon.Hash; return hasher.Sum(h[:0]) }, []byte{}, exp2) +} + func TestToECDSAErrors(t *testing.T) { if _, err := HexToECDSA("0000000000000000000000000000000000000000000000000000000000000000"); err == nil { t.Fatal("HexToECDSA should've returned error") diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go index 7aa0339212f..bd4ecfb087d 100644 --- a/crypto/ecies/ecies_test.go +++ b/crypto/ecies/ecies_test.go @@ -440,3 +440,13 @@ func decode(s string) []byte { } return bytes } + +func TestDecrypt(t *testing.T) { + data, _ := hex.DecodeString("043c8d19a2957e1f259cf325ad4c7f60a94bead921c7cedc135600511d51ee1d7f44d72fde3b9c9506dd3e6c69f4c10c910ea4257e42cd4335531cb2add1aed3b47e568f1473487279fdac238aa323409df92235a13d8a9036ac8d2ad3968c5f0483cd7a5fd6a441e520870644d3c61a630229b01f3e19fbd25e751ec9cfa5782abcd48a5ee406742d20a329e005761316f6963b0ec4b50f2ec3bbb022227961893a51ae568094267f27babeae3b452de67cd084fb5d03c635d7cebba86f8814b469ead9dad2504b79ca6e08e8f1db59747470054c61638000687b04a83af75111e196d253ef42697da2dd11c2bf67796b8f273a5161d7fdcfbc77332f3e0872dede7c33d6671b0b7fc7bf62db549123b0dfa66a2d76dd921faf9de35522863c8b7bc3d1a37af2d1b7f347bfdcf29b3fb7b038b86e22bd3b1a8e5b2520c52ea4ac1ce968672325bc1332b0966d2c5280b6980431e86792a485e5402aada661c6c848635d0fee662dcaa117249d346f875ffe7d85de9f6fa146d9f560bca9cee86c55028bcea3d29e38d44c4e74fd58f9cd66441f720f22349d60524aa3aae37a3f6da0cea78ca6162ce3b6b6ae3626562d6db3822f35710a95af90f4ba4eac1372dbf941e1c81567410a05fa9caaf2") + key := hexKey("36a7edad64d51a568b00e51d3fa8cd340aa704153010edf7f55ab3066ca4ef21") + extra, _ := hex.DecodeString("01cf") + _, err := key.Decrypt(data, nil, extra) + if err != nil { + t.Fatal(err) + } +} diff --git a/docs/programmers_guide/db_walkthrough.MD b/docs/programmers_guide/db_walkthrough.MD index 13d3b99ac2d..c289844e824 100644 --- a/docs/programmers_guide/db_walkthrough.MD +++ b/docs/programmers_guide/db_walkthrough.MD @@ -272,7 +272,7 @@ The first entry has a non-empty value. This is the sender of 0.001 ETH. As history bucket records the value of this account *before* the change, we should expect the same value here as this account had at the Genesis. The other two records contain empty values, which means these account were non-existent previously. 
-These are the account of the recepient of 0.001 ETH (`0x0100000000000000000000000000000000000000`), +These are the account of the recipient of 0.001 ETH (`0x0100000000000000000000000000000000000000`), and the miner account `0x0000000000000000000000000000000000000000`. The keys of all three records have the common suffix of `0x21` instead of `0x20`, which is simply encoding of the block number 1 instead of block number 0. diff --git a/eth/backend.go b/eth/backend.go index c31047fbed3..2d31775e2d5 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -205,7 +205,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } // Assemble the Ethereum object - chainKv, err := node.OpenDatabase(stack.Config(), kv.ChainDB, logger) + chainKv, err := node.OpenDatabase(stack.Config(), kv.ChainDB, "", false, logger) if err != nil { return nil, err } @@ -459,8 +459,8 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } else { consensusConfig = &config.Ethash } - backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, config.HeimdallURL, - config.WithoutHeimdall, stack.DataDir(), false /* readonly */, logger) + backend.engine = ethconsensusconfig.CreateConsensusEngine(stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, config.HeimdallURL, + config.WithoutHeimdall, false /* readonly */, logger) backend.forkValidator = engineapi.NewForkValidator(currentBlockNumber, inMemoryExecution, tmpdir, backend.blockReader) backend.sentriesClient, err = sentry.NewMultiClient( diff --git a/eth/ethconsensusconfig/config.go b/eth/ethconsensusconfig/config.go index f29acf53136..f242ba48d70 100644 --- a/eth/ethconsensusconfig/config.go +++ b/eth/ethconsensusconfig/config.go @@ -8,6 +8,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/aura" "github.com/ledgerwatch/erigon/consensus/bor" @@ -16,15 +17,16 @@ import ( "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" "github.com/ledgerwatch/erigon/consensus/bor/heimdallgrpc" "github.com/ledgerwatch/erigon/consensus/clique" - "github.com/ledgerwatch/erigon/consensus/db" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/consensus/ethash/ethashcfg" "github.com/ledgerwatch/erigon/consensus/merge" + "github.com/ledgerwatch/erigon/node" + "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/params" ) -func CreateConsensusEngine(chainConfig *chain.Config, config interface{}, notify []string, noVerify bool, - heimdallGrpcAddress string, heimdallUrl string, withoutHeimdall bool, dataDir string, readonly bool, +func CreateConsensusEngine(nodeConfig *nodecfg.Config, chainConfig *chain.Config, config interface{}, notify []string, noVerify bool, + heimdallGrpcAddress string, heimdallUrl string, withoutHeimdall bool, readonly bool, logger log.Logger, ) consensus.Engine { var eng consensus.Engine @@ -53,16 +55,41 @@ func CreateConsensusEngine(chainConfig *chain.Config, config interface{}, notify } case *params.ConsensusSnapshotConfig: if chainConfig.Clique != nil { - if consensusCfg.DBPath == "" { - consensusCfg.DBPath = filepath.Join(dataDir, "clique", "db") + if consensusCfg.InMemory { + nodeConfig.Dirs.DataDir = "" + } else { + if consensusCfg.DBPath != "" { + if 
filepath.Base(consensusCfg.DBPath) == "clique" { + nodeConfig.Dirs.DataDir = filepath.Dir(consensusCfg.DBPath) + } else { + nodeConfig.Dirs.DataDir = consensusCfg.DBPath + } + } + } + + var err error + var db kv.RwDB + + db, err = node.OpenDatabase(nodeConfig, kv.ConsensusDB, "clique", readonly, logger) + + if err != nil { + panic(err) } - eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, consensusCfg.InMemory, readonly), logger) + + eng = clique.New(chainConfig, consensusCfg, db, logger) } case *chain.AuRaConfig: if chainConfig.Aura != nil { - dbPath := filepath.Join(dataDir, "aura") var err error - eng, err = aura.NewAuRa(chainConfig.Aura, db.OpenDatabase(dbPath, false, readonly)) + var db kv.RwDB + + db, err = node.OpenDatabase(nodeConfig, kv.ConsensusDB, "aura", readonly, logger) + + if err != nil { + panic(err) + } + + eng, err = aura.NewAuRa(chainConfig.Aura, db) if err != nil { panic(err) } @@ -74,8 +101,15 @@ func CreateConsensusEngine(chainConfig *chain.Config, config interface{}, notify if chainConfig.Bor != nil && chainConfig.Bor.ValidatorContract != "" { genesisContractsClient := contract.NewGenesisContractsClient(chainConfig, chainConfig.Bor.ValidatorContract, chainConfig.Bor.StateReceiverContract, logger) spanner := span.NewChainSpanner(contract.ValidatorSet(), chainConfig, logger) - borDbPath := filepath.Join(dataDir, "bor") // bor consensus path: datadir/bor - db := db.OpenDatabase(borDbPath, false, readonly) + + var err error + var db kv.RwDB + + db, err = node.OpenDatabase(nodeConfig, kv.ConsensusDB, "bor", readonly, logger) + + if err != nil { + panic(err) + } var heimdallClient bor.IHeimdallClient if withoutHeimdall { @@ -117,6 +151,6 @@ func CreateConsensusEngineBareBones(chainConfig *chain.Config, logger log.Logger consensusConfig = ðashCfg } - return CreateConsensusEngine(chainConfig, consensusConfig, nil /* notify */, true, /* noVerify */ - "" /* heimdallGrpcAddress */, "" /* heimdallUrl */, true /* withoutHeimdall */, "" /*dataDir*/, false /* readonly */, logger) + return CreateConsensusEngine(&nodecfg.Config{}, chainConfig, consensusConfig, nil /* notify */, true, /* noVerify */ + "" /* heimdallGrpcAddress */, "" /* heimdallUrl */, true /* withoutHeimdall */, false /* readonly */, logger) } diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 233c7041907..2a67e84d59d 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -113,7 +113,7 @@ func newTestBackend(t *testing.T) *testBackend { t.Fatalf("failed to create tx: %v", txErr) } b.AddTx(tx) - }, false) + }) if err != nil { t.Error(err) } diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 2d7358e51fe..36817ced634 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -136,7 +136,7 @@ func mockWithGenerator(t *testing.T, blocks int, generator func(int, *core.Block Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, }, testKey, false) if blocks > 0 { - chain, _ := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, blocks, generator, true) + chain, _ := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, blocks, generator) err := m.InsertChain(chain, nil) require.NoError(t, err) } diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index ac7bdf92ef2..03e65056229 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -161,14 +161,9 @@ func executeBlock( var receipts 
types.Receipts var stateSyncReceipt *types.Receipt var execRs *core.EphemeralExecResult - isBor := cfg.chainConfig.Bor != nil getHashFn := core.GetHashFn(block.Header(), getHeader) - if isBor { - execRs, err = core.ExecuteBlockEphemerallyBor(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, ChainReaderImpl{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, getTracer) - } else { - execRs, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, ChainReaderImpl{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, getTracer) - } + execRs, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, ChainReaderImpl{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, getTracer) if err != nil { return err } diff --git a/eth/stagedsync/stage_execute_test.go b/eth/stagedsync/stage_execute_test.go index 5856af247cd..7ef922a112a 100644 --- a/eth/stagedsync/stage_execute_test.go +++ b/eth/stagedsync/stage_execute_test.go @@ -8,7 +8,6 @@ import ( "time" "github.com/ledgerwatch/erigon-lib/common/datadir" - "github.com/ledgerwatch/erigon/core/state/temporal" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" @@ -17,8 +16,10 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/cmd/state/exec22" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/state/temporal" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/prune" diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index 7ab1e90c800..31e5e191c35 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -49,7 +49,7 @@ type MiningExecCfg struct { } type TxPoolForMining interface { - YieldBest(n uint16, txs *types2.TxsRlp, tx kv.Tx, onTopOf, availableGas uint64, toSkip mapset.Set[[32]byte]) (bool, int, error) + YieldBest(n uint16, txs *types2.TxsRlp, tx kv.Tx, onTopOf, availableGas, availableDataGas uint64, toSkip mapset.Set[[32]byte]) (bool, int, error) } func StageMiningExecCfg( @@ -194,7 +194,11 @@ func getNextTransactions( counter := 0 for !onTime && counter < 1000 { remainingGas := header.GasLimit - header.GasUsed - if onTime, count, err = cfg.txPool2.YieldBest(amount, &txSlots, poolTx, executionAt, remainingGas, alreadyYielded); err != nil { + remainingDataGas := uint64(0) + if header.DataGasUsed != nil { + remainingDataGas = chain.MaxDataGasPerBlock - *header.DataGasUsed + } + if onTime, count, err = cfg.txPool2.YieldBest(amount, &txSlots, poolTx, executionAt, remainingGas, remainingDataGas, alreadyYielded); err != nil { return err } time.Sleep(1 * time.Millisecond) @@ -367,12 +371,13 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC var miningCommitTx = func(txn types.Transaction, coinbase libcommon.Address, vmConfig *vm.Config, chainConfig chain.Config, ibs *state.IntraBlockState, current *MiningBlock) ([]*types.Log, error) { ibs.SetTxContext(txn.Hash(), libcommon.Hash{}, tcount) gasSnap := gasPool.Gas() + dataGasSnap := gasPool.DataGas() snap := ibs.Snapshot() logger.Debug("addTransactionsToMiningBlock", "txn hash", txn.Hash()) - receipt, _, err := 
core.ApplyTransaction(&chainConfig, core.GetHashFn(header, getHeader), engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig) + receipt, _, err := core.ApplyTransaction(&chainConfig, core.GetHashFn(header, getHeader), engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, header.DataGasUsed, *vmConfig) if err != nil { ibs.RevertToSnapshot(snap) - gasPool = new(core.GasPool).AddGas(gasSnap) // restore gasPool as well as ibs + gasPool = new(core.GasPool).AddGas(gasSnap).AddDataGas(dataGasSnap) // restore gasPool as well as ibs return nil, err } diff --git a/go.mod b/go.mod index 7c9189080c4..ca94a268c7d 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,8 @@ module github.com/ledgerwatch/erigon go 1.19 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20230619063809-966d95d0f690 - github.com/ledgerwatch/erigon-snapshot v1.2.1-0.20230605042354-196538d42475 + github.com/ledgerwatch/erigon-lib v0.0.0-20230627104814-797724496a65 + github.com/ledgerwatch/erigon-snapshot v1.2.1-0.20230622075030-1d69651854c2 github.com/ledgerwatch/log/v3 v3.8.0 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/ledgerwatch/trackerslist v1.1.0 // indirect @@ -47,15 +47,14 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d - github.com/hashicorp/golang-lru/arc/v2 v2.0.3 - github.com/hashicorp/golang-lru/v2 v2.0.3 + github.com/hashicorp/golang-lru/arc/v2 v2.0.4 + github.com/hashicorp/golang-lru/v2 v2.0.4 github.com/holiman/uint256 v1.2.2 github.com/huandu/xstrings v1.4.0 github.com/huin/goupnp v1.2.0 github.com/jackpal/go-nat-pmp v1.0.2 github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 - github.com/kevinburke/go-bindata v3.21.0+incompatible github.com/libp2p/go-libp2p v0.28.0 github.com/libp2p/go-libp2p-pubsub v0.9.3 github.com/maticnetwork/crand v1.0.2 @@ -82,7 +81,7 @@ require ( github.com/tidwall/btree v1.6.0 github.com/ugorji/go/codec v1.1.13 github.com/ugorji/go/codec/codecgen v1.1.13 - github.com/urfave/cli/v2 v2.25.6 + github.com/urfave/cli/v2 v2.25.7 github.com/valyala/fastjson v1.6.4 github.com/vektah/gqlparser/v2 v2.5.3 github.com/xsleonard/go-merkle v1.1.0 @@ -93,7 +92,7 @@ require ( golang.org/x/sync v0.3.0 golang.org/x/sys v0.9.0 golang.org/x/time v0.3.0 - google.golang.org/grpc v1.56.0 + google.golang.org/grpc v1.56.1 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/protobuf v1.30.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c diff --git a/go.sum b/go.sum index 90abf7d4736..212e20fb99f 100644 --- a/go.sum +++ b/go.sum @@ -345,10 +345,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/arc/v2 v2.0.3 h1:D+r4C25CbvVaMiyerWsrcvfzQLwDwHFFb4PzgwhWqBU= -github.com/hashicorp/golang-lru/arc/v2 v2.0.3/go.mod h1:e1kvlTaZVi6wntRwqfHWdL5ZXhrHUwl04M9LSwK6vQE= -github.com/hashicorp/golang-lru/v2 v2.0.3 h1:kmRrRLlInXvng0SmLxmQpQkpbYAvcXm7NPDrgxJa9mE= -github.com/hashicorp/golang-lru/v2 v2.0.3/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/arc/v2 v2.0.4 
h1:+tHnVSaabYlClRqUq4/+xzeyy9nAf8ju/JJsb4KTNBc= +github.com/hashicorp/golang-lru/arc/v2 v2.0.4/go.mod h1:rbQ1sKlUmbE1QbWxZbqtbpw8frA8ecNEhI0cQBxYtaU= +github.com/hashicorp/golang-lru/v2 v2.0.4 h1:7GHuZcgid37q8o5i3QI9KMT4nCWQQ3Kx3Ov6bb9MfK0= +github.com/hashicorp/golang-lru/v2 v2.0.4/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk= @@ -390,8 +390,6 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kevinburke/go-bindata v3.21.0+incompatible h1:baK7hwFJDlAHrOqmE9U3u8tow1Uc5ihN9E/b7djcK2g= -github.com/kevinburke/go-bindata v3.21.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -417,10 +415,10 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20230619063809-966d95d0f690 h1:HJaRdOdrwLdmlpiaDktVljViyBpPaeFmx1DnyXunpcM= -github.com/ledgerwatch/erigon-lib v0.0.0-20230619063809-966d95d0f690/go.mod h1:iz1daifnfSn3P0Iwd21ioyjwdmFEOn8DKeynahoHeSc= -github.com/ledgerwatch/erigon-snapshot v1.2.1-0.20230605042354-196538d42475 h1:1BvWA6agTUS4RZUHx79f45HpvelMVv4iEddaURUYcC8= -github.com/ledgerwatch/erigon-snapshot v1.2.1-0.20230605042354-196538d42475/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-lib v0.0.0-20230627104814-797724496a65 h1:KiLnZa8ALmNovK96dQ6gvXf01RFxJYNB/rqjdtIW4Go= +github.com/ledgerwatch/erigon-lib v0.0.0-20230627104814-797724496a65/go.mod h1:DmziKzY3PtjCCAQxqSYvJbRxru/sNHESud6LgO3YWAI= +github.com/ledgerwatch/erigon-snapshot v1.2.1-0.20230622075030-1d69651854c2 h1:Ls2itRGHMOr2PbHRDA4g1HH8HQdwfJhRVfMPEaLQe94= +github.com/ledgerwatch/erigon-snapshot v1.2.1-0.20230622075030-1d69651854c2/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.8.0 h1:gCpp7uGtIerEz1jKVPeDnbIopFPud9ZnCpBLlLBGqPU= github.com/ledgerwatch/log/v3 v3.8.0/go.mod h1:J2Jl6zV/58LeA6LTaVVnCGyf1/cYYSEOOLHY4ZN8S2A= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -768,8 +766,8 @@ github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCB github.com/ugorji/go/codec/codecgen v1.1.13 h1:rGpZ4Q63VcWA3DMBbIHvg+SQweUkfXBBa/f9X0W+tFg= github.com/ugorji/go/codec/codecgen v1.1.13/go.mod h1:EhCxlc7Crov+HLygD4+hBCitXNrrGKRrRWj+pRsyJGg= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 
-github.com/urfave/cli/v2 v2.25.6 h1:yuSkgDSZfH3L1CjF2/5fNNg2KbM47pY2EvjBq4ESQnU= -github.com/urfave/cli/v2 v2.25.6/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= @@ -1026,8 +1024,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.56.0 h1:+y7Bs8rtMd07LeXmL3NxcTLn7mUkbKZqEpPhMNkwJEE= -google.golang.org/grpc v1.56.0/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go index b66c2c854b3..c2081b10683 100644 --- a/metrics/exp/exp.go +++ b/metrics/exp/exp.go @@ -14,7 +14,7 @@ import ( // Setup starts a dedicated metrics server at the given address. // This function enables metrics reporting separate from pprof. 
-func Setup(address string) { +func Setup(address string, logger log.Logger) { http.HandleFunc("/debug/metrics/prometheus", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") metrics2.WritePrometheus(w, true) @@ -30,12 +30,12 @@ func Setup(address string) { }) //m.Handle("/debug/metrics", ExpHandler(metrics.DefaultRegistry)) //http.Handle("/debug/metrics/prometheus2", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{})) - log.Info("Starting metrics server", "addr", + logger.Info("Starting metrics server", "addr", fmt.Sprintf("http://%s/debug/metrics/prometheus", address), ) go func() { if err := http.ListenAndServe(address, nil); err != nil { // nolint:gosec - log.Error("Failure in running metrics server", "err", err) + logger.Error("Failure in running metrics server", "err", err) } }() } diff --git a/node/endpoints.go b/node/endpoints.go index 0af10b600a8..3855b6dc8d3 100644 --- a/node/endpoints.go +++ b/node/endpoints.go @@ -51,7 +51,8 @@ func StartHTTPEndpoint(endpoint string, timeouts rpccfg.HTTPTimeouts, handler ht } go func() { serveErr := httpSrv.Serve(listener) - if serveErr != nil && !errors.Is(serveErr, context.Canceled) && !errors.Is(serveErr, libcommon.ErrStopped) { + if serveErr != nil && + !(errors.Is(serveErr, context.Canceled) || errors.Is(serveErr, libcommon.ErrStopped) || errors.Is(serveErr, http.ErrServerClosed)) { log.Warn("Failed to serve http endpoint", "err", serveErr) } }() diff --git a/node/node.go b/node/node.go index ab571471c8c..bfc70a70c94 100644 --- a/node/node.go +++ b/node/node.go @@ -283,16 +283,20 @@ func (n *Node) DataDir() string { return n.config.Dirs.DataDir } -func OpenDatabase(config *nodecfg.Config, label kv.Label, logger log.Logger) (kv.RwDB, error) { - var name string +func OpenDatabase(config *nodecfg.Config, label kv.Label, name string, readonly bool, logger log.Logger) (kv.RwDB, error) { switch label { case kv.ChainDB: name = "chaindata" case kv.TxPoolDB: name = "txpool" + case kv.ConsensusDB: + if len(name) == 0 { + return nil, fmt.Errorf("Expected a consensus name") + } default: name = "test" } + var db kv.RwDB if config.Dirs.DataDir == "" { db = memdb.New("") @@ -300,9 +304,9 @@ func OpenDatabase(config *nodecfg.Config, label kv.Label, logger log.Logger) (kv } dbPath := filepath.Join(config.Dirs.DataDir, name) - var openFunc func(exclusive bool) (kv.RwDB, error) + logger.Info("Opening Database", "label", name, "path", dbPath) - openFunc = func(exclusive bool) (kv.RwDB, error) { + openFunc := func(exclusive bool) (kv.RwDB, error) { roTxLimit := int64(32) if config.Http.DBReadConcurrency > 0 { roTxLimit = int64(config.Http.DBReadConcurrency) @@ -311,19 +315,29 @@ func OpenDatabase(config *nodecfg.Config, label kv.Label, logger log.Logger) (kv opts := mdbx.NewMDBX(log.Root()). Path(dbPath).Label(label). 
DBVerbosity(config.DatabaseVerbosity).RoTxsLimiter(roTxsLimiter) + + if readonly { + opts = opts.Readonly() + } if exclusive { opts = opts.Exclusive() } - if label == kv.ChainDB { + + switch label { + case kv.ChainDB, kv.ConsensusDB: if config.MdbxPageSize.Bytes() > 0 { opts = opts.PageSize(config.MdbxPageSize.Bytes()) } if config.MdbxDBSizeLimit > 0 { opts = opts.MapSize(config.MdbxDBSizeLimit) } - } else { + if config.MdbxGrowthStep > 0 { + opts = opts.GrowthStep(config.MdbxGrowthStep) + } + default: opts = opts.GrowthStep(16 * datasize.MB) } + return opts.Open() } var err error diff --git a/node/node_test.go b/node/node_test.go index 0a1556156f2..b2d8c7f2c40 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -147,7 +147,7 @@ func TestNodeCloseClosesDB(t *testing.T) { stack, _ := New(testNodeConfig(t), logger) defer stack.Close() - db, err := OpenDatabase(stack.Config(), kv.SentryDB, logger) + db, err := OpenDatabase(stack.Config(), kv.SentryDB, "", false, logger) if err != nil { t.Fatal("can't open DB:", err) } @@ -179,7 +179,7 @@ func TestNodeOpenDatabaseFromLifecycleStart(t *testing.T) { var db kv.RwDB stack.RegisterLifecycle(&InstrumentedService{ startHook: func() { - db, err = OpenDatabase(stack.Config(), kv.SentryDB, logger) + db, err = OpenDatabase(stack.Config(), kv.SentryDB, "", false, logger) if err != nil { t.Fatal("can't open DB:", err) } @@ -205,7 +205,7 @@ func TestNodeOpenDatabaseFromLifecycleStop(t *testing.T) { stack.RegisterLifecycle(&InstrumentedService{ stopHook: func() { - db, err := OpenDatabase(stack.Config(), kv.ChainDB, logger) + db, err := OpenDatabase(stack.Config(), kv.ChainDB, "", false, logger) if err != nil { t.Fatal("can't open DB:", err) } diff --git a/node/nodecfg/config.go b/node/nodecfg/config.go index 1bffa857bfc..42e8e18a894 100644 --- a/node/nodecfg/config.go +++ b/node/nodecfg/config.go @@ -162,7 +162,7 @@ type Config struct { MdbxPageSize datasize.ByteSize MdbxDBSizeLimit datasize.ByteSize - + MdbxGrowthStep datasize.ByteSize // HealthCheck enables standard grpc health check HealthCheck bool diff --git a/params/chainspecs/bor-devnet.json b/params/chainspecs/bor-devnet.json index 133c16378ee..3f4c306f690 100644 --- a/params/chainspecs/bor-devnet.json +++ b/params/chainspecs/bor-devnet.json @@ -19,10 +19,10 @@ "0": 2 }, "producerDelay": { - "0": 6 + "0": 4 }, "sprint": { - "0": 64 + "0":16 }, "backupMultiplier": { "0": 5 diff --git a/params/chainspecs/bor-mainnet.json b/params/chainspecs/bor-mainnet.json index 982dfcdc049..baf0dc4b9aa 100644 --- a/params/chainspecs/bor-mainnet.json +++ b/params/chainspecs/bor-mainnet.json @@ -30,7 +30,7 @@ "0": 2 }, "stateSyncConfirmationDelay": { - "354000000": 128 + "44934656": 128 }, "validatorContract": "0x0000000000000000000000000000000000001000", "stateReceiverContract": "0x0000000000000000000000000000000000001001", @@ -56,6 +56,6 @@ "calcuttaBlock": 22156660, "jaipurBlock": 23850000, "delhiBlock": 38189056, - "indoreBlock": 354000000 + "indoreBlock": 44934656 } } diff --git a/params/chainspecs/gnosis.json b/params/chainspecs/gnosis.json index 6ebc9f18732..d8887899ef7 100644 --- a/params/chainspecs/gnosis.json +++ b/params/chainspecs/gnosis.json @@ -15,6 +15,7 @@ "eip1559FeeCollector": "0x6BBe78ee9e474842Dbd4AB4987b3CeFE88426A92", "terminalTotalDifficulty": 8626000000000000000000058750000000000000000000, "terminalTotalDifficultyPassed": true, + "shanghaiTime": 1690889660, "aura": { "stepDuration": 5, "blockReward": 0, @@ -49,6 +50,7 @@ "21735000": { "0xf8D1677c8a0c961938bf2f9aDc3F3CFDA759A9d9": 
"0x6080604052600436106101b35763ffffffff60e060020a60003504166305d2035b81146101b857806306fdde03146101e1578063095ea7b31461026b5780630b26cf661461028f57806318160ddd146102b257806323b872dd146102d957806330adf81f14610303578063313ce567146103185780633644e5151461034357806339509351146103585780634000aea01461037c57806340c10f19146103ad57806342966c68146103d157806354fd4d50146103e957806366188463146103fe57806369ffa08a1461042257806370a0823114610449578063715018a61461046a578063726600ce1461047f5780637d64bcb4146104a05780637ecebe00146104b5578063859ba28c146104d65780638da5cb5b146105175780638fcbaf0c1461054857806395d89b4114610586578063a457c2d71461059b578063a9059cbb146105bf578063b753a98c146105e3578063bb35783b14610607578063c6a1dedf14610631578063cd59658314610646578063d505accf1461065b578063d73dd62314610694578063dd62ed3e146106b8578063f2d5d56b146106df578063f2fde38b14610703578063ff9e884d14610724575b600080fd5b3480156101c457600080fd5b506101cd61074b565b604080519115158252519081900360200190f35b3480156101ed57600080fd5b506101f661076c565b6040805160208082528351818301528351919283929083019185019080838360005b83811015610230578181015183820152602001610218565b50505050905090810190601f16801561025d5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561027757600080fd5b506101cd600160a060020a03600435166024356107fa565b34801561029b57600080fd5b506102b0600160a060020a0360043516610810565b005b3480156102be57600080fd5b506102c761086a565b60408051918252519081900360200190f35b3480156102e557600080fd5b506101cd600160a060020a0360043581169060243516604435610870565b34801561030f57600080fd5b506102c7610a38565b34801561032457600080fd5b5061032d610a5c565b6040805160ff9092168252519081900360200190f35b34801561034f57600080fd5b506102c7610a65565b34801561036457600080fd5b506101cd600160a060020a0360043516602435610a6b565b34801561038857600080fd5b506101cd60048035600160a060020a0316906024803591604435918201910135610aac565b3480156103b957600080fd5b506101cd600160a060020a0360043516602435610bbd565b3480156103dd57600080fd5b506102b0600435610cc8565b3480156103f557600080fd5b506101f6610cd5565b34801561040a57600080fd5b506101cd600160a060020a0360043516602435610d0c565b34801561042e57600080fd5b506102b0600160a060020a0360043581169060243516610de9565b34801561045557600080fd5b506102c7600160a060020a0360043516610e0e565b34801561047657600080fd5b506102b0610e29565b34801561048b57600080fd5b506101cd600160a060020a0360043516610e40565b3480156104ac57600080fd5b506101cd610e54565b3480156104c157600080fd5b506102c7600160a060020a0360043516610e5b565b3480156104e257600080fd5b506104eb610e6d565b6040805167ffffffffffffffff9485168152928416602084015292168183015290519081900360600190f35b34801561052357600080fd5b5061052c610e78565b60408051600160a060020a039092168252519081900360200190f35b34801561055457600080fd5b506102b0600160a060020a0360043581169060243516604435606435608435151560ff60a4351660c43560e435610e87565b34801561059257600080fd5b506101f6610fc5565b3480156105a757600080fd5b506101cd600160a060020a036004351660243561101f565b3480156105cb57600080fd5b506101cd600160a060020a0360043516602435611032565b3480156105ef57600080fd5b506102b0600160a060020a0360043516602435611054565b34801561061357600080fd5b506102b0600160a060020a0360043581169060243516604435611064565b34801561063d57600080fd5b506102c7611075565b34801561065257600080fd5b5061052c611099565b34801561066757600080fd5b506102b0600160a060020a036004358116906024351660443560643560ff6084351660a43560c4356110a8565b3480156106a057600080fd5b506101cd600160a060020a0360043516602435611184565b3480156106c457600080fd5b506102c7600160a060020a036004358116906024351661120b565b3480156106eb57600080fd5b506102b0
600160a060020a0360043516602435611236565b34801561070f57600080fd5b506102b0600160a060020a0360043516611241565b34801561073057600080fd5b506102c7600160a060020a0360043581169060243516611261565b60065474010000000000000000000000000000000000000000900460ff1681565b6000805460408051602060026001851615610100026000190190941693909304601f810184900484028201840190925281815292918301828280156107f25780601f106107c7576101008083540402835291602001916107f2565b820191906000526020600020905b8154815290600101906020018083116107d557829003601f168201915b505050505081565b600061080733848461127e565b50600192915050565b600654600160a060020a0316331461082757600080fd5b610830816112c0565b151561083b57600080fd5b6007805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0392909216919091179055565b60045490565b600080600160a060020a038516151561088857600080fd5b600160a060020a038416151561089d57600080fd5b600160a060020a0385166000908152600360205260409020546108c6908463ffffffff6112c816565b600160a060020a0380871660009081526003602052604080822093909355908616815220546108fb908463ffffffff6112da16565b600160a060020a038086166000818152600360209081526040918290209490945580518781529051919392891692600080516020611d7283398151915292918290030190a3600160a060020a0385163314610a225761095a853361120b565b905060001981146109c457610975818463ffffffff6112c816565b600160a060020a038616600081815260056020908152604080832033808552908352928190208590558051948552519193600080516020611d92833981519152929081900390910190a3610a22565b600160a060020a0385166000908152600a602090815260408083203384529091529020541580610a175750600160a060020a0385166000908152600a602090815260408083203384529091529020544211155b1515610a2257600080fd5b610a2d8585856112ed565b506001949350505050565b7f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c981565b60025460ff1681565b60085481565b336000818152600560209081526040808320600160a060020a03871684529091528120549091610807918590610aa7908663ffffffff6112da16565b61127e565b600084600160a060020a03811615801590610ad05750600160a060020a0381163014155b1515610adb57600080fd5b610ae58686611324565b1515610af057600080fd5b85600160a060020a031633600160a060020a03167fe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16878787604051808481526020018060200182810382528484828181526020019250808284376040519201829003965090945050505050a3610b65866112c0565b15610bb157610ba633878787878080601f01602080910402602001604051908101604052809392919081815260200183838082843750611330945050505050565b1515610bb157600080fd5b50600195945050505050565b600654600090600160a060020a03163314610bd757600080fd5b60065474010000000000000000000000000000000000000000900460ff1615610bff57600080fd5b600454610c12908363ffffffff6112da16565b600455600160a060020a038316600090815260036020526040902054610c3e908363ffffffff6112da16565b600160a060020a038416600081815260036020908152604091829020939093558051858152905191927f0f6798a560793a54c3bcfe86a93cde1e73087d944c0ea20544137d412139688592918290030190a2604080518381529051600160a060020a03851691600091600080516020611d728339815191529181900360200190a350600192915050565b610cd233826114ad565b50565b60408051808201909152600181527f3100000000000000000000000000000000000000000000000000000000000000602082015281565b336000908152600560209081526040808320600160a060020a0386168452909152812054808310610d6057336000908152600560209081526040808320600160a060020a0388168452909152812055610d95565b610d70818463ffffffff6112c816565b336000908152600560209081526040808320600160a060020a03891684529091529020555b336000818152600560209081526040808320600160a060020a038916808552908352928190205481519081529051929392600080516020611d92833981519152929181900390910190a3506
0019392505050565b600654600160a060020a03163314610e0057600080fd5b610e0a828261159c565b5050565b600160a060020a031660009081526003602052604090205490565b600654600160a060020a031633146101b357600080fd5b600754600160a060020a0390811691161490565b6000806000fd5b60096020526000908152604090205481565b600260056000909192565b600654600160a060020a031681565b600080861580610e975750864211155b1515610ea257600080fd5b604080517fea2aa0a1be11a07ed86d755c93467f4f82362b452371d1ba94d1715123511acb6020820152600160a060020a03808d16828401528b166060820152608081018a905260a0810189905287151560c0808301919091528251808303909101815260e0909101909152610f17906115da565b9150610f25828686866116e1565b600160a060020a038b8116911614610f3c57600080fd5b600160a060020a038a1660009081526009602052604090208054600181019091558814610f6857600080fd5b85610f74576000610f78565b6000195b905085610f86576000610f88565b865b600160a060020a03808c166000908152600a60209081526040808320938e1683529290522055610fb98a8a836118e3565b50505050505050505050565b60018054604080516020600284861615610100026000190190941693909304601f810184900484028201840190925281815292918301828280156107f25780601f106107c7576101008083540402835291602001916107f2565b600061102b8383610d0c565b9392505050565b600061103e8383611324565b151561104957600080fd5b6108073384846112ed565b61105f338383610870565b505050565b61106f838383610870565b50505050565b7fea2aa0a1be11a07ed86d755c93467f4f82362b452371d1ba94d1715123511acb81565b600754600160a060020a031690565b600080428610156110b857600080fd5b600160a060020a03808a1660008181526009602090815260409182902080546001810190915582517f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c99281019290925281830193909352928b166060840152608083018a905260a0830182905260c08084018a90528151808503909101815260e090930190529250611149906115da565b9050611157818686866116e1565b600160a060020a038a811691161461116e57600080fd5b61117989898961127e565b505050505050505050565b336000908152600560209081526040808320600160a060020a03861684529091528120546111b8908363ffffffff6112da16565b336000818152600560209081526040808320600160a060020a038916808552908352928190208590558051948552519193600080516020611d92833981519152929081900390910190a350600192915050565b600160a060020a03918216600090815260056020908152604080832093909416825291909152205490565b61105f823383610870565b600654600160a060020a0316331461125857600080fd5b610cd281611a3e565b600a60209081526000928352604080842090915290825290205481565b6112898383836118e3565b60001981141561105f57600160a060020a038084166000908152600a60209081526040808320938616835292905290812055505050565b6000903b1190565b6000828211156112d457fe5b50900390565b818101828110156112e757fe5b92915050565b6112f682610e40565b1561105f5760408051600081526020810190915261131990849084908490611330565b151561105f57600080fd5b600061102b8383611abc565b600083600160a060020a031663a4c0ed3660e060020a028685856040516024018084600160a060020a0316600160a060020a0316815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b838110156113a8578181015183820152602001611390565b50505050905090810190601f1680156113d55780820380516001836020036101000a031916815260200191505b5060408051601f198184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909916989098178852518151919790965086955093509150819050838360005b8381101561146357818101518382015260200161144b565b50505050905090810190601f1680156114905780820380516001836020036101000a031916815260200191505b509150506000604051808303816000865af1979650505050505050565b600160a060020a0382166000908152600360205260409020548111156114d257600080fd5b
600160a060020a0382166000908152600360205260409020546114fb908263ffffffff6112c816565b600160a060020a038316600090815260036020526040902055600454611527908263ffffffff6112c816565b600455604080518281529051600160a060020a038416917fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5919081900360200190a2604080518281529051600091600160a060020a03851691600080516020611d728339815191529181900360200190a35050565b80600160a060020a03811615156115b257600080fd5b600160a060020a03831615156115d0576115cb82611b8b565b61105f565b61105f8383611b97565b6000600854826040518082805190602001908083835b6020831061160f5780518252601f1990920191602091820191016115f0565b51815160209384036101000a6000190180199092169116179052604080519290940182900382207f190100000000000000000000000000000000000000000000000000000000000083830152602283019790975260428083019790975283518083039097018752606290910192839052855192945084935085019190508083835b602083106116af5780518252601f199092019160209182019101611690565b5181516020939093036101000a6000190180199091169216919091179052604051920182900390912095945050505050565b6000808460ff16601b14806116f957508460ff16601c145b1515611775576040805160e560020a62461bcd02815260206004820152602260248201527f45434453413a20696e76616c6964207369676e6174757265202776272076616c60448201527f7565000000000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b7f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0831115611813576040805160e560020a62461bcd02815260206004820152602260248201527f45434453413a20696e76616c6964207369676e6174757265202773272076616c60448201527f7565000000000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b60408051600080825260208083018085528a905260ff8916838501526060830188905260808301879052925160019360a0808501949193601f19840193928390039091019190865af115801561186d573d6000803e3d6000fd5b5050604051601f190151915050600160a060020a03811615156118da576040805160e560020a62461bcd02815260206004820152601860248201527f45434453413a20696e76616c6964207369676e61747572650000000000000000604482015290519081900360640190fd5b95945050505050565b600160a060020a0383161515611968576040805160e560020a62461bcd028152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460448201527f7265737300000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b600160a060020a03821615156119ee576040805160e560020a62461bcd02815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f20616464726560448201527f7373000000000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b600160a060020a0380841660008181526005602090815260408083209487168084529482529182902085905581518581529151600080516020611d928339815191529281900390910190a3505050565b600160a060020a0381161515611a5357600080fd5b600654604051600160a060020a038084169216907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a36006805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0392909216919091179055565b33600090815260036020526040812054821115611ad857600080fd5b600160a060020a0383161515611aed57600080fd5b33600090815260036020526040902054611b0d908363ffffffff6112c816565b3360009081526003602052604080822092909255600160a060020a03851681522054611b3f908363ffffffff6112da16565b600160a060020a038416600081815260036020908152604091829020939093558051858152905191923392600080516020611d728339815191529281900390910190a350600192915050565b3031610e0a8282611c44565b604080517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015
290518391600091600160a060020a038416916370a0823191602480830192602092919082900301818787803b158015611bfc57600080fd5b505af1158015611c10573d6000803e3d6000fd5b505050506040513d6020811015611c2657600080fd5b5051905061106f600160a060020a038516848363ffffffff611cac16565b604051600160a060020a0383169082156108fc029083906000818181858888f193505050501515610e0a578082611c79611d41565b600160a060020a039091168152604051908190036020019082f080158015611ca5573d6000803e3d6000fd5b5050505050565b82600160a060020a031663a9059cbb83836040518363ffffffff1660e060020a0281526004018083600160a060020a0316600160a060020a0316815260200182815260200192505050600060405180830381600087803b158015611d0f57600080fd5b505af1158015611d23573d6000803e3d6000fd5b505050503d1561105f5760206000803e600051151561105f57600080fd5b604051602180611d51833901905600608060405260405160208060218339810160405251600160a060020a038116ff00ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925a165627a7a72305820b96bb0733a3e45fdddafa592f51114d0cf16cad047ad60b9b91ae91eb772c6940029" } - } + }, + "withdrawalContractAddress": "0x0B98057eA310F4d31F2a452B414647007d1645d9" } } diff --git a/params/protocol_params.go b/params/protocol_params.go index b42004158bf..6174e143b3e 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -16,7 +16,11 @@ package params -import "math/big" +import ( + "math/big" + + "github.com/ledgerwatch/erigon-lib/chain" +) const ( GasLimitBoundDivisor uint64 = 1024 // The bound divisor of the gas limit, used in update calculations. @@ -163,13 +167,10 @@ const ( RefundQuotientEIP3529 uint64 = 5 // stuff from EIP-4844 - FieldElementsPerBlob = 4096 // each field element is 32 bytes - MaxDataGasPerBlock uint64 = 0xC0000 - TargetDataGasPerBlock uint64 = 0x60000 - DataGasPerBlob uint64 = 0x20000 - MinDataGasPrice = 1 - DataGasPriceUpdateFraction = 3338477 - MaxBlobsPerBlock = MaxDataGasPerBlock / DataGasPerBlob + FieldElementsPerBlob = 4096 // each field element is 32 bytes + MinDataGasPrice = 1 + DataGasPriceUpdateFraction = 3338477 + MaxBlobsPerBlock = chain.MaxDataGasPerBlock / chain.DataGasPerBlob BlobVerificationGas uint64 = 1800000 BlobCommitmentVersionKZG uint8 = 0x01 diff --git a/params/version.go b/params/version.go index 8d0c7d1d9d4..e25c318ccaf 100644 --- a/params/version.go +++ b/params/version.go @@ -32,7 +32,7 @@ var ( // see https://calver.org const ( VersionMajor = 2 // Major version component of the current release - VersionMinor = 45 // Minor version component of the current release + VersionMinor = 48 // Minor version component of the current release VersionMicro = 0 // Patch version component of the current release VersionModifier = "dev" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" diff --git a/tests/state_test.go b/tests/state_test.go index 1cbda422936..d69269daff9 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -53,11 +53,11 @@ func TestState(t *testing.T) { st.skipLoad(`.*vmPerformance/loop.*`) st.walk(t, stateTestDir, func(t *testing.T, name string, test *StateTest) { - _, db, _ := temporal.NewTestDB(t, datadir.New(t.TempDir()), nil) for _, subtest := range test.Subtests() { subtest := subtest key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index) t.Run(key, func(t *testing.T) { + _, db, _ := temporal.NewTestDB(t, datadir.New(t.TempDir()), nil) withTrace(t, func(vmconfig vm.Config) error { tx, err := db.BeginRw(context.Background()) if err != nil { diff --git a/tests/state_test_util.go 
b/tests/state_test_util.go index f787c99057c..e5441fe1f8d 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -26,7 +26,6 @@ import ( "strings" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/eth/ethconfig" "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon-lib/chain" @@ -43,7 +42,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/trie" @@ -248,7 +247,7 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co // Execute the message. snapshot := statedb.Snapshot() gaspool := new(core.GasPool) - gaspool.AddGas(block.GasLimit()).AddDataGas(params.MaxDataGasPerBlock) + gaspool.AddGas(block.GasLimit()).AddDataGas(chain.MaxDataGasPerBlock) if _, err = core.ApplyMessage(evm, msg, gaspool, true /* refunds */, false /* gasBailout */); err != nil { statedb.RevertToSnapshot(snapshot) } diff --git a/tests/statedb_chain_test.go b/tests/statedb_chain_test.go index edb2932dbfa..69d7cb3685a 100644 --- a/tests/statedb_chain_test.go +++ b/tests/statedb_chain_test.go @@ -93,7 +93,7 @@ func TestSelfDestructReceive(t *testing.T) { block.AddTx(txn) } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } diff --git a/tests/statedb_insert_chain_transaction_test.go b/tests/statedb_insert_chain_transaction_test.go index 8c1ef4349e3..1159e7b7289 100644 --- a/tests/statedb_insert_chain_transaction_test.go +++ b/tests/statedb_insert_chain_transaction_test.go @@ -770,7 +770,7 @@ func genBlocks(t *testing.T, gspec *types.Genesis, txs map[int]txn) (*stages.Moc } contractBackend.Commit() - }, false /* intermediateHashes */) + }) if err != nil { return nil, nil, fmt.Errorf("generate chain: %w", err) } diff --git a/tools.go b/tools.go index eab76e96267..6d4624dd2c0 100644 --- a/tools.go +++ b/tools.go @@ -18,7 +18,6 @@ package tools import ( _ "github.com/fjl/gencodec" - _ "github.com/kevinburke/go-bindata" _ "github.com/torquem-ch/mdbx-go" _ "github.com/torquem-ch/mdbx-go/mdbxdist" _ "github.com/ugorji/go/codec/codecgen" diff --git a/turbo/app/import_cmd.go b/turbo/app/import_cmd.go index 986b1a873bd..1936626515b 100644 --- a/turbo/app/import_cmd.go +++ b/turbo/app/import_cmd.go @@ -222,7 +222,7 @@ func InsertChain(ethereum *eth.Ethereum, chain *core.ChainPack, logger log.Logge blockReader, _ := ethereum.BlockIO() hook := stages.NewHook(ethereum.SentryCtx(), ethereum.Notifications(), ethereum.StagedSync(), blockReader, ethereum.ChainConfig(), logger, sentryControlServer.UpdateHead) - err := stages.StageLoopStep(ethereum.SentryCtx(), ethereum.ChainDB(), nil, ethereum.StagedSync(), initialCycle, logger, blockReader, hook) + err := stages.StageLoopIteration(ethereum.SentryCtx(), ethereum.ChainDB(), nil, ethereum.StagedSync(), initialCycle, logger, blockReader, hook) if err != nil { return err } diff --git a/turbo/app/init_cmd.go b/turbo/app/init_cmd.go index 50033978fa7..ed4c01ac898 100644 --- a/turbo/app/init_cmd.go +++ b/turbo/app/init_cmd.go @@ -61,7 +61,7 @@ func initGenesis(ctx *cli.Context) error { stack := MakeConfigNodeDefault(ctx, logger) defer stack.Close() - chaindb, err := node.OpenDatabase(stack.Config(), kv.ChainDB, logger) + chaindb, err := node.OpenDatabase(stack.Config(), kv.ChainDB, 
"", false, logger) if err != nil { utils.Fatalf("Failed to open database: %v", err) } diff --git a/turbo/app/support_cmd.go b/turbo/app/support_cmd.go index 1121720a8ad..bf5fb09aec8 100644 --- a/turbo/app/support_cmd.go +++ b/turbo/app/support_cmd.go @@ -53,6 +53,10 @@ by the URL.`, const Version = 1 func connectDiagnostics(cliCtx *cli.Context) error { + return ConnectDiagnostics(cliCtx, log.Root()) +} + +func ConnectDiagnostics(cliCtx *cli.Context, logger log.Logger) error { sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) @@ -72,7 +76,7 @@ func connectDiagnostics(cliCtx *cli.Context) error { // Perform the requests in a loop (reconnect) for { - if err := tunnel(ctx, cancel, sigs, tlsConfig, diagnosticsUrl, metricsURL); err != nil { + if err := tunnel(ctx, cancel, sigs, tlsConfig, diagnosticsUrl, metricsURL, logger); err != nil { return err } select { @@ -81,7 +85,7 @@ func connectDiagnostics(cliCtx *cli.Context) error { return nil default: } - log.Info("Reconnecting in 1 second...") + logger.Info("Reconnecting in 1 second...") timer := time.NewTimer(1 * time.Second) <-timer.C } @@ -91,7 +95,7 @@ var successLine = []byte("SUCCESS") // tunnel operates the tunnel from diagnostics system to the metrics URL for one http/2 request // needs to be called repeatedly to implement re-connect logic -func tunnel(ctx context.Context, cancel context.CancelFunc, sigs chan os.Signal, tlsConfig *tls.Config, diagnosticsUrl string, metricsURL string) error { +func tunnel(ctx context.Context, cancel context.CancelFunc, sigs chan os.Signal, tlsConfig *tls.Config, diagnosticsUrl string, metricsURL string, logger log.Logger) error { diagnosticsClient := &http.Client{Transport: &http2.Transport{TLSClientConfig: tlsConfig}} defer diagnosticsClient.CloseIdleConnections() metricsClient := &http.Client{} @@ -142,7 +146,7 @@ func tunnel(ctx context.Context, cancel context.CancelFunc, sigs chan os.Signal, return fmt.Errorf("sending version: %v", err) } - log.Info("Connected") + logger.Info("Connected") for line, isPrefix, err = r.ReadLine(); err == nil && !isPrefix; line, isPrefix, err = r.ReadLine() { metricsBuf.Reset() @@ -160,11 +164,11 @@ func tunnel(ctx context.Context, cancel context.CancelFunc, sigs chan os.Signal, var sizeBuf [4]byte binary.BigEndian.PutUint32(sizeBuf[:], uint32(metricsBuf.Len())) if _, err = writer.Write(sizeBuf[:]); err != nil { - log.Error("Problem relaying metrics prefix len", "url", metricsURL, "query", line, "err", err) + logger.Error("Problem relaying metrics prefix len", "url", metricsURL, "query", line, "err", err) break } if _, err = writer.Write(metricsBuf.Bytes()); err != nil { - log.Error("Problem relaying", "url", metricsURL, "query", line, "err", err) + logger.Error("Problem relaying", "url", metricsURL, "query", line, "err", err) break } } @@ -172,11 +176,11 @@ func tunnel(ctx context.Context, cancel context.CancelFunc, sigs chan os.Signal, select { case <-ctx.Done(): default: - log.Error("Breaking connection", "err", err) + logger.Error("Breaking connection", "err", err) } } if isPrefix { - log.Error("Request too long, circuit breaker") + logger.Error("Request too long, circuit breaker") } return nil } diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index fb3ed79ec9f..81be9f920fb 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -72,6 +72,7 @@ var DefaultFlags = []cli.Flag{ &utils.RpcGasCapFlag, &utils.RpcBatchLimit, &utils.RpcReturnDataLimit, + &utils.RPCGlobalTxFeeCapFlag, 
&utils.TxpoolApiAddrFlag, &utils.TraceMaxtracesFlag, &HTTPReadTimeoutFlag, diff --git a/turbo/debug/flags.go b/turbo/debug/flags.go index 73447f0e1e4..0888ec5596d 100644 --- a/turbo/debug/flags.go +++ b/turbo/debug/flags.go @@ -148,7 +148,7 @@ func SetupCobra(cmd *cobra.Command, filePrefix string) (log.Logger, error) { if metricsEnabled && metricsAddr != "" { address := fmt.Sprintf("%s:%d", metricsAddr, metricsPort) - exp.Setup(address) + exp.Setup(address, logger) } withMetrics := metricsEnabled && metricsAddr == "" @@ -189,7 +189,7 @@ func Setup(ctx *cli.Context, rootLogger bool) (log.Logger, error) { if metricsEnabled && (!pprofEnabled || metricsAddr != "") { metricsPort := ctx.Int(metricsPortFlag.Name) address := fmt.Sprintf("%s:%d", metricsAddr, metricsPort) - exp.Setup(address) + exp.Setup(address, logger) diagnostics.SetupLogsAccess(ctx) diagnostics.SetupDbAccess(ctx) diagnostics.SetupCmdLineAccess() diff --git a/turbo/rpchelper/subscription.go b/turbo/rpchelper/subscription.go index e7eb12cbd5f..6fb57b151d0 100644 --- a/turbo/rpchelper/subscription.go +++ b/turbo/rpchelper/subscription.go @@ -2,7 +2,6 @@ package rpchelper import ( "sync" - "sync/atomic" ) // a simple interface for subscriptions for rpc helper @@ -12,14 +11,14 @@ type Sub[T any] interface { } type chan_sub[T any] struct { + lock sync.Mutex // protects all fileds of this struct ch chan T - closed atomic.Bool + closed bool } -// buffered channel +// newChanSub - buffered channel func newChanSub[T any](size int) *chan_sub[T] { - // set min size to 8. - if size < 8 { + if size < 8 { // set min size to 8 size = 8 } o := &chan_sub[T]{} @@ -27,19 +26,23 @@ func newChanSub[T any](size int) *chan_sub[T] { return o } func (s *chan_sub[T]) Send(x T) { - if s.closed.Load() { + s.lock.Lock() + defer s.lock.Unlock() + if s.closed { return } - select { case s.ch <- x: default: // the sub is overloaded, dispose message } } func (s *chan_sub[T]) Close() { - if swapped := s.closed.CompareAndSwap(false, true); !swapped { + s.lock.Lock() + defer s.lock.Unlock() + if s.closed { return } + s.closed = true close(s.ch) } diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index 266b4d2344a..3bb7de4a64d 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -1381,14 +1381,13 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo chainID, _ := uint256.FromBig(chainConfig.ChainID) - var prevTxID uint64 numBuf := make([]byte, 8) parseCtx := types2.NewTxParseContext(*chainID) parseCtx.WithSender(false) slot := types2.TxSlot{} var sender [20]byte parse := func(v, valueBuf []byte, senders []common2.Address, j int) ([]byte, error) { - if _, err := parseCtx.ParseTransaction(v, 0, &slot, sender[:], false /* hasEnvelope */, nil); err != nil { + if _, err := parseCtx.ParseTransaction(v, 0, &slot, sender[:], false /* hasEnvelope */, false /* wrappedWithBlobs */, nil); err != nil { return valueBuf, err } if len(senders) > 0 { @@ -1466,18 +1465,8 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo if err := addSystemTx(tx, body.BaseTxId); err != nil { return false, err } - if prevTxID > 0 { - prevTxID++ - } else { - prevTxID = body.BaseTxId - } binary.BigEndian.PutUint64(numBuf, body.BaseTxId+1) - if err := tx.ForAmount(kv.EthTx, numBuf, body.TxAmount-2, func(tk, tv []byte) error { - id := binary.BigEndian.Uint64(tk) - if prevTxID != 0 && id != prevTxID+1 { - 
panic(fmt.Sprintf("no gaps in tx ids are allowed: block %d does jump from %d to %d", blockNum, prevTxID, id)) - } - prevTxID = id + if err := tx.ForAmount(kv.EthTx, numBuf, body.TxAmount-2, func(_, tv []byte) error { parseCtx.WithSender(len(senders) == 0) valueBuf, err = parse(tv, valueBuf, senders, j) if err != nil { @@ -1497,7 +1486,6 @@ func DumpTxs(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, chainCo if err := addSystemTx(tx, body.BaseTxId+uint64(body.TxAmount)-1); err != nil { return false, err } - prevTxID++ select { case <-ctx.Done(): @@ -1780,7 +1768,7 @@ RETRY: if isSystemTx { // system-txs hash:pad32(txnID) binary.BigEndian.PutUint64(slot.IDHash[:], firstTxID+i) } else { - if _, err = parseCtx.ParseTransaction(word[firstTxByteAndlengthOfAddress:], 0, &slot, nil, true /* hasEnvelope */, nil /* validateHash */); err != nil { + if _, err = parseCtx.ParseTransaction(word[firstTxByteAndlengthOfAddress:], 0, &slot, nil, true /* hasEnvelope */, false /* wrappedWithBlobs */, nil /* validateHash */); err != nil { return fmt.Errorf("ParseTransaction: %w, blockNum: %d, i: %d", err, blockNum, i) } } diff --git a/turbo/snapshotsync/freezeblocks/dump_test.go b/turbo/snapshotsync/freezeblocks/dump_test.go index 6da633aa6ea..0968a06f967 100644 --- a/turbo/snapshotsync/freezeblocks/dump_test.go +++ b/turbo/snapshotsync/freezeblocks/dump_test.go @@ -5,8 +5,12 @@ import ( "testing" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/require" + libcommon "github.com/ledgerwatch/erigon-lib/common" types2 "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" @@ -15,8 +19,6 @@ import ( "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/ledgerwatch/log/v3" - "github.com/stretchr/testify/require" ) func TestDump(t *testing.T) { @@ -35,7 +37,7 @@ func TestDump(t *testing.T) { if v == nil { systemTxs++ } else { - if _, err := parseCtx.ParseTransaction(v[1+20:], 0, &slot, sender[:], false /* hasEnvelope */, nil); err != nil { + if _, err := parseCtx.ParseTransaction(v[1+20:], 0, &slot, sender[:], false /* hasEnvelope */, false /* wrappedWithBlobs */, nil); err != nil { return err } nonceList = append(nonceList, slot.Nonce) @@ -60,7 +62,7 @@ func TestDump(t *testing.T) { if v == nil { systemTxs++ } else { - if _, err := parseCtx.ParseTransaction(v[1+20:], 0, &slot, sender[:], false /* hasEnvelope */, nil); err != nil { + if _, err := parseCtx.ParseTransaction(v[1+20:], 0, &slot, sender[:], false /* hasEnvelope */, false /* wrappedWithBlobs */, nil); err != nil { return err } nonceList = append(nonceList, slot.Nonce) @@ -173,7 +175,7 @@ func createDumpTestKV(t *testing.T, chainSize int) *stages.MockSentry { t.Fatalf("failed to create tx: %v", txErr) } b.AddTx(tx) - }, false) + }) if err != nil { t.Error(err) } diff --git a/turbo/stages/blockchain_test.go b/turbo/stages/blockchain_test.go index 9ed67da7751..16691877e24 100644 --- a/turbo/stages/blockchain_test.go +++ b/turbo/stages/blockchain_test.go @@ -63,7 +63,7 @@ var ( func makeBlockChain(parent *types.Block, n int, m *stages.MockSentry, seed int) *core.ChainPack { chain, _ := core.GenerateChain(m.ChainConfig, parent, m.Engine, m.DB, n, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{0: byte(seed), 19: byte(i)}) - }, false /* intermediateHashes */) + }) return chain 
} @@ -314,13 +314,13 @@ func testReorg(t *testing.T, first, second []int64, td int64) { // Insert an easy and a difficult chain afterwards easyChain, err := core.GenerateChain(m.ChainConfig, current(m, nil), m.Engine, m.DB, len(first), func(i int, b *core.BlockGen) { b.OffsetTime(first[i]) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate chain: %v", err) } diffChain, err := core.GenerateChain(m.ChainConfig, current(m, nil), m.Engine, m.DB, len(second), func(i int, b *core.BlockGen) { b.OffsetTime(second[i]) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate chain: %v", err) } @@ -444,7 +444,7 @@ func TestChainTxReorgs(t *testing.T) { gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate chain: %v", err) } @@ -471,7 +471,7 @@ func TestChainTxReorgs(t *testing.T) { futureAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, uint256.NewInt(1000), params.TxGas, nil, nil), *signer, key3) gen.AddTx(futureAdd) // This transaction will be added after a full reorg } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate chain: %v", err) } @@ -560,7 +560,7 @@ func readReceipt(db kv.Tx, txHash libcommon.Hash, br services.FullBlockReader) ( func TestCanonicalBlockRetrieval(t *testing.T) { m := newCanonical(t, 0) - chain, err2 := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) {}, false /* intermediateHashes */) + chain, err2 := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) {}) if err2 != nil { t.Fatalf("generate chain: %v", err2) } @@ -652,7 +652,7 @@ func TestEIP155Transition(t *testing.T) { } block.AddTx(tx) } - }, false /* intermediateHashes */) + }) if chainErr != nil { t.Fatalf("generate chain: %v", chainErr) } @@ -693,7 +693,7 @@ func TestEIP155Transition(t *testing.T) { } block.AddTx(tx) } - }, false /* intermediateHashes */) + }) if chainErr != nil { t.Fatalf("generate blocks: %v", chainErr) } @@ -776,7 +776,7 @@ func doModesTest(t *testing.T, pm prune.Mode) error { } block.AddTx(tx) } - }, false /* intermediateHashes */) + }) if err != nil { return fmt.Errorf("generate blocks: %w", err) } @@ -975,7 +975,7 @@ func TestEIP161AccountRemoval(t *testing.T) { t.Fatal(err) } block.AddTx(txn) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1052,7 +1052,7 @@ func TestDoubleAccountRemoval(t *testing.T) { assert.NoError(t, err) block.AddTx(txn) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1092,7 +1092,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) { // Generate a canonical chain to act as the main dataset m, m2 := stages.Mock(t), stages.Mock(t) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 64, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) }, false /* intermediateHashes */) + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 64, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1108,7 +1108,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) { } else { b.SetCoinbase(libcommon.Address{1}) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate fork %d: %v", i, err) } @@ -1157,7 +1157,7 @@ func 
TestLargeReorgTrieGC(t *testing.T) { shared, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 64, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate shared chain: %v", err) } @@ -1167,7 +1167,7 @@ func TestLargeReorgTrieGC(t *testing.T) { } else { b.SetCoinbase(libcommon.Address{2}) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate original chain: %v", err) } @@ -1178,7 +1178,7 @@ func TestLargeReorgTrieGC(t *testing.T) { b.SetCoinbase(libcommon.Address{3}) b.OffsetTime(-2) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate competitor chain: %v", err) } @@ -1218,7 +1218,7 @@ func TestLowDiffLongChain(t *testing.T) { chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 6*core.TriesInMemory, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) b.OffsetTime(-9) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1230,7 +1230,7 @@ func TestLowDiffLongChain(t *testing.T) { } else { b.SetCoinbase(libcommon.Address{2}) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate fork: %v", err) } @@ -1331,7 +1331,7 @@ func TestDeleteCreateRevert(t *testing.T) { tx, _ = types.SignTx(types.NewTransaction(1, bb, u256.Num0, 100000, u256.Num1, nil), *types.LatestSignerForChainID(nil), key) b.AddTx(tx) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1436,7 +1436,7 @@ func TestDeleteRecreateSlots(t *testing.T) { tx, _ = types.SignTx(types.NewTransaction(1, bb, u256.Num0, 100000, u256.Num1, nil), *types.LatestSignerForChainID(nil), key) b.AddTx(tx) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1554,7 +1554,7 @@ func TestCVE2020_26265(t *testing.T) { tx, _ = types.SignTx(types.NewTransaction(1, aa, new(uint256.Int).SetUint64(5), 100000, u256.Num1, nil), *types.LatestSignerForChainID(nil), key) b.AddTx(tx) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1621,7 +1621,7 @@ func TestDeleteRecreateAccount(t *testing.T) { tx, _ = types.SignTx(types.NewTransaction(1, aa, u256.Num1, 100000, u256.Num1, nil), *types.LatestSignerForChainID(nil), key) b.AddTx(tx) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1796,7 +1796,7 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { } expectations = append(expectations, exp) current = exp - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -1934,7 +1934,7 @@ func TestInitThenFailCreateContract(t *testing.T) { u256.Num0, 100000, u256.Num1, nil), *types.LatestSignerForChainID(nil), key) b.AddTx(tx) nonce++ - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -2026,7 +2026,7 @@ func TestEIP2718Transition(t *testing.T) { }}, }) b.AddTx(tx) - }, false /*intermediateHashes*/) + }) if err != nil { t.Fatalf("generate chain: %v", err) } @@ -2129,7 +2129,7 @@ func TestEIP1559Transition(t *testing.T) { b.AddTx(tx) } - }, false /* intermediate hashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -2183,7 +2183,7 @@ func TestEIP1559Transition(t *testing.T) { tx, _ = types.SignTx(tx, *signer, key2) b.AddTx(tx) - }, false /* intermediate hashes */) + }) if err != nil { 
t.Fatalf("generate chain: %v", err) } diff --git a/turbo/stages/chain_makers_test.go b/turbo/stages/chain_makers_test.go index e2b64262f64..aace9af1977 100644 --- a/turbo/stages/chain_makers_test.go +++ b/turbo/stages/chain_makers_test.go @@ -87,7 +87,7 @@ func TestGenerateChain(t *testing.T) { b3.Extra = []byte("foo") gen.AddUncle(b3) } - }, false /* intermediateHashes */) + }) if err != nil { fmt.Printf("generate chain: %v\n", err) } diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go index c18496488f5..3db6e653bac 100644 --- a/turbo/stages/genesis_test.go +++ b/turbo/stages/genesis_test.go @@ -119,7 +119,7 @@ func TestSetupGenesis(t *testing.T) { key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") m := stages.MockWithGenesis(t, &oldcustomg, key, false) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 4, nil, false /* intermediateHashes */) + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 4, nil) if err != nil { return nil, nil, err } diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 29190e7df9c..bc3bb5a9d4f 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -287,7 +287,8 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK } chainID, _ := uint256.FromBig(mock.ChainConfig.ChainID) shanghaiTime := mock.ChainConfig.ShanghaiTime - mock.TxPool, err = txpool.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(), *chainID, shanghaiTime, logger) + cancunTime := mock.ChainConfig.CancunTime + mock.TxPool, err = txpool.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(), *chainID, shanghaiTime, cancunTime, logger) if err != nil { tb.Fatal(err) } @@ -586,7 +587,7 @@ func (ms *MockSentry) insertPoWBlocks(chain *core.ChainPack, tx kv.RwTx) error { } initialCycle := MockInsertAsInitialCycle hook := NewHook(ms.Ctx, ms.Notifications, ms.Sync, ms.BlockReader, ms.ChainConfig, ms.Log, ms.UpdateHead) - if err = StageLoopStep(ms.Ctx, ms.DB, tx, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook); err != nil { + if err = StageLoopIteration(ms.Ctx, ms.DB, tx, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook); err != nil { return err } if ms.TxPool != nil { @@ -610,7 +611,7 @@ func (ms *MockSentry) insertPoSBlocks(chain *core.ChainPack, tx kv.RwTx) error { initialCycle := MockInsertAsInitialCycle hook := NewHook(ms.Ctx, ms.Notifications, ms.Sync, ms.BlockReader, ms.ChainConfig, ms.Log, ms.UpdateHead) - err := StageLoopStep(ms.Ctx, ms.DB, tx, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook) + err := StageLoopIteration(ms.Ctx, ms.DB, tx, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook) if err != nil { return err } @@ -623,7 +624,7 @@ func (ms *MockSentry) insertPoSBlocks(chain *core.ChainPack, tx kv.RwTx) error { FinalizedBlockHash: chain.TopBlock.Hash(), } ms.SendForkChoiceRequest(&fc) - err = StageLoopStep(ms.Ctx, ms.DB, tx, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook) + err = StageLoopIteration(ms.Ctx, ms.DB, tx, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook) if err != nil { return err } diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index b3db4a46b75..561c4105ff7 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -32,7 +32,7 @@ func TestHeaderStep(t *testing.T) { chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 100, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false 
/* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -59,7 +59,7 @@ func TestHeaderStep(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed initialCycle := stages.MockInsertAsInitialCycle - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { t.Fatal(err) } } @@ -70,7 +70,7 @@ func TestMineBlockWith1Tx(t *testing.T) { chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) require.NoError(err) { // Do 1 step to start txPool @@ -97,7 +97,7 @@ func TestMineBlockWith1Tx(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := stages.MockInsertAsInitialCycle - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, log.New(), m.BlockReader, nil); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, log.New(), m.BlockReader, nil); err != nil { t.Fatal(err) } } @@ -107,7 +107,7 @@ func TestMineBlockWith1Tx(t *testing.T) { tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(m.Address), libcommon.Address{1}, uint256.NewInt(10_000), params.TxGas, u256.Num1, nil), *types.LatestSignerForChainID(m.ChainConfig.ChainID), m.Key) require.NoError(err) gen.AddTx(tx) - }, false /* intermediateHashes */) + }) require.NoError(err) // Send NewBlock message @@ -133,7 +133,7 @@ func TestReorg(t *testing.T) { chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -165,27 +165,27 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := stages.MockInsertAsInitialCycle - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { t.Fatal(err) } // Now generate three competing branches, one short and two longer ones short, err := core.GenerateChain(m.ChainConfig, chain.TopBlock, m.Engine, m.DB, 2, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate short fork: %v", err) } long1, err := core.GenerateChain(m.ChainConfig, chain.TopBlock, m.Engine, m.DB, 10, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{2}) // Need to make headers different from short branch - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate short fork: %v", err) } // Second long chain needs to be slightly shorter than the first long chain long2, err := core.GenerateChain(m.ChainConfig, chain.TopBlock, m.Engine, m.DB, 9, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{3}) // Need to make headers different from short branch and another long branch - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate short fork: %v", err) } @@ -218,7 +218,7 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle = 
false - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { t.Fatal(err) } @@ -261,7 +261,7 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed // This is unwind step - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { t.Fatal(err) } @@ -269,7 +269,7 @@ func TestReorg(t *testing.T) { // Now generate three competing branches, one short and two longer ones short2, err := core.GenerateChain(m.ChainConfig, long1.TopBlock, m.Engine, m.DB, 2, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate short fork: %v", err) } @@ -298,7 +298,7 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle = stages.MockInsertAsInitialCycle - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { t.Fatal(err) } } @@ -308,14 +308,14 @@ func TestAnchorReplace(t *testing.T) { chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } short, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 11, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -326,7 +326,7 @@ func TestAnchorReplace(t *testing.T) { } else { b.SetCoinbase(libcommon.Address{2}) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -394,7 +394,7 @@ func TestAnchorReplace(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := stages.MockInsertAsInitialCycle - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil); err != nil { t.Fatal(err) } } @@ -403,14 +403,14 @@ func TestAnchorReplace2(t *testing.T) { m := stages.Mock(t) chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } short, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 20, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -421,7 +421,7 @@ func TestAnchorReplace2(t *testing.T) { } else { b.SetCoinbase(libcommon.Address{2}) } - }, false /* intermediateHashes */) + }) if err != nil { t.Fatalf("generate blocks: %v", err) } @@ -499,7 +499,7 @@ func TestAnchorReplace2(t *testing.T) { initialCycle := stages.MockInsertAsInitialCycle hook := 
stages.NewHook(m.Ctx, m.Notifications, m.Sync, m.BlockReader, m.ChainConfig, m.Log, m.UpdateHead) - if err := stages.StageLoopStep(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, hook); err != nil { + if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, hook); err != nil { t.Fatal(err) } } @@ -519,7 +519,7 @@ func TestForkchoiceToGenesis(t *testing.T) { tx, err := m.DB.BeginRw(m.Ctx) require.NoError(t, err) defer tx.Rollback() - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) @@ -544,7 +544,7 @@ func TestBogusForkchoice(t *testing.T) { require.NoError(t, err) defer tx.Rollback() initialCycle := stages.MockInsertAsInitialCycle - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) @@ -559,7 +559,7 @@ func TestBogusForkchoice(t *testing.T) { } m.SendForkChoiceRequest(&forkChoiceMessage) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) @@ -570,9 +570,9 @@ func TestBogusForkchoice(t *testing.T) { func TestPoSDownloader(t *testing.T) { m := stages.MockWithZeroTTD(t, true) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 2 /* n */, func(i int, b *core.BlockGen) { + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 2, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) require.NoError(t, err) // Send a payload whose parent isn't downloaded yet @@ -582,7 +582,7 @@ func TestPoSDownloader(t *testing.T) { require.NoError(t, err) defer tx.Rollback() initialCycle := stages.MockInsertAsInitialCycle - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) @@ -602,12 +602,12 @@ func TestPoSDownloader(t *testing.T) { m.ReceiveWg.Wait() // First cycle: save the downloaded header - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) // Second cycle: process the previous beacon request - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) @@ -618,7 +618,7 @@ func TestPoSDownloader(t *testing.T) { FinalizedBlockHash: chain.TopBlock.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - err = 
stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) assert.Equal(t, chain.TopBlock.Hash(), rawdb.ReadHeadBlockHash(tx)) @@ -632,9 +632,9 @@ func TestPoSDownloader(t *testing.T) { func TestPoSSyncWithInvalidHeader(t *testing.T) { m := stages.MockWithZeroTTD(t, true) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 3 /* n */, func(i int, b *core.BlockGen) { + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 3, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) - }, false /* intermediateHashes */) + }) require.NoError(t, err) lastValidHeader := chain.Headers[0] @@ -653,7 +653,7 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { require.NoError(t, err) defer tx.Rollback() initialCycle := stages.MockInsertAsInitialCycle - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) @@ -672,7 +672,7 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { } m.ReceiveWg.Wait() - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) @@ -683,7 +683,7 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { FinalizedBlockHash: invalidTip.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(t, err) bad, lastValidHash := m.HeaderDownload().IsBadHeaderPoS(invalidTip.Hash()) @@ -701,7 +701,7 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { // One empty block chain0, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, gen *core.BlockGen) { gen.SetDifficulty(big.NewInt(0)) - }, false /* intermediateHashes */) + }) require.NoError(err) // One empty block, one block with transaction for 10k wei @@ -714,7 +714,7 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { require.NoError(err) gen.AddTx(tx) } - }, false /* intermediateHashes */) + }) require.NoError(err) // One empty block, one block with transaction for 20k wei @@ -727,13 +727,13 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { require.NoError(err) gen.AddTx(tx) } - }, false /* intermediateHashes */) + }) require.NoError(err) // 3 empty blocks chain3, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 3, func(i int, gen *core.BlockGen) { gen.SetDifficulty(big.NewInt(0)) - }, false /* intermediateHashes */) + }) require.NoError(err) //------------------------------------------ @@ -743,7 +743,7 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { require.NoError(err) defer tx.Rollback() initialCycle := stages.MockInsertAsInitialCycle - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) 
require.NoError(err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) @@ -755,7 +755,7 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { FinalizedBlockHash: chain0.TopBlock.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) assert.Equal(t, chain0.TopBlock.Hash(), rawdb.ReadHeadBlockHash(tx)) @@ -765,7 +765,7 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { //------------------------------------------ m.SendPayloadRequest(chain1.TopBlock) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) payloadStatus1 := m.ReceivePayloadStatus() @@ -776,7 +776,7 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { FinalizedBlockHash: chain1.TopBlock.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) assert.Equal(t, chain1.TopBlock.Hash(), rawdb.ReadHeadBlockHash(tx)) @@ -786,7 +786,7 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { //------------------------------------------ m.SendPayloadRequest(chain2.TopBlock) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) payloadStatus2 := m.ReceivePayloadStatus() @@ -797,7 +797,7 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { FinalizedBlockHash: chain2.TopBlock.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) assert.Equal(t, chain2.TopBlock.Hash(), rawdb.ReadHeadBlockHash(tx)) @@ -808,13 +808,13 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { //------------------------------------------ preTop3 := chain3.Blocks[chain3.Length()-2] m.SendPayloadRequest(preTop3) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) payloadStatus3 := m.ReceivePayloadStatus() assert.Equal(t, remote.EngineStatus_VALID, payloadStatus3.Status) m.SendPayloadRequest(chain3.TopBlock) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(err) stages.SendPayloadStatus(m.HeaderDownload(), 
rawdb.ReadHeadBlockHash(tx), err) payloadStatus3 = m.ReceivePayloadStatus() @@ -825,7 +825,7 @@ func TestPOSWrongTrieRootReorgs(t *testing.T) { FinalizedBlockHash: chain3.TopBlock.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - err = stages.StageLoopStep(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) + err = stages.StageLoopIteration(m.Ctx, m.DB, tx, m.Sync, initialCycle, m.Log, m.BlockReader, nil) require.NoError(err) stages.SendPayloadStatus(m.HeaderDownload(), rawdb.ReadHeadBlockHash(tx), err) assert.Equal(t, chain3.TopBlock.Hash(), rawdb.ReadHeadBlockHash(tx)) diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index dfb43a9c746..281289bc9ca 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -8,6 +8,8 @@ import ( "time" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" @@ -17,13 +19,11 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/erigon/core/rawdb/blockio" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/cmd/sentry/sentry" "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/rawdb/blockio" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" @@ -31,6 +31,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/turbo/engineapi" + "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/stages/bodydownload" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" @@ -89,7 +90,7 @@ func StageLoop(ctx context.Context, } // Estimate the current top height seen from the peer - err := StageLoopStep(ctx, db, nil, sync, initialCycle, logger, blockReader, hook) + err := StageLoopIteration(ctx, db, nil, sync, initialCycle, logger, blockReader, hook) db.View(ctx, func(tx kv.Tx) error { SendPayloadStatus(hd, rawdb.ReadHeadBlockHash(tx), err) return nil @@ -124,7 +125,7 @@ func StageLoop(ctx context.Context, } } -func StageLoopStep(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stagedsync.Sync, initialCycle bool, logger log.Logger, blockReader services.FullBlockReader, hook *Hook) (err error) { +func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stagedsync.Sync, initialCycle bool, logger log.Logger, blockReader services.FullBlockReader, hook *Hook) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack())