diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md index c7c848431c..b419362792 100644 --- a/.github/ISSUE_TEMPLATE/bug.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -1,6 +1,6 @@ --- name: Report a bug -about: Something with Polygon zkEVM is not working as expected +about: Something with Xgon is not working as expected title: '' labels: 'type:bug' assignees: '' @@ -8,7 +8,7 @@ assignees: '' #### System information -zkEVM Node version: `v0.0.X-RCXX` +Xgon Node version: `v0.0.X-RCXX` OS & Version: `Windows/Linux/OSX` Commit hash : (if `develop`) Network: `Mainnet/Testnet` diff --git a/.github/workflows/test-from-prover.yml b/.github/workflows/test-from-prover.yml index ca917668a4..db94d4737f 100644 --- a/.github/workflows/test-from-prover.yml +++ b/.github/workflows/test-from-prover.yml @@ -25,7 +25,7 @@ jobs: - name: Checkout code uses: actions/checkout@v3 with: - repository: 0xPolygonHermez/zkevm-node + repository: okx/xgon-node - name: Install Go uses: actions/setup-go@v3 diff --git a/Dockerfile b/Dockerfile index 69829d3151..d1a31cf906 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,8 +13,8 @@ RUN cd /src && make build # CONTAINER FOR RUNNING BINARY FROM alpine:3.18.0 -COPY --from=build /src/dist/zkevm-node /app/zkevm-node +COPY --from=build /src/dist/xgon-node /app/xgon-node COPY --from=build /src/config/environments/testnet/node.config.toml /app/example.config.toml RUN apk update && apk add postgresql15-client EXPOSE 8123 -CMD ["/bin/sh", "-c", "/app/zkevm-node run"] +CMD ["/bin/sh", "-c", "/app/xgon-node run"] diff --git a/Makefile b/Makefile index 363dceea94..d6225a7fae 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ endif GOBASE := $(shell pwd) GOBIN := $(GOBASE)/dist GOENVVARS := GOBIN=$(GOBIN) CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) -GOBINARY := zkevm-node +GOBINARY := xgon-node GOCMD := $(GOBASE)/cmd LDFLAGS += -X 'github.com/0xPolygonHermez/zkevm-node.Version=$(VERSION)' @@ -35,21 +35,21 @@ build: ## Builds the binary locally into ./dist .PHONY: build-docker build-docker: ## Builds a docker image with the node binary - docker build -t zkevm-node -f ./Dockerfile . + docker build -t xgon-node -f ./Dockerfile . .PHONY: build-docker-nc build-docker-nc: ## Builds a docker image with the node binary - but without build cache - docker build --no-cache=true -t zkevm-node -f ./Dockerfile . + docker build --no-cache=true -t xgon-node -f ./Dockerfile . .PHONY: run-rpc run-rpc: ## Runs all the services need to run a local zkEMV RPC node - docker-compose up -d zkevm-state-db zkevm-pool-db + docker-compose up -d xgon-state-db xgon-pool-db sleep 2 - docker-compose up -d zkevm-prover + docker-compose up -d xgon-prover sleep 5 - docker-compose up -d zkevm-sync + docker-compose up -d xgon-sync sleep 2 - docker-compose up -d zkevm-rpc + docker-compose up -d xgon-rpc .PHONY: stop stop: ## Stops all services diff --git a/README.md b/README.md index e5a802d6f0..5d391ddcb2 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,16 @@ -# zkEVM Node +# Xgon Node -zkEVM Node is a Go implementation of a node that operates the Polygon zkEVM Network. +Xgon Node is a Go implementation of a node that operates the Xgon Network. -## About the Polygon zkEVM network +## About the Xgon network -Since this is an implementation of a protocol it's fundamental to understand it, [here](https://zkevm.polygon.technology/docs/zknode/zknode-overview) you can find the specification of the protocol. 
+Since this is an implementation of a protocol it's fundamental to understand it, [here]() you can find the specification of the protocol. Glossary: - L1: Base blockchain where the rollup smart contracts are deployed. It's Ethereum or a testnet of Ethereum, but it could be any EVM compatible blockchain. -- L2: the rollup network aka the Polygon zkEVM network. -- Batch: a group of transactions that are executed/proved, using the [zkEVM prover](https://github.com/0xPolygonHermez/zkevm-prover) and sent to / synchronized from L1 +- L2: the rollup network aka the Xgon network. +- Batch: a group of transactions that are executed/proved, using the [Xgon prover]() and sent to / synchronized from L1 - Sequencer: the actor that is responsible for selecting transactions, putting them in a specific order, and sending them in batches to L1 - Trusted sequencer: sequencer that has special privileges, there can only be one trusted sequencer. The privileges granted to the trusted sequencer allow it to forecast the batches that will be applied to L1. This way it can commit to a specific sequence before interacting with L1. This is done to achieve fast finality and reduce costs associated with using the network (lower gas fees) - Permissionless sequencer: sequencer role that can be performed by anyone. It has competitive disadvantages compared to the trusted sequencer (slow finality, MEV attacks). Its main purpose is to provide censorship resistance and unstoppability features to the network. @@ -20,9 +20,9 @@ Glossary: - Trusted state: state reached through processing transactions that have been shared by the trusted sequencer. This state is considered trusted as the trusted sequencer could commit to a certain sequence, and then send a different one to L1 - Virtual state: state reached through processing transactions that have already been submitted to L1. These transactions are sent in batches by either trusted or permissionless sequencers. Those batches are also called virtual batches. Note that this state is trustless as it relies on L1 security assumptions - Consolidated state: state that is proven on-chain by submitting a ZKP (Zero Knowledge Proof) that proves the execution of a sequence of the last virtual batch. -- Invalid transaction: a transaction that can't be processed and doesn't affect the state. Note that such a transaction could be included in a virtual batch. The reason for a transaction to be invalid could be related to the Ethereum protocol (invalid nonce, not enough balance, ...) or due to limitations introduced by the zkEVM (each batch can make use of a limited amount of resources such as the total amount of keccak hashes that can be computed) +- Invalid transaction: a transaction that can't be processed and doesn't affect the state. Note that such a transaction could be included in a virtual batch. The reason for a transaction to be invalid could be related to the Ethereum protocol (invalid nonce, not enough balance, ...) or due to limitations introduced by the Xgon (each batch can make use of a limited amount of resources such as the total amount of keccak hashes that can be computed) - Reverted transaction: a transaction that is executed, but is reverted (because of smart contract logic). The main difference with *invalid transaction* is that this transaction modifies the state, at least to increment nonce of the sender. 
-- Proof of Efficiency (PoE): name of the protocol used by the network, it's enforced by the [smart contracts](https://github.com/0xPolygonHermez/zkevm-contracts) +- Proof of Efficiency (PoE): name of the protocol used by the network, it's enforced by the [smart contracts](https://github.com/okx/xgon-contracts) ## Architecture @@ -41,10 +41,10 @@ The diagram represents the main components of the software and how they interact - State: Responsible for managing the state data (batches, blocks, transactions, ...) that is stored on the `state SB`. It also handles the integration with the `executor` and the `Merkletree` service - State DB: persistence layer for the state data (except the Merkletree that is handled by the `Merkletree` service) - Aggregator: consolidates batches by generating ZKPs (Zero Knowledge proofs). To do so it gathers the necessary data that the `prover` needs as input through the `state` and sends a request to it. Once the proof is generated it's sent to Ethereum through the `etherman` -- Prover/Executor: service that generates ZK proofs. Note that this component is not implemented in this repository, and it's treated as a "black box" from the perspective of the node. The prover/executor has two implementations: [JS reference implementation](https://github.com/0xPolygonHermez/zkevm-proverjs) and [C production-ready implementation](https://github.com/0xPolygonHermez/zkevm-prover). Although it's the same software/service, it has two very different purposes: +- Prover/Executor: service that generates ZK proofs. Note that this component is not implemented in this repository, and it's treated as a "black box" from the perspective of the node. The prover/executor has two implementations: [JS reference implementation](https://github.com/0xPolygonHermez/zkevm-proverjs) and [C production-ready implementation](https://github.com/okx/xgon-prover). Although it's the same software/service, it has two very different purposes: - Provide an EVM implementation that allows processing transactions and getting all needed results metadata (state root, receipts, logs, ...) - Generate ZKPs -- Merkletree: service that stores the Merkletree, containing all the account information (balances, nonces, smart contract code, and smart contract storage). This component is also not implemented in this repo and is consumed as an external service by the node. The implementation can be found [here](https://github.com/0xPolygonHermez/zkevm-prover) +- Merkletree: service that stores the Merkletree, containing all the account information (balances, nonces, smart contract code, and smart contract storage). This component is also not implemented in this repo and is consumed as an external service by the node. The implementation can be found [here](https://github.com/okx/xgon-prover) ## Roles of the network @@ -63,8 +63,8 @@ Required services and components: There must be only one synchronizer, and it's recommended that it has exclusive access to an executor instance, although it's not necessary. 
This role can perfectly be run in a single instance, however, the JSON RPC and executor services can benefit from running in multiple instances, if the performance decreases due to the number of requests received -- [`zkEVM RPC endpoints`](./docs/json-rpc-endpoints.md) -- [`zkEVM RPC Custom endpoints documentation`](./docs/zkEVM-custom-endpoints.md) +- [`Xgon RPC endpoints`](./docs/json-rpc-endpoints.md) +- [`Xgon RPC Custom endpoints documentation`](./docs/zkEVM-custom-endpoints.md) ### Trusted sequencer diff --git a/cmd/run.go b/cmd/run.go index 41f5e08655..34319842ce 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -5,6 +5,7 @@ import ( "crypto/ecdsa" "errors" "fmt" + "github.com/ethereum/go-ethereum/common" "net" "net/http" "net/http/pprof" @@ -168,7 +169,7 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if poolInstance == nil { - poolInstance = createPool(c.Pool, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } seq := createSequencer(*c, poolInstance, ethTxManagerStorage, st, eventLog) go seq.Start(cliCtx.Context) @@ -180,7 +181,7 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if poolInstance == nil { - poolInstance = createPool(c.Pool, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } seqSender := createSequenceSender(*c, poolInstance, ethTxManagerStorage, st, eventLog) go seqSender.Start(cliCtx.Context) @@ -192,7 +193,7 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if poolInstance == nil { - poolInstance = createPool(c.Pool, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } if c.RPC.EnableL2SuggestedGasPricePolling { // Needed for rejecting transactions with too low gas price @@ -211,7 +212,7 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if poolInstance == nil { - poolInstance = createPool(c.Pool, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } go runSynchronizer(*c, etherman, etm, st, poolInstance, eventLog) case ETHTXMANAGER: @@ -231,7 +232,7 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if poolInstance == nil { - poolInstance = createPool(c.Pool, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } go runL2GasPriceSuggester(c.L2GasPriceSuggester, st, poolInstance, etherman) } @@ -488,13 +489,13 @@ func newState(ctx context.Context, c *config.Config, l2ChainID uint64, forkIDInt return st } -func createPool(cfgPool pool.Config, l2ChainID uint64, st *state.State, eventLog *event.EventLog) *pool.Pool { +func createPool(cfgPool pool.Config, l2BridgeAddr common.Address, l2ChainID uint64, st *state.State, eventLog *event.EventLog) *pool.Pool { runPoolMigrations(cfgPool.DB) poolStorage, err := pgpoolstorage.NewPostgresPoolStorage(cfgPool.DB) if err != nil { log.Fatal(err) } - poolInstance := pool.NewPool(cfgPool, poolStorage, st, l2ChainID, eventLog) + poolInstance := pool.NewPool(cfgPool, poolStorage, st, l2BridgeAddr, l2ChainID, eventLog) return poolInstance } diff --git a/config/default.go b/config/default.go index 4df6c49a56..84b50088b2 100644 --- a/config/default.go +++ b/config/default.go @@ -15,12 +15,13 @@ Outputs = ["stderr"] User = "state_user" Password = "state_password" Name = "state_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 
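# The two keys added to [Pool] below, FreeClaimGasLimit and FreeGasAddress, pair with the
# cmd/run.go change above that now passes NetworkConfig.L2BridgeAddr into createPool/pool.NewPool;
# presumably this lets the pool accept bridge-claim transactions and transactions from
# FreeGasAddress with a zero gas price, capped at FreeClaimGasLimit gas. The pool-side logic
# is not part of these hunks, so treat this reading as an assumption.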
[Pool] +FreeClaimGasLimit = 150000 IntervalToRefreshBlockedAddresses = "5m" IntervalToRefreshGasPrices = "5s" MaxTxBytesSize=100132 @@ -30,11 +31,12 @@ MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" AccountQueue = 64 GlobalQueue = 1024 +FreeGasAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" [Pool.DB] User = "pool_user" Password = "pool_password" Name = "pool_db" - Host = "zkevm-pool-db" + Host = "xgon-pool-db" Port = "5432" EnableLog = false MaxConns = 200 @@ -139,10 +141,10 @@ CleanHistoryPeriod = "1h" CleanHistoryTimeRetention = "5m" [MTClient] -URI = "zkevm-prover:50061" +URI = "xgon-prover:50061" [Executor] -URI = "zkevm-prover:50071" +URI = "xgon-prover:50071" MaxResourceExhaustedAttempts = 3 WaitOnResourceExhaustion = "1s" MaxGRPCMessageSize = 100000000 @@ -156,7 +158,7 @@ Enabled = false User = "prover_user" Password = "prover_pass" Name = "prover_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 diff --git a/config/environments/local/local.node.config.toml b/config/environments/local/local.node.config.toml index 9b9c99e6b9..e00361752d 100644 --- a/config/environments/local/local.node.config.toml +++ b/config/environments/local/local.node.config.toml @@ -10,12 +10,13 @@ Outputs = ["stderr"] User = "state_user" Password = "state_password" Name = "state_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 [Pool] +FreeClaimGasLimit = 1500000 IntervalToRefreshBlockedAddresses = "5m" IntervalToRefreshGasPrices = "5s" MaxTxBytesSize=100132 @@ -23,11 +24,12 @@ MaxTxDataBytesSize=100000 DefaultMinGasPriceAllowed = 1000000000 MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" +FreeGasAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" [Pool.DB] User = "pool_user" Password = "pool_password" Name = "pool_db" - Host = "zkevm-pool-db" + Host = "xgon-pool-db" Port = "5432" EnableLog = false MaxConns = 200 @@ -45,7 +47,7 @@ Port = 8545 ReadTimeout = "60s" WriteTimeout = "60s" MaxRequestsPerIPAndSecond = 5000 -SequencerNodeURI = "https://internal.zkevm-test.net:2083/" +SequencerNodeURI = "https://internal.xgon-test.net:2083/" EnableL2SuggestedGasPricePolling = true [RPC.WebSockets] Enabled = true @@ -125,11 +127,25 @@ Type = "default" DefaultGasPriceWei = 1000000000 MaxGasPriceWei = 0 +#Type = "fixed" +#UpdatePeriod = "10s" +#DefaultGasPriceWei = 1000000000 +#KafkaURL = "127.0.0.1:9092" +#Topic = "middle_coinPrice_push" +#GroupID = "web3_okbc_explorerchainprice" +## just in SASL_SSL mode +#Username = "" +#Password = "" +#RootCAPath = "only-4096-ca-cert" +#DefaultL2CoinPrice = 40 +#GasPriceUsdt = 0.0001 +#L2CoinId = 7184 + [MTClient] -URI = "zkevm-prover:50061" +URI = "xgon-prover:50061" [Executor] -URI = "zkevm-prover:50071" +URI = "xgon-prover:50071" MaxResourceExhaustedAttempts = 3 WaitOnResourceExhaustion = "1s" MaxGRPCMessageSize = 100000000 @@ -146,7 +162,7 @@ ProfilingEnabled = false User = "prover_user" Password = "prover_pass" Name = "prover_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 diff --git a/config/environments/mainnet/node.config.toml b/config/environments/mainnet/node.config.toml index a328ba32ba..6282aef3a7 100644 --- a/config/environments/mainnet/node.config.toml +++ b/config/environments/mainnet/node.config.toml @@ -7,22 +7,24 @@ Outputs = ["stderr"] User = "state_user" Password = "state_password" Name = "state_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = 
"5432" EnableLog = false MaxConns = 200 [Pool] +FreeClaimGasLimit = 1500000 MaxTxBytesSize=100132 MaxTxDataBytesSize=100000 DefaultMinGasPriceAllowed = 1000000000 MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" +FreeGasAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" [Pool.DB] User = "pool_user" Password = "pool_password" Name = "pool_db" - Host = "zkevm-pool-db" + Host = "xgon-pool-db" Port = "5432" EnableLog = false MaxConns = 200 @@ -40,7 +42,7 @@ Port = 8545 ReadTimeout = "60s" WriteTimeout = "60s" MaxRequestsPerIPAndSecond = 5000 -SequencerNodeURI = "https://zkevm-rpc.com" +SequencerNodeURI = "https://xgon-rpc.com" EnableL2SuggestedGasPricePolling = false [RPC.WebSockets] Enabled = true @@ -52,10 +54,10 @@ SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc [MTClient] -URI = "zkevm-prover:50061" +URI = "xgon-prover:50061" [Executor] -URI = "zkevm-prover:50071" +URI = "xgon-prover:50071" MaxResourceExhaustedAttempts = 3 WaitOnResourceExhaustion = "1s" MaxGRPCMessageSize = 100000000 @@ -72,7 +74,7 @@ ProfilingEnabled = false User = "prover_user" Password = "prover_pass" Name = "prover_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 \ No newline at end of file diff --git a/config/environments/testnet/node.config.toml b/config/environments/testnet/node.config.toml index c5600fb5c4..a636dab9c8 100644 --- a/config/environments/testnet/node.config.toml +++ b/config/environments/testnet/node.config.toml @@ -7,12 +7,13 @@ Outputs = ["stderr"] User = "state_user" Password = "state_password" Name = "state_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 [Pool] +FreeClaimGasLimit = 1500000 IntervalToRefreshBlockedAddresses = "5m" IntervalToRefreshGasPrices = "5s" MaxTxBytesSize=100132 @@ -20,11 +21,12 @@ MaxTxDataBytesSize=100000 DefaultMinGasPriceAllowed = 1000000000 MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" +FreeGasAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" [Pool.DB] User = "pool_user" Password = "pool_password" Name = "pool_db" - Host = "zkevm-pool-db" + Host = "xgon-pool-db" Port = "5432" EnableLog = false MaxConns = 200 @@ -42,7 +44,7 @@ Port = 8545 ReadTimeout = "60s" WriteTimeout = "60s" MaxRequestsPerIPAndSecond = 5000 -SequencerNodeURI = "https://rpc.public.zkevm-test.net/" +SequencerNodeURI = "https://rpc.public.xgon-test.net/" EnableL2SuggestedGasPricePolling = false [RPC.WebSockets] Enabled = true @@ -53,10 +55,10 @@ SyncInterval = "2s" SyncChunkSize = 100 [MTClient] -URI = "zkevm-prover:50061" +URI = "xgon-prover:50061" [Executor] -URI = "zkevm-prover:50071" +URI = "xgon-prover:50071" MaxResourceExhaustedAttempts = 3 WaitOnResourceExhaustion = "1s" MaxGRPCMessageSize = 100000000 @@ -73,7 +75,7 @@ ProfilingEnabled = false User = "prover_user" Password = "prover_pass" Name = "prover_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index c9a2f4bd6d..d5babe01bc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,19 +1,19 @@ version: "3.5" networks: default: - name: zkevm + name: Xgon services: - zkevm-rpc: - container_name: zkevm-rpc + xgon-rpc: + container_name: xgon-rpc restart: unless-stopped depends_on: - zkevm-pool-db: + xgon-pool-db: condition: service_healthy - zkevm-state-db: + xgon-state-db: condition: 
service_healthy - zkevm-sync: + xgon-sync: condition: service_started - image: zkevm-node + image: xgon-node deploy: resources: limits: @@ -30,15 +30,15 @@ services: command: - "/bin/sh" - "-c" - - "/app/zkevm-node run --network ${ZKEVM_NETWORK} --cfg /app/config.toml --components rpc" + - "/app/xgon-node run --network ${ZKEVM_NETWORK} --cfg /app/config.toml --components rpc" - zkevm-sync: - container_name: zkevm-sync + xgon-sync: + container_name: xgon-sync restart: unless-stopped depends_on: - zkevm-state-db: + xgon-state-db: condition: service_healthy - image: zkevm-node + image: xgon-node ports: - 9092:9091 # needed if metrics enabled deploy: @@ -54,10 +54,10 @@ services: command: - "/bin/sh" - "-c" - - "/app/zkevm-node run --network ${ZKEVM_NETWORK} --cfg /app/config.toml --components synchronizer" + - "/app/xgon-node run --network ${ZKEVM_NETWORK} --cfg /app/config.toml --components synchronizer" - zkevm-state-db: - container_name: zkevm-state-db + xgon-state-db: + container_name: xgon-state-db restart: unless-stopped image: postgres:15 healthcheck: @@ -82,8 +82,8 @@ services: - "-c" - "config_file=/etc/postgresql.conf" - zkevm-pool-db: - container_name: zkevm-pool-db + xgon-pool-db: + container_name: xgon-pool-db restart: unless-stopped image: postgres:15 healthcheck: @@ -104,12 +104,12 @@ services: - "-N" - "500" - zkevm-prover: - container_name: zkevm-prover + xgon-prover: + container_name: xgon-prover restart: unless-stopped - image: hermeznetwork/zkevm-prover:v2.2.0 + image: hermeznetwork/xgon-prover:v2.2.0 depends_on: - zkevm-state-db: + xgon-state-db: condition: service_healthy ports: - 50061:50061 # MT diff --git a/docs/ci/actions.md b/docs/ci/actions.md index 821c6edcea..a1507167f8 100644 --- a/docs/ci/actions.md +++ b/docs/ci/actions.md @@ -60,7 +60,7 @@ PR opened and pushing changes to PRs. There are two variants, `trusted` and ## updatedeps -The `zkevm-node` repo requires some external resources for working. We call +The `xgon-node` repo requires some external resources for working. We call these resources custom dependencies (as opposed to the golang packages required by the code). @@ -80,7 +80,7 @@ for changes the client/server golang code is generated from them. With all the potential changes we create a new PR and the tests are run on it, so that we can review and eventually approve the changes to be included in the -`zkevm-node` repo. +`xgon-node` repo. ### When is executed diff --git a/docs/components/account_keystore.md b/docs/components/account_keystore.md index ea2f15dbe6..c9f36edcb8 100644 --- a/docs/components/account_keystore.md +++ b/docs/components/account_keystore.md @@ -3,10 +3,10 @@ This file contains your Ethereum L1 private key, but it will be encrypted at rest using a password of your choice. The ZKEVM Node - depending on which operating mode it's set up - will use this file in conjunction with the password to authorize L1 transactions. ```bash -docker run --rm hermeznetwork/zkevm-node:latest sh -c "/app/zkevm-node encryptKey --pk=[your private key] --pw=[password to encrypt file] --output=./keystore; cat ./keystore/*" > account.keystore +docker run --rm okx/xgon-node:latest sh -c "/app/xgon-node encryptKey --pk=[your private key] --pw=[password to encrypt file] --output=./keystore; cat ./keystore/*" > account.keystore ``` **NOTE**: - Replace `[your private key]` with your Ethereum L1 account private key -- Replace `[password to encrypt file]` with a password used for file encryption. 
This password must be passed to the Node later on via env variable (`ZKEVM_NODE_ETHERMAN_PRIVATEKEYPASSWORD`) +- Replace `[password to encrypt file]` with a password used for file encryption. This password must be passed to the Node later on via env variable (`XGON_NODE_ETHERMAN_PRIVATEKEYPASSWORD`) diff --git a/docs/components/aggregator.md b/docs/components/aggregator.md index a1c314a22e..4f897c7d38 100644 --- a/docs/components/aggregator.md +++ b/docs/components/aggregator.md @@ -1,8 +1,8 @@ # Component: Aggregator -## ZKEVM Aggregator: +## XGON Aggregator: -The ZKEVM Aggregator is an optional module responsible for receiving connections from Prover(s) in order to generate the proofs for the batches not proven yet. +The XGON Aggregator is an optional module responsible for receiving connections from Prover(s) in order to generate the proofs for the batches not proven yet. ## Hard dependencies: @@ -12,33 +12,33 @@ The ZKEVM Aggregator is an optional module responsible for receiving connections ## Running: -The preferred way to run the ZKEVM Aggregator component is via Docker and Docker Compose. +The preferred way to run the XGON Aggregator component is via Docker and Docker Compose. ```bash -docker pull hermeznetwork/zkevm-node +docker pull okx/xgon-node ``` -To orchestrate multiple deployments of the different ZKEVM Node components, a `docker-compose.yaml` file for Docker Compose can be used: +To orchestrate multiple deployments of the different XGON Node components, a `docker-compose.yaml` file for Docker Compose can be used: ```yaml - zkevm-aggregator: - container_name: zkevm-aggregator - image: zkevm-node + xgon-aggregator: + container_name: xgon-aggregator + image: xgon-node command: - "/bin/sh" - "-c" - - "/app/zkevm-node run --genesis /app/genesis.json --cfg /app/config.toml --components aggregator" + - "/app/xgon-node run --genesis /app/genesis.json --cfg /app/config.toml --components aggregator" ``` The container alone needs some parameters configured, access to certain configuration files and the appropriate ports exposed. - volumes: - - `your Account Keystore file`: /pk/keystore (note, this `/pk/keystore` value is the default path that's written in the Public Configuration files on this repo, meant to expedite deployments, it can be superseded via an env flag `ZKEVM_NODE_ETHERMAN_PRIVATEKEYPATH`.) + - `your Account Keystore file`: /pk/keystore (note, this `/pk/keystore` value is the default path that's written in the Public Configuration files on this repo, meant to expedite deployments, it can be superseded via an env flag `XGON_NODE_ETHERMAN_PRIVATEKEYPATH`.) - `your config.toml file`: /app/config.toml - `your genesis.json file`: /app/genesis.json - environment: Env variables that supersede the config file - - `ZKEVM_NODE_STATEDB_HOST`: Name of StateDB Database Host + - `XGON_NODE_STATEDB_HOST`: Name of StateDB Database Host ### The Account Keystore file: diff --git a/docs/components/databases.md b/docs/components/databases.md index 37e5ee5a92..fec2e48137 100644 --- a/docs/components/databases.md +++ b/docs/components/databases.md @@ -12,11 +12,11 @@ Note the `environment` values will change per DB. The StateDB needs to generate some extra databases and tables (`merkletree`) for use with the MerkleTree/Executor service. 
-This is done via an sql file: [init_prover_db.sql](https://github.com/0xPolygonHermez/zkevm-node/blob/develop/db/scripts/init_prover_db.sql) +This is done via an sql file: [init_prover_db.sql](https://github.com/okx/xgon-node/blob/develop/db/scripts/init_prover_db.sql) ```yaml -zkevm-state-db: - container_name: zkevm-state-db +xgon-state-db: + container_name: xgon-state-db image: postgres:15 deploy: resources: @@ -38,8 +38,8 @@ zkevm-state-db: - **Other DBs: Pool/RPC**: ```yaml - zkevm-pool-db: - container_name: zkevm-pool-db + xgon-pool-db: + container_name: xgon-pool-db image: postgres:15 deploy: resources: diff --git a/docs/components/prover.md b/docs/components/prover.md index eedc0d088f..d00dbd1cd3 100644 --- a/docs/components/prover.md +++ b/docs/components/prover.md @@ -1,10 +1,10 @@ # Component: Prover -NOTE: The Prover is not considered part of the ZKEVM Node and all issues and suggestions should be sent to the [Prover repo](https://github.com/0xPolygonHermez/zkevm-prover/). +NOTE: The Prover is not considered part of the XGON Node and all issues and suggestions should be sent to the [Prover repo](https://github.com/okx/xgon-prover/). -## ZKEVM Prover: +## XGON Prover: -The ZKEVM Prover image hosts different components, *Merkle Tree*, *Executor* and finally the actual *Prover*. +The XGON Prover image hosts different components, *Merkle Tree*, *Executor* and finally the actual *Prover*. ## Hard dependencies: @@ -12,18 +12,18 @@ The ZKEVM Prover image hosts different components, *Merkle Tree*, *Executor* and ## Running: -The preferred way to run the ZKEVM Prover component is via Docker and Docker Compose. +The preferred way to run the XGON Prover component is via Docker and Docker Compose. ```bash -docker pull hermeznetwork/zkevm-prover +docker pull hermeznetwork/xgon-prover ``` -To orchestrate multiple deployments of the different ZKEVM Node components, a `docker-compose.yaml` file for Docker Compose can be used: +To orchestrate multiple deployments of the different XGON Node components, a `docker-compose.yaml` file for Docker Compose can be used: ```yaml - zkevm-prover: - container_name: zkevm-prover - image: zkevm-prover + xgon-prover: + container_name: xgon-prover + image: xgon-prover volumes: - ./prover-config.json:/usr/src/app/config.json command: > diff --git a/docs/components/rpc.md b/docs/components/rpc.md index 3725ed0436..a787d6eeac 100644 --- a/docs/components/rpc.md +++ b/docs/components/rpc.md @@ -1,8 +1,8 @@ # Component: RPC -## ZKEVM RPC: +## XGON RPC: -The ZKEVM RPC relays transactions to the Trusted sequencer. +The XGON RPC relays transactions to the Trusted sequencer. ## Hard dependencies: @@ -13,22 +13,22 @@ The ZKEVM RPC relays transactions to the Trusted sequencer. ## Running: -The preferred way to run the ZKEVM RPC component is via Docker and Docker Compose. +The preferred way to run the XGON RPC component is via Docker and Docker Compose. 
```bash -docker pull hermeznetwork/zkevm-node +docker pull okx/xgon-node ``` -To orchestrate multiple deployments of the different ZKEVM Node components, a `docker-compose.yaml` file for Docker Compose can be used: +To orchestrate multiple deployments of the different XGON Node components, a `docker-compose.yaml` file for Docker Compose can be used: ```yaml - zkevm-rpc: - container_name: zkevm-rpc - image: zkevm-node + xgon-rpc: + container_name: xgon-rpc + image: xgon-node command: - "/bin/sh" - "-c" - - "/app/zkevm-node run --genesis /app/genesis.json --cfg /app/config.toml --components rpc" + - "/app/xgon-node run --genesis /app/genesis.json --cfg /app/config.toml --components rpc" ``` The container alone needs some parameters configured, access to certain configuration files and the appropriate ports exposed. @@ -37,9 +37,9 @@ The container alone needs some parameters configured, access to certain configur - `8545:8545`: RPC Port - `9091:9091`: Needed if Prometheus metrics are enabled - environment: Env variables that supersede the config file - - `ZKEVM_NODE_STATEDB_HOST`: Name of StateDB Database Host - - `ZKEVM_NODE_POOL_HOST`: Name of PoolDB Database Host - - `ZKEVM_NODE_RPC_DB_HOST`: Name of RPCDB Database Host + - `XGON_NODE_STATEDB_HOST`: Name of StateDB Database Host + - `XGON_NODE_POOL_HOST`: Name of PoolDB Database Host + - `XGON_NODE_RPC_DB_HOST`: Name of RPCDB Database Host - volumes: - `your config.toml file`: /app/config.toml - `your genesis file`: /app/genesis.json diff --git a/docs/components/sequencer.md b/docs/components/sequencer.md index 3490494de7..9de06284d0 100644 --- a/docs/components/sequencer.md +++ b/docs/components/sequencer.md @@ -1,27 +1,27 @@ # Component: Sequencer -## ZKEVM Sequencer: +## Xgon Sequencer: -The ZKEVM Sequencer is an optional but ancillary module that proposes new batches using transactions stored in the Pool Database. +The Xgon Sequencer is an optional but ancillary module that proposes new batches using transactions stored in the Pool Database. ## Running: -The preferred way to run the ZKEVM Sequencer component is via Docker and Docker Compose. +The preferred way to run the Xgon Sequencer component is via Docker and Docker Compose. ```bash -docker pull hermeznetwork/zkevm-node +docker pull okx/xgon-node ``` -To orchestrate multiple deployments of the different ZKEVM Node components, a `docker-compose.yaml` file for Docker Compose can be used: +To orchestrate multiple deployments of the different Xgon Node components, a `docker-compose.yaml` file for Docker Compose can be used: ```yaml - zkevm-sequencer: - container_name: zkevm-sequencer - image: zkevm-node + xgon-sequencer: + container_name: xgon-sequencer + image: xgon-node command: - "/bin/sh" - "-c" - - "/app/zkevm-node run --genesis /app/genesis.json --cfg /app/config.toml --components sequencer" + - "/app/xgon-node run --genesis /app/genesis.json --cfg /app/config.toml --components sequencer" ``` The container alone needs some parameters configured, access to certain configuration files and the appropriate ports exposed. diff --git a/docs/components/synchronizer.md b/docs/components/synchronizer.md index 0e79e52a37..7ced71525d 100644 --- a/docs/components/synchronizer.md +++ b/docs/components/synchronizer.md @@ -1,31 +1,31 @@ # Component: Synchronizer -## ZKEVM Synchronizer: +## XGON Synchronizer: -The ZKEVM Synchronizer is the **base** component for which all others will depend on. 
You can *mix and match* different components to achieve a different outcome, be it sending transactions or computing proofs, but the Sync module will need to be up and running. +The XGON Synchronizer is the **base** component for which all others will depend on. You can *mix and match* different components to achieve a different outcome, be it sending transactions or computing proofs, but the Sync module will need to be up and running. -This module syncs data between the Layer 1 Ethereum network and ZKEVM L2 network. +This module syncs data between the Layer 1 Ethereum network and XGON L2 network. ## Running: -The preferred way to run the ZKEVM Synchronizer component is via Docker and Docker Compose. +The preferred way to run the XGON Synchronizer component is via Docker and Docker Compose. ```bash -docker pull hermeznetwork/zkevm-node +docker pull hermeznetwork/xgon-node ``` -To orchestrate multiple deployments of the different ZKEVM Node components, a `docker-compose.yaml` file for Docker Compose can be used: +To orchestrate multiple deployments of the different XGON Node components, a `docker-compose.yaml` file for Docker Compose can be used: **THIS STEP IS MANDATORY ON ALL DEPLOYMENT MODES** ```yaml - zkevm-sync: - container_name: zkevm-sync - image: zkevm-node + xgon-sync: + container_name: xgon-sync + image: xgon-node command: - "/bin/sh" - "-c" - - "/app/zkevm-node run --genesis /app/genesis.json --cfg /app/config.toml --components synchronizer" + - "/app/xgon-node run --genesis /app/genesis.json --cfg /app/config.toml --components synchronizer" ``` The container alone needs some parameters configured, access to certain configuration files and the appropriate ports exposed. diff --git a/docs/configuration.md b/docs/configuration.md index a61bcac3c8..1e6e6f3024 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -35,7 +35,7 @@ For details about the contents you can read specifications [here](config-file/cu ### Prover Config -Please check [prover repository](https://github.com/0xPolygonHermez/zkevm-prover) for further information +Please check [prover repository](https://github.com/okx/xgon-prover) for further information Examples: - `config/environments/mainnet/prover.config.json` diff --git a/docs/networks.md b/docs/networks.md index d4ce7c907c..f4c1658b49 100644 --- a/docs/networks.md +++ b/docs/networks.md @@ -1,5 +1,5 @@ -# zkEVM testnet networks +# Xgon testnet networks | Network Name | ChainID | RPC URL | Explorer | Bridge Info | |--------------|---------|---------|----------|------------------| -| Public Testnet | `1402` | https://rpc.public.zkevm-test.net | https://explorer.public.zkevm-test.net | https://public.zkevm-test.net/ \ No newline at end of file +| Public Testnet | `1402` | https://rpc.public.xgon-test.net | https://explorer.public.xgon-test.net | https://public.xgon-test.net/ \ No newline at end of file diff --git a/docs/production-setup.md b/docs/production-setup.md index 790c508421..591cbe524a 100644 --- a/docs/production-setup.md +++ b/docs/production-setup.md @@ -9,14 +9,14 @@ Note that sequencing and proving functionalities are not covered in this documen ## Requirements -- A machine to run the zkEVM node with the following requirements: +- A machine to run the XGON node with the following requirements: - Hardware: 32G RAM, 4 cores, 128G Disk with high IOPS (as the network is super young the current disk requirements are quite low, but they will increase overtime. 
Also note that this requirement is true if the DBs run on the same machine, but it's recommended to run Postgres on dedicated infra). Currently ARM-based CPUs are not supported - Software: Ubuntu 22.04, Docker -- A L1 node: we recommend using geth, but what it's actually needed is access to a JSON RPC interface for the L1 network (Goerli for zkEVM testnet, Ethereum mainnet for zkEVM mainnet) +- An L1 node: we recommend using geth, but what's actually needed is access to a JSON RPC interface for the L1 network (Goerli for XGON testnet, Ethereum mainnet for XGON mainnet) ## Setup -This is the most straightforward path to run a zkEVM node, and it's perfectly fine for most use cases, however if you are interested in providing service to many users it's recommended to do some tweaking over the default configuration. Furthermore, this is quite opinionated, feel free to run this software in a different way, for instance it's not needed to use Docker, you could use the Go and C++ binaries directly. +This is the most straightforward path to run an XGON node, and it's perfectly fine for most use cases, however if you are interested in providing service to many users it's recommended to do some tweaking over the default configuration. Furthermore, this is quite opinionated, feel free to run this software in a different way, for instance it's not needed to use Docker, you could use the Go and C++ binaries directly. tl;dr: @@ -25,7 +25,7 @@ ZKEVM_NET=mainnet ZKEVM_DIR=./path/to/install # CHANGE THIS ZKEVM_CONFIG_DIR=./path/to/config # CHANGE THIS -curl -L https://github.com/0xPolygonHermez/zkevm-node/releases/latest/download/$ZKEVM_NET.zip > $ZKEVM_NET.zip && unzip -o $ZKEVM_NET.zip -d $ZKEVM_DIR && rm $ZKEVM_NET.zip +curl -L https://github.com/okx/xgon-node/releases/latest/download/$ZKEVM_NET.zip > $ZKEVM_NET.zip && unzip -o $ZKEVM_NET.zip -d $ZKEVM_DIR && rm $ZKEVM_NET.zip mkdir -p $ZKEVM_CONFIG_DIR && cp $ZKEVM_DIR/$ZKEVM_NET/example.env $ZKEVM_CONFIG_DIR/.env # EDIT THIS env file: @@ -41,9 +41,9 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker 2. Define installation path: `ZKEVM_DIR=./path/to/install` 3. Define a config directory: `ZKEVM_CONFIG_DIR=./path/to/config` 4. It's recommended to source this env vars in your `~/.bashrc`, `~/.zshrc` or whatever you're using -5. Download and extract the artifacts: `curl -L https://github.com/0xPolygonHermez/zkevm-node/releases/latest/download/$ZKEVM_NET.zip > $ZKEVM_NET.zip && unzip -o $ZKEVM_NET.zip -d $ZKEVM_DIR && rm $ZKEVM_NET.zip`. Note you may need to install `unzip` for this command to work. +5. Download and extract the artifacts: `curl -L https://github.com/okx/xgon-node/releases/latest/download/$ZKEVM_NET.zip > $ZKEVM_NET.zip && unzip -o $ZKEVM_NET.zip -d $ZKEVM_DIR && rm $ZKEVM_NET.zip`. Note you may need to install `unzip` for this command to work. -> **NOTE:** Take into account this works for the latest release (mainnet), in case you want to deploy a pre-release (testnet) you should get the artifacts directly for that release and not using the "latest" link depicted here. [Here](https://github.com/0xPolygonHermez) you can check the node release deployed for each network. +> **NOTE:** Take into account this works for the latest release (mainnet), in case you want to deploy a pre-release (testnet) you should get the artifacts directly for that release and not using the "latest" link depicted here. [Here](https://github.com/okx) you can check the node release deployed for each network. 6.
Copy the file with the env parameters into config directory: `mkdir -p $ZKEVM_CONFIG_DIR && cp $ZKEVM_DIR/$ZKEVM_NET/example.env $ZKEVM_CONFIG_DIR/.env` 7. Edit the env file, with your favourite editor. The example will use nano: `nano $ZKEVM_CONFIG_DIR/.env`. This file contains the configuration that anyone should modify. For advanced configuration: @@ -52,11 +52,11 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker 3. Edit the different configuration files in the $ZKEVM_CONFIG_DIR directory and make the necessary changes 8. Run the node: `docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml up -d`. You may need to run this command using `sudo` depending on your Docker setup. 9. Make sure that all components are running: `docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml ps`. You should see the following containers: - 1. zkevm-rpc - 2. zkevm-sync - 3. zkevm-state-db - 4. zkevm-pool-db - 5. zkevm-prover + 1. xgon-rpc + 2. xgon-sync + 3. xgon-state-db + 4. xgon-pool-db + 5. xgon-prover If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing: @@ -88,7 +88,7 @@ There are some fundamental changes that can be done towards the basic setup, in In the basic setup, there are Postgres being instanciated as Docker containers. For better performance is recommended to: - Run dedicated instances for Postgres. To achieve this you will need to: - - Remove the Postgres services (`zkevm-pool-db` and `zkevm-state-db`) from the `docker-compose.yml` + - Remove the Postgres services (`xgon-pool-db` and `xgon-state-db`) from the `docker-compose.yml` - Instantiate Postgres elsewhere (note that you will have to create credentials and run some queries to make this work, following the config files and docker-compose should give a clear idea of what to do) - Update the `node.config.toml` to use the correct URI for both DBs - Update `prover.config.json` to use the correct URI for the state DB @@ -101,5 +101,5 @@ Unlike the synchronizer, that needs to have only one instance running (having mo There can be as many instances of it as needed, but in order to not introduce other bottlenecks, it's important to consider the following: - Read replicas of the State DB should be used -- Synchronizer should have an exclusive instance of `zkevm-prover` -- JSON RPCs should scale in correlation with instances of `zkevm-prover`. The most obvious way to do so is by having a dedicated `zkevm-prover` for each `zkevm-rpc`. But depending on the payload of your solution it could be worth to have `1 zkevm-rpc : many zkevm-prover` or `many zkevm-rpc : 1 zkevm-prover`, ... For reference, the `zkevm-prover` implements the EVM, and therefore will be heavily used when calling endpoints such as `eth_call`. On the other hand, there are other endpoints that relay on the `zkevm-state-db` +- Synchronizer should have an exclusive instance of `xgon-prover` +- JSON RPCs should scale in correlation with instances of `xgon-prover`. The most obvious way to do so is by having a dedicated `xgon-prover` for each `xgon-rpc`. But depending on the payload of your solution it could be worth to have `1 xgon-rpc : many xgon-prover` or `many xgon-rpc : 1 xgon-prover`, ... 
For reference, the `xgon-prover` implements the EVM, and therefore will be heavily used when calling endpoints such as `eth_call`. On the other hand, there are other endpoints that relay on the `xgon-state-db` diff --git a/docs/running_local.md b/docs/running_local.md index f7c2d63464..09b08422e5 100644 --- a/docs/running_local.md +++ b/docs/running_local.md @@ -8,11 +8,11 @@ This documentation will help you running the following components: -- zkEVM Node Databases +- Xgon Node Databases - Explorer Databases - L1 Network - Prover -- zkEVM Node components +- Xgon Node components - Explorers ## Requirements @@ -23,8 +23,8 @@ The current version of the environment requires `go`, `docker` and `docker-compo - - -The `zkevm-node` docker image must be built at least once and every time a change is made to the code. -If you haven't build the `zkevm-node` image yet, you must run: +The `xgon-node` docker image must be built at least once and every time a change is made to the code. +If you haven't build the `xgon-node` image yet, you must run: ```bash make build-docker @@ -32,7 +32,7 @@ make build-docker ## A look at how the binary works: -The `zkevm-node` allows certain commands to interact with smart contracts, run certain components, create encryption files and print out debug information. +The `xgon-node` allows certain commands to interact with smart contracts, run certain components, create encryption files and print out debug information. To interact with the binary program we provide docker compose files, and a Makefile to spin up/down the different services and components, ensuring a smooth deployment locally and better interface in command line for developers. @@ -93,7 +93,7 @@ make run-approve-matic ## Accessing the environment - **Databases**: - - zkEVM Node *State* Database + - Xgon Node *State* Database - `Type:` Postgres DB - `User:` state_user - `Password:` state_password @@ -101,7 +101,7 @@ make run-approve-matic - `Host:` localhost - `Port:` 5432 - `Url:` - - zkEVM Node *Pool* Database + - Xgon Node *Pool* Database - `Type:` Postgres DB - `User:` pool_user - `Password:` pool_password @@ -109,7 +109,7 @@ make run-approve-matic - `Host:` localhost - `Port:` 5433 - `Url:` - - zkEVM Node *JSON-RPC* Database + - Xgon Node *JSON-RPC* Database - `Type:` Postgres DB - `User:` rpc_user - `Password:` rpc_password @@ -139,7 +139,7 @@ make run-approve-matic - `Host:` localhost - `Port:` 8545 - `Url:` - - zkEVM Node + - Xgon Node - `Type:` JSON RPC - `Host:` localhost - `Port:` 8123 @@ -173,7 +173,7 @@ To configure your Metamask to use your local environment, follow these steps: 3. On the left menu, click on Networks 4. Click on `Add Network` button 5. Fill up the L2 network information - 1. `Network Name:` Polygon zkEVM - Local + 1. `Network Name:` Xgon - Local 2. `New RPC URL:` 3. `ChainID:` 1001 4. `Currency Symbol:` ETH diff --git a/docs/snap_restore.md b/docs/snap_restore.md index 7d9de4c8b7..ce85262bf0 100644 --- a/docs/snap_restore.md +++ b/docs/snap_restore.md @@ -11,10 +11,10 @@ This feature creates a dump of entire database ``` NAME: - zkevm-node snapshot - Snapshot the state db + xgon-node snapshot - Snapshot the state db USAGE: - zkevm-node snapshot [command options] [arguments...] + xgon-node snapshot [command options] [arguments...] 
OPTIONS: --cfg FILE, -c FILE Configuration FILE @@ -27,7 +27,7 @@ OPTIONS: User = "prover_user" Password = "prover_pass" Name = "prover_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 @@ -39,7 +39,7 @@ This generates two files in the current working path: #### Example of invocation: ``` -# cd /tmp/ && /app/zkevm-node snap -c /app/config.toml +# cd /tmp/ && /app/xgon-node snap -c /app/config.toml (...) # ls -1 prover_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar.gz @@ -56,10 +56,10 @@ It populates state, and hash databases with the previous backup ``` NAME: - zkevm-node restore - Restore snapshot of the state db + xgon-node restore - Restore snapshot of the state db USAGE: - zkevm-node restore [command options] [arguments...] + xgon-node restore [command options] [arguments...] OPTIONS: --inputfilestate value, --is value Input file stateDB @@ -70,16 +70,16 @@ OPTIONS: #### Example of invocation: ``` -/app/zkevm-node restore -c /app/config.toml --is /tmp/state_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar.gz --ih /tmp/prover_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar +/app/xgon-node restore -c /app/config.toml --is /tmp/state_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar.gz --ih /tmp/prover_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar .gz ``` # How to test -You could use `test/docker-compose.yml` to interact with `zkevm-node`: +You could use `test/docker-compose.yml` to interact with `xgon-node`: * Run the containers: `make run` * Launch a interactive container: ``` -docker-compose up -d zkevm-sh -docker-compose exec zkevm-sh /bin/sh +docker-compose up -d xgon-sh +docker-compose exec xgon-sh /bin/sh ``` * Inside this shell you can execute the examples of invocation diff --git a/docs/zkEVM-custom-endpoints.md b/docs/zkEVM-custom-endpoints.md index ef496c1df5..e95c4edfc4 100644 --- a/docs/zkEVM-custom-endpoints.md +++ b/docs/zkEVM-custom-endpoints.md @@ -1,6 +1,6 @@ -# zkEVM custom endpoints +# Xgon custom endpoints -The zkEVM Node JSON RPC server works as is when compared to the official Ethereum JSON RPC, but there are some extra information that also needs to be shared when talking about a L2 Networks, in our case we have information about Batches, Proofs, L1 transactions and much more +The Xgon Node JSON RPC server works as is when compared to the official Ethereum JSON RPC, but there are some extra information that also needs to be shared when talking about a L2 Networks, in our case we have information about Batches, Proofs, L1 transactions and much more In order to allow users to consume this information, a custom set of endpoints were created to provide this information, they are provided under the prefix `zkevm_` diff --git a/gasprice/config.go b/gasprice/config.go index 6e0426ae40..13b55e8597 100644 --- a/gasprice/config.go +++ b/gasprice/config.go @@ -16,6 +16,8 @@ const ( LastNBatchesType EstimatorType = "lastnbatches" // FollowerType calculate the gas price basing on the L1 gasPrice. FollowerType EstimatorType = "follower" + // FixedType the gas price from config that the unit is usdt + FixedType EstimatorType = "fixed" ) // Config for gas price estimator. 
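For context, here is a minimal, self-contained sketch of how the fixed estimator introduced in this change turns the new config fields into a wei gas price. It mirrors the arithmetic in `gasprice/fixed.go` further down, leaving out the Kafka plumbing and the final truncation to three significant digits; the helper name `fixedPriceWei` is illustrative rather than part of the change:

```go
package main

import (
	"fmt"
	"math/big"
)

// fixedPriceWei converts a target price in USDT (GasPriceUsdt) into wei using
// the L2 native-coin USDT price (L2CoinPrice / DefaultL2CoinPrice), then clamps
// the result between DefaultGasPriceWei (floor) and MaxGasPriceWei (ceiling,
// disabled when 0), as the fixed suggester does before storing it in the pool.
func fixedPriceWei(gasPriceUsdt, l2CoinPrice float64, defaultWei, maxWei uint64) *big.Int {
	res := new(big.Float).Mul(big.NewFloat(gasPriceUsdt/l2CoinPrice), big.NewFloat(1e18))
	result := new(big.Int)
	res.Int(result)
	if floor := new(big.Int).SetUint64(defaultWei); floor.Cmp(result) > 0 {
		result = floor
	}
	if ceil := new(big.Int).SetUint64(maxWei); maxWei > 0 && result.Cmp(ceil) > 0 {
		result = ceil
	}
	return result
}

func main() {
	// GasPriceUsdt = 0.001 with an L2 coin price of 40 USDT yields
	// 25000000000000 wei, the value expected in gasprice/fixed_test.go.
	fmt.Println(fixedPriceWei(0.001, 40, 1000000000, 0))
}
```

Note that DefaultGasPriceWei acts as a floor rather than a fallback here, and a MaxGasPriceWei of 0 disables the upper bound, which matches how fixed.go treats these two fields.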
@@ -34,5 +36,16 @@ type Config struct { CleanHistoryPeriod types.Duration `mapstructure:"CleanHistoryPeriod"` CleanHistoryTimeRetention types.Duration `mapstructure:"CleanHistoryTimeRetention"` + KafkaURL string `mapstructure:"KafkaURL"` + Topic string `mapstructure:"Topic"` + GroupID string `mapstructure:"GroupID"` + Username string `mapstructure:"Username"` + Password string `mapstructure:"Password"` + RootCAPath string `mapstructure:"RootCAPath"` + L2CoinId int `mapstructure:"L2CoinId"` + // DefaultL2CoinPrice is the native token's coin price + DefaultL2CoinPrice float64 `mapstructure:"DefaultL2CoinPrice"` + GasPriceUsdt float64 `mapstructure:"GasPriceUsdt"` + Factor float64 `mapstructure:"Factor"` } diff --git a/gasprice/fixed.go b/gasprice/fixed.go new file mode 100644 index 0000000000..b6e3d26f36 --- /dev/null +++ b/gasprice/fixed.go @@ -0,0 +1,94 @@ +package gasprice + +import ( + "context" + "fmt" + "math/big" + "strconv" + + "github.com/0xPolygonHermez/zkevm-node/encoding" + "github.com/0xPolygonHermez/zkevm-node/log" +) + +const ( + // OKBWei OKB wei + OKBWei = 1e18 + minOKBWei = 1e-18 +) + +// FixedGasPrice struct +type FixedGasPrice struct { + cfg Config + pool poolInterface + ctx context.Context + eth ethermanInterface + ratePrc *KafkaProcessor +} + +// newFixedGasPriceSuggester inits l2 fixed price suggester. +func newFixedGasPriceSuggester(ctx context.Context, cfg Config, pool poolInterface, ethMan ethermanInterface) *FixedGasPrice { + gps := &FixedGasPrice{ + cfg: cfg, + pool: pool, + ctx: ctx, + eth: ethMan, + ratePrc: newKafkaProcessor(cfg, ctx), + } + gps.UpdateGasPriceAvg() + return gps +} + +// UpdateGasPriceAvg updates the gas price. +func (f *FixedGasPrice) UpdateGasPriceAvg() { + ctx := context.Background() + // Get L1 gasprice + l1GasPrice := f.eth.GetL1GasPrice(f.ctx) + if big.NewInt(0).Cmp(l1GasPrice) == 0 { + log.Warn("gas price 0 received. Skipping update...") + return + } + + l2CoinPrice := f.ratePrc.GetL2CoinPrice() + if l2CoinPrice < minOKBWei { + log.Warn("the L2 native coin price too small...") + return + } + res := new(big.Float).Mul(big.NewFloat(0).SetFloat64(f.cfg.GasPriceUsdt/l2CoinPrice), big.NewFloat(0).SetFloat64(OKBWei)) + // Store l2 gasPrice calculated + result := new(big.Int) + res.Int(result) + minGasPrice := big.NewInt(0).SetUint64(f.cfg.DefaultGasPriceWei) + if minGasPrice.Cmp(result) == 1 { // minGasPrice > result + log.Warn("setting DefaultGasPriceWei for L2") + result = minGasPrice + } + maxGasPrice := new(big.Int).SetUint64(f.cfg.MaxGasPriceWei) + if f.cfg.MaxGasPriceWei > 0 && result.Cmp(maxGasPrice) == 1 { // result > maxGasPrice + log.Warn("setting MaxGasPriceWei for L2") + result = maxGasPrice + } + var truncateValue *big.Int + log.Debug("Full L2 gas price value: ", result, ". Length: ", len(result.String()), ". 
L1 gas price value: ", l1GasPrice) + + numLength := len(result.String()) + if numLength > 3 { //nolint:gomnd + aux := "%0" + strconv.Itoa(numLength-3) + "d" //nolint:gomnd + var ok bool + value := result.String()[:3] + fmt.Sprintf(aux, 0) + truncateValue, ok = new(big.Int).SetString(value, encoding.Base10) + if !ok { + log.Error("error converting: ", truncateValue) + } + } else { + truncateValue = result + } + log.Debugf("Storing truncated L2 gas price: %v, L2 native coin price: %v", truncateValue, l2CoinPrice) + if truncateValue != nil { + err := f.pool.SetGasPrices(ctx, truncateValue.Uint64(), l1GasPrice.Uint64()) + if err != nil { + log.Errorf("failed to update gas price in poolDB, err: %v", err) + } + } else { + log.Error("nil value detected. Skipping...") + } +} diff --git a/gasprice/fixed_test.go b/gasprice/fixed_test.go new file mode 100644 index 0000000000..eb0878f1a6 --- /dev/null +++ b/gasprice/fixed_test.go @@ -0,0 +1,144 @@ +package gasprice + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/config/types" + "github.com/0xPolygonHermez/zkevm-node/log" +) + +func init() { + log.Init(log.Config{ + Level: "debug", + Outputs: []string{"stdout"}, + }) +} + +func TestUpdateGasPriceFixed(t *testing.T) { + ctx := context.Background() + var d time.Duration = 1000000000 + + cfg := Config{ + Type: FixedType, + DefaultGasPriceWei: 1000000000, + UpdatePeriod: types.NewDuration(d), + Factor: 0.5, + KafkaURL: "127.0.0.1:9092", + Topic: "middle_coinPrice_push", + DefaultL2CoinPrice: 40, + GasPriceUsdt: 0.001, + } + l1GasPrice := big.NewInt(10000000000) + l2GasPrice := uint64(25000000000000) + poolM := new(poolMock) + ethM := new(ethermanMock) + ethM.On("GetL1GasPrice", ctx).Return(l1GasPrice).Once() + poolM.On("SetGasPrices", ctx, l2GasPrice, l1GasPrice.Uint64()).Return(nil).Once() + f := newFixedGasPriceSuggester(ctx, cfg, poolM, ethM) + + ethM.On("GetL1GasPrice", ctx).Return(l1GasPrice, l1GasPrice).Once() + poolM.On("SetGasPrices", ctx, l2GasPrice, l1GasPrice.Uint64()).Return(nil).Once() + f.UpdateGasPriceAvg() +} + +func TestUpdateGasPriceAvgCases(t *testing.T) { + var d time.Duration = 1000000000 + testcases := []struct { + cfg Config + l1GasPrice *big.Int + l2GasPrice uint64 + }{ + { + cfg: Config{ + Type: FixedType, + DefaultGasPriceWei: 1000000000, + UpdatePeriod: types.NewDuration(d), + KafkaURL: "127.0.0.1:9092", + Topic: "middle_coinPrice_push", + DefaultL2CoinPrice: 40, + GasPriceUsdt: 0.001, + }, + l1GasPrice: big.NewInt(10000000000), + l2GasPrice: uint64(25000000000000), + }, + { + cfg: Config{ + Type: FixedType, + DefaultGasPriceWei: 1000000000, + UpdatePeriod: types.NewDuration(d), + KafkaURL: "127.0.0.1:9092", + Topic: "middle_coinPrice_push", + DefaultL2CoinPrice: 1e-19, + GasPriceUsdt: 0.001, + }, + l1GasPrice: big.NewInt(10000000000), + l2GasPrice: uint64(25000000000000), + }, + { // the gas price small than the min gas price + cfg: Config{ + Type: FixedType, + DefaultGasPriceWei: 26000000000000, + UpdatePeriod: types.NewDuration(d), + KafkaURL: "127.0.0.1:9092", + Topic: "middle_coinPrice_push", + DefaultL2CoinPrice: 40, + GasPriceUsdt: 0.001, + }, + l1GasPrice: big.NewInt(10000000000), + l2GasPrice: uint64(26000000000000), + }, + { // the gas price bigger than the max gas price + cfg: Config{ + Type: FixedType, + DefaultGasPriceWei: 1000000000000, + MaxGasPriceWei: 23000000000000, + UpdatePeriod: types.NewDuration(d), + KafkaURL: "127.0.0.1:9092", + Topic: "middle_coinPrice_push", + DefaultL2CoinPrice: 40, + GasPriceUsdt: 
0.001, + }, + l1GasPrice: big.NewInt(10000000000), + l2GasPrice: uint64(23000000000000), + }, + { + cfg: Config{ + Type: FixedType, + DefaultGasPriceWei: 1000000000, + UpdatePeriod: types.NewDuration(d), + KafkaURL: "127.0.0.1:9092", + Topic: "middle_coinPrice_push", + DefaultL2CoinPrice: 30, + GasPriceUsdt: 0.001, + }, + l1GasPrice: big.NewInt(10000000000), + l2GasPrice: uint64(33300000000000), + }, + { + cfg: Config{ + Type: FixedType, + DefaultGasPriceWei: 10, + UpdatePeriod: types.NewDuration(d), + KafkaURL: "127.0.0.1:9092", + Topic: "middle_coinPrice_push", + DefaultL2CoinPrice: 30, + GasPriceUsdt: 1e-15, + }, + l1GasPrice: big.NewInt(10000000000), + l2GasPrice: uint64(33), + }, + } + + for _, tc := range testcases { + ctx := context.Background() + poolM := new(poolMock) + ethM := new(ethermanMock) + ethM.On("GetL1GasPrice", ctx).Return(tc.l1GasPrice).Twice() + poolM.On("SetGasPrices", ctx, tc.l2GasPrice, tc.l1GasPrice.Uint64()).Return(nil).Twice() + f := newFixedGasPriceSuggester(ctx, tc.cfg, poolM, ethM) + f.UpdateGasPriceAvg() + } +} diff --git a/gasprice/gaspricesuggester.go b/gasprice/gaspricesuggester.go index 6178bfec78..c26daecdf6 100644 --- a/gasprice/gaspricesuggester.go +++ b/gasprice/gaspricesuggester.go @@ -27,6 +27,9 @@ func NewL2GasPriceSuggester(ctx context.Context, cfg Config, pool poolInterface, case DefaultType: log.Info("Default type selected") gpricer = newDefaultGasPriceSuggester(ctx, cfg, pool) + case FixedType: + log.Info("Fixed type selected") + gpricer = newFixedGasPriceSuggester(ctx, cfg, pool, ethMan) default: log.Fatal("unknown l2 gas price suggester type ", cfg.Type, ". Please specify a valid one: 'lastnbatches', 'follower' or 'default'") } diff --git a/gasprice/kafka_proc.go b/gasprice/kafka_proc.go new file mode 100644 index 0000000000..f99577bb36 --- /dev/null +++ b/gasprice/kafka_proc.go @@ -0,0 +1,180 @@ +package gasprice + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "github.com/0xPolygonHermez/zkevm-node/log" + kafka "github.com/segmentio/kafka-go" + "github.com/segmentio/kafka-go/sasl/plain" + "os" + "strings" + "sync" + "time" +) + +const ( + okbcoinId = 7184 + defaultTime = 10 + defaultMaxData = 10e6 // 10M +) + +// MsgInfo msg info +type MsgInfo struct { + Topic string `json:"topic"` + Data *Body `json:"data"` +} + +// Body msg body +type Body struct { + Id string `json:"id"` + PriceList []*Price `json:"priceList"` +} + +// Price coin price +type Price struct { + CoinId int `json:"coinId"` + Symbol string `json:"symbol"` + FullName string `json:"fullName"` + Price float64 `json:"price"` + PriceStatus int `json:"priceStatus"` + MaxPrice24H float64 `json:"maxPrice24H"` + MinPrice24H float64 `json:"minPrice24H"` + MarketCap float64 `json:"marketCap"` + Timestamp int64 `json:"timestamp"` + Vol24H float64 `json:"vol24h"` + CirculatingSupply float64 `json:"circulatingSupply"` + MaxSupply float64 `json:"maxSupply"` + TotalSupply float64 `json:"totalSupply"` + PriceChange24H float64 `json:"priceChange24H"` + PriceChangeRate24H float64 `json:"priceChangeRate24H"` + CirculatingMarketCap float64 `json:"circulatingMarketCap"` + PriceChange7D float64 `json:"priceChange7D"` + PriceChangeRate7D float64 `json:"priceChangeRate7D"` + PriceChange30D float64 `json:"priceChange30D"` + PriceChangeRate30D float64 `json:"priceChangeRate30D"` + PriceChangeYearStart float64 `json:"priceChangeYearStart"` + PriceChangeRateYearStart float64 `json:"priceChangeRateYearStart"` + ExceptionStatus int `json:"exceptionStatus"` + Source int 
`json:"source"` + Type string `json:"type"` + Id string `json:"id"` +} + +// KafkaProcessor kafka processor +type KafkaProcessor struct { + kreader *kafka.Reader + L2Price float64 + ctx context.Context + rwLock sync.RWMutex + l2CoinId int +} + +func newKafkaProcessor(cfg Config, ctx context.Context) *KafkaProcessor { + rp := &KafkaProcessor{ + kreader: getKafkaReader(cfg), + L2Price: cfg.DefaultL2CoinPrice, + ctx: ctx, + l2CoinId: okbcoinId, + } + if cfg.L2CoinId != 0 { + rp.l2CoinId = cfg.L2CoinId + } + + go rp.processor() + return rp +} + +func getKafkaReader(cfg Config) *kafka.Reader { + brokers := strings.Split(cfg.KafkaURL, ",") + + var dialer *kafka.Dialer + if cfg.Password != "" && cfg.Username != "" && cfg.RootCAPath != "" { + rootCA, err := os.ReadFile(cfg.RootCAPath) + if err != nil { + panic("kafka read root ca fail") + } + caCertPool := x509.NewCertPool() + if ok := caCertPool.AppendCertsFromPEM(rootCA); !ok { + panic("caCertPool.AppendCertsFromPEM") + } + dialer = &kafka.Dialer{ + Timeout: defaultTime * time.Second, + DualStack: true, + SASLMechanism: plain.Mechanism{Username: cfg.Username, Password: cfg.Password}, + } + { // #nosec G402 + dialer.TLS = &tls.Config{RootCAs: caCertPool, InsecureSkipVerify: true} + } + } + + return kafka.NewReader(kafka.ReaderConfig{ + Brokers: brokers, + GroupID: cfg.GroupID, + Topic: cfg.Topic, + MinBytes: 1, // 1 + MaxBytes: defaultMaxData, + Dialer: dialer, + StartOffset: kafka.LastOffset, // read data from new message + }) +} + +func (rp *KafkaProcessor) processor() { + log.Info("kafka processor start processor ") + defer rp.kreader.Close() + for { + select { + case <-rp.ctx.Done(): + return + default: + value, err := rp.ReadAndCalc(rp.ctx) + if err != nil { + log.Warn("get the destion data fail ", err) + time.Sleep(time.Second * defaultTime) + continue + } + rp.updateL2CoinPrice(value) + } + } +} + +// ReadAndCalc read and calc +func (rp *KafkaProcessor) ReadAndCalc(ctx context.Context) (float64, error) { + m, err := rp.kreader.ReadMessage(ctx) + if err != nil { + return 0, err + } + return rp.parseL2CoinPrice(m.Value) +} + +func (rp *KafkaProcessor) updateL2CoinPrice(price float64) { + rp.rwLock.Lock() + defer rp.rwLock.Unlock() + rp.L2Price = price +} + +// GetL2CoinPrice get L2 coin price +func (rp *KafkaProcessor) GetL2CoinPrice() float64 { + rp.rwLock.RLock() + defer rp.rwLock.RUnlock() + return rp.L2Price +} + +func (rp *KafkaProcessor) parseL2CoinPrice(value []byte) (float64, error) { + msgI := &MsgInfo{} + err := json.Unmarshal(value, &msgI) + if err != nil { + return 0, err + } + if msgI.Data == nil || len(msgI.Data.PriceList) == 0 { + return 0, fmt.Errorf("the data PriceList is empty") + } + for _, price := range msgI.Data.PriceList { + if price.CoinId == rp.l2CoinId { + return price.Price, nil + } + } + return 0, fmt.Errorf("not find a correct coin price coinId=%v", rp.l2CoinId) +} diff --git a/gasprice/kafka_proc_test.go b/gasprice/kafka_proc_test.go new file mode 100644 index 0000000000..5d28387a01 --- /dev/null +++ b/gasprice/kafka_proc_test.go @@ -0,0 +1,58 @@ +package gasprice + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCalculateRate(t *testing.T) { + testcases := []struct { + l2CoinId int + msg string + check func(rate float64, err error) + }{ + { + // error + l2CoinId: okbcoinId, + msg: "{\"topic\":\"middle_coinPrice_push\"}", + check: func(rate float64, err error) { + require.Error(t, err) + }, + }, + { + // error + l2CoinId: okbcoinId, + msg: 
fmt.Sprintf("{\"topic\":\"middle_coinPrice_push\",\"source\":null,\"type\":null,\"data\":{\"priceList\":[{\"coinId\":%d,\"price\":0.02}],\"id\":\"98a797ce-f61b-4e90-87ac-445e77ad3599\"}}", okbcoinId+1), + check: func(rate float64, err error) { + require.Error(t, err) + }, + }, + { + // correct + l2CoinId: okbcoinId, + msg: fmt.Sprintf("{\"topic\":\"middle_coinPrice_push\",\"source\":null,\"type\":null,\"data\":{\"priceList\":[{\"coinId\":%d,\"price\":0.02}, {\"coinId\":%d,\"price\":0.002}],\"id\":\"98a797ce-f61b-4e90-87ac-445e77ad3599\"}}", 1, okbcoinId), + check: func(rate float64, err error) { + require.Equal(t, rate, 0.002) + require.NoError(t, err) + }, + }, + { + // correct + l2CoinId: okbcoinId, + msg: fmt.Sprintf("{\"topic\":\"middle_coinPrice_push\",\"source\":null,\"type\":null,\"data\":{\"priceList\":[{\"coinId\":%d,\"price\":0.02}, {\"coinId\":%d,\"price\":10}],\"id\":\"98a797ce-f61b-4e90-87ac-445e77ad3599\"}}", 1, okbcoinId), + check: func(rate float64, err error) { + require.Equal(t, rate, float64(10)) + require.NoError(t, err) + }, + }, + } + + for _, tc := range testcases { + rp := newKafkaProcessor(Config{Topic: "middle_coinPrice_push", L2CoinId: tc.l2CoinId}, context.Background()) + rt, err := rp.parseL2CoinPrice([]byte(tc.msg)) + tc.check(rt, err) + } +} diff --git a/go.mod b/go.mod index 909e03c892..9b3abe75c2 100644 --- a/go.mod +++ b/go.mod @@ -103,6 +103,7 @@ require ( github.com/miguelmota/go-solidity-sha3 v0.1.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pierrec/lz4/v4 v4.1.15 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -147,7 +148,10 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect ) -require github.com/prometheus/client_golang v1.16.0 +require ( + github.com/prometheus/client_golang v1.16.0 + github.com/segmentio/kafka-go v0.4.43 +) replace github.com/0xPolygon/cdk-data-availability => github.com/okx/cdk-data-availability v0.0.1-dac.0.20230913121303-66766c988eea diff --git a/go.sum b/go.sum index fc974b32af..f9a9fa1991 100644 --- a/go.sum +++ b/go.sum @@ -456,6 +456,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -571,6 +572,8 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors 
v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= @@ -617,6 +620,8 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/kafka-go v0.4.43 h1:yKVQ/i6BobbX7AWzwkhulsEn47wpLA8eO6H03bCMqYg= +github.com/segmentio/kafka-go v0.4.43/go.mod h1:d0g15xPMqoUookug0OU75DhGZxXwCFxSLeJ4uphwJzg= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -716,6 +721,12 @@ github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vb github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -883,6 +894,7 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= @@ -1010,6 +1022,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= diff --git a/jsonrpc/config.go b/jsonrpc/config.go index d347ba41d2..16ee9e5672 100644 --- a/jsonrpc/config.go +++ b/jsonrpc/config.go @@ -38,6 +38,9 @@ type Config struct { // TraceBatchUseHTTPS enables, in the debug_traceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP) // to do the parallel requests to RPC.debug_traceTransaction endpoint TraceBatchUseHTTPS bool `mapstructure:"TraceBatchUseHTTPS"` + + // EnablePendingTransactionFilter enables pending transaction filter that can support query L2 pending transaction + EnablePendingTransactionFilter bool `mapstructure:"EnablePendingTransactionFilter"` } // WebSocketsConfig has parameters to config the rpc websocket support diff --git a/jsonrpc/endpoints_debug_innertx.go b/jsonrpc/endpoints_debug_innertx.go new file mode 100644 index 0000000000..17de168f3f --- /dev/null +++ b/jsonrpc/endpoints_debug_innertx.go @@ -0,0 +1,69 @@ +package jsonrpc + +import ( + "context" + "errors" + "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/ethereum/go-ethereum/common" + ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/jackc/pgx/v4" +) + +func (d *DebugEndpoints) buildInnerTransaction(ctx context.Context, hash common.Hash, dbTx pgx.Tx) (interface{}, types.Error) { + traceCfg := defaultTraceConfig + tracer := "callTracer" + traceCfg.Tracer = &tracer + + // check tracer + if traceCfg.Tracer != nil && *traceCfg.Tracer != "" && !isBuiltInTracer(*traceCfg.Tracer) && !isJSCustomTracer(*traceCfg.Tracer) { + return RPCErrorResponse(types.DefaultErrorCode, "invalid tracer", nil) + } + + stateTraceConfig := state.TraceConfig{ + DisableStack: traceCfg.DisableStack, + DisableStorage: traceCfg.DisableStorage, + EnableMemory: traceCfg.EnableMemory, + EnableReturnData: traceCfg.EnableReturnData, + Tracer: traceCfg.Tracer, + TracerConfig: traceCfg.TracerConfig, + } + result, err := d.state.DebugTransaction(ctx, hash, stateTraceConfig, dbTx) + if errors.Is(err, state.ErrNotFound) { + return RPCErrorResponse(types.DefaultErrorCode, "transaction not found", nil) + } else if err != nil { + const errorMessage = "failed to get trace" + log.Errorf("%v: %v", errorMessage, err) + return nil, types.NewRPCError(types.DefaultErrorCode, errorMessage) + } + + // if a tracer was specified, then return the trace result + if stateTraceConfig.Tracer != nil && *stateTraceConfig.Tracer != "" && len(result.ExecutorTraceResult) > 0 { + return result.ExecutorTraceResult, nil + } + + receipt, err := d.state.GetTransactionReceipt(ctx, hash, dbTx) + if err != nil { + const errorMessage = "failed to tx receipt" + log.Errorf("%v: %v", errorMessage, err) + return nil, types.NewRPCError(types.DefaultErrorCode, errorMessage) + } + + failed := receipt.Status == ethTypes.ReceiptStatusFailed + var returnValue interface{} + if stateTraceConfig.EnableReturnData { + returnValue = common.Bytes2Hex(result.ReturnValue) + } + + structLogs := d.buildStructLogs(result.StructLogs, *traceCfg) + + resp := traceTransactionResponse{ + Gas: result.GasUsed, + Failed: failed, + ReturnValue: returnValue, + StructLogs: structLogs, + } + + return resp, nil +} diff --git a/jsonrpc/endpoints_eth.go b/jsonrpc/endpoints_eth.go index 01af85ff1d..1a2d665e51 100644 --- a/jsonrpc/endpoints_eth.go +++ b/jsonrpc/endpoints_eth.go @@ -830,13 +830,14 @@ func (e *EthEndpoints) NewPendingTransactionFilter() 
(interface{}, types.Error) // internal func (e *EthEndpoints) newPendingTransactionFilter(wsConn *websocket.Conn) (interface{}, types.Error) { - return nil, types.NewRPCError(types.DefaultErrorCode, "not supported yet") - // id, err := e.storage.NewPendingTransactionFilter(wsConn) - // if err != nil { - // return rpcErrorResponse(types.DefaultErrorCode, "failed to create new pending transaction filter", err) - // } - - // return id, nil + if !e.cfg.EnablePendingTransactionFilter { + return nil, types.NewRPCError(types.DefaultErrorCode, "not supported yet") + } + id, err := e.storage.NewPendingTransactionFilter(wsConn) + if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to create new pending transaction filter", err) + } + return id, nil } // SendRawTransaction has two different ways to handle new transactions: diff --git a/jsonrpc/endpoints_eth_innertx.go b/jsonrpc/endpoints_eth_innertx.go new file mode 100644 index 0000000000..a5d66b6a5f --- /dev/null +++ b/jsonrpc/endpoints_eth_innertx.go @@ -0,0 +1,188 @@ +package jsonrpc + +import ( + "context" + "encoding/json" + "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/jackc/pgx/v4" + "math/big" + "strconv" + "strings" + "sync" +) + +var debugEndPoints *DebugEndpoints +var once sync.Once + +// GetInternalTransactions returns a transaction by his hash +func (e *EthEndpoints) GetInternalTransactions(hash types.ArgHash) (interface{}, types.Error) { + once.Do(func() { + debugEndPoints = &DebugEndpoints{ + state: e.state, + } + }) + return debugEndPoints.txMan.NewDbTxScope(debugEndPoints.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { + ret, err := debugEndPoints.buildInnerTransaction(ctx, hash.Hash(), dbTx) + if err != nil { + return ret, err + } + + jr, ok := ret.(json.RawMessage) + if !ok { + return nil, types.NewRPCError(types.ParserErrorCode, "cant transfer to json raw message") + } + r, stderr := jr.MarshalJSON() + if stderr != nil { + return nil, types.NewRPCError(types.ParserErrorCode, stderr.Error()) + } + var of okFrame + stderr = json.Unmarshal(r, &of) + if stderr != nil { + return nil, types.NewRPCError(types.ParserErrorCode, stderr.Error()) + } + result := internalTxTraceToInnerTxs(of) + + return result, nil + }) +} + +type okLog struct { + Address common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data hexutil.Bytes `json:"data"` +} + +type okFrame struct { + Type string `json:"type"` + From common.Address `json:"from"` + Gas string `json:"gas"` + GasUsed string `json:"gasUsed"` + To *common.Address `json:"to,omitempty" rlp:"optional"` + Input string `json:"input" rlp:"optional"` + Output string `json:"output,omitempty" rlp:"optional"` + Error string `json:"error,omitempty" rlp:"optional"` + RevertReason string `json:"revertReason,omitempty"` + Calls []okFrame `json:"calls,omitempty" rlp:"optional"` + Logs []okLog `json:"logs,omitempty" rlp:"optional"` + // Placed at end on purpose. The RLP will be decoded to 0 instead of + // nil if there are non-empty elements after in the struct. 
+ Value string `json:"value,omitempty" rlp:"optional"` +} + +func internalTxTraceToInnerTxs(tx okFrame) []*InnerTx { + dfs := Dfs{} + indexMap := make(map[int]int) + indexMap[0] = 1 + var level = 0 + var index = 1 + isError := tx.Error != "" + dfs.dfs(tx, level, index, indexMap, isError) + return dfs.innerTxs +} + +type Dfs struct { + innerTxs []*InnerTx +} + +func inArray(dst string, src []string) bool { + for _, v := range src { + if v == dst { + return true + } + } + return false +} + +func (d *Dfs) dfs(tx okFrame, level int, index int, indexMap map[int]int, isError bool) { + if !inArray(strings.ToLower(tx.Type), []string{"call", "create", "create2", + "callcode", "delegatecall", "staticcall", "selfdestruct"}) { + return + } + name := strings.ToLower(tx.Type) + for i := 0; i < level; i++ { + if indexMap[i] == 0 { + continue + } + name = name + "_" + strconv.Itoa(indexMap[i]) + } + innerTx := internalTxTraceToInnerTx(tx, name, level, index) + if !isError { + isError = innerTx.IsError + } else { + innerTx.IsError = isError + } + d.innerTxs = append(d.innerTxs, innerTx) + index = 0 + for _, call := range tx.Calls { + index++ + indexMap[level] = index + d.dfs(call, level+1, index+1, indexMap, isError) + } + if len(tx.Calls) == 0 { + return + } +} + +type InnerTx struct { + Dept big.Int `json:"dept"` + InternalIndex big.Int `json:"internal_index"` + From string `json:"from"` + To string `json:"to"` + Input string `json:"input"` + Output string `json:"output"` + IsError bool `json:"is_error"` + GasUsed uint64 `json:"gas_used"` + Value string `json:"value"` + ValueWei string `json:"value_wei"` + CallValueWei string `json:"call_value_wei"` + Error string `json:"error"` + Gas uint64 `json:"gas"` + //ReturnGas uint64 `json:"return_gas"` + CallType string `json:"call_type"` + Name string `json:"name"` + TraceAddress string `json:"trace_address"` + CodeAddress string `json:"code_address"` +} + +func internalTxTraceToInnerTx(currentTx okFrame, name string, depth int, index int) *InnerTx { + value := currentTx.Value + if value == "" { + value = "0x0" + } + var toAddress string + if currentTx.To != nil { + toAddress = currentTx.To.String() + } + gas, _ := strconv.ParseUint(currentTx.Gas, 0, 64) + gasUsed, _ := strconv.ParseUint(currentTx.GasUsed, 0, 64) + valueWei, _ := hexutil.DecodeBig(value) + callTx := &InnerTx{ + Dept: *big.NewInt(int64(depth)), + From: currentTx.From.String(), + To: toAddress, + ValueWei: valueWei.String(), + CallValueWei: value, + CallType: strings.ToLower(currentTx.Type), + Name: name, + Input: currentTx.Input, + Output: currentTx.Output, + Gas: gas, + GasUsed: gasUsed, + IsError: false, // TODO Nested errors + //ReturnGas: currentTx.Gas - currentTx.GasUsed, + } + callTx.InternalIndex = *big.NewInt(int64(index - 1)) + if strings.ToLower(currentTx.Type) == "callcode" { + callTx.CodeAddress = currentTx.To.String() + } + if strings.ToLower(currentTx.Type) == "delegatecall" { + callTx.ValueWei = "" + } + if currentTx.Error != "" { + callTx.Error = currentTx.Error + callTx.IsError = true + } + return callTx +} diff --git a/pool/config.go b/pool/config.go index c744fd6c82..d5cb55a44b 100644 --- a/pool/config.go +++ b/pool/config.go @@ -7,6 +7,9 @@ import ( // Config is the pool configuration type Config struct { + // FreeClaimGasLimit is the max gas allowed use to do a free claim + FreeClaimGasLimit uint64 `mapstructure:"FreeClaimGasLimit"` + // IntervalToRefreshBlockedAddresses is the time it takes to sync the // blocked address list from db to memory 
IntervalToRefreshBlockedAddresses types.Duration `mapstructure:"IntervalToRefreshBlockedAddresses"` @@ -37,4 +40,7 @@ type Config struct { // GlobalQueue represents the maximum number of non-executable transaction slots for all accounts GlobalQueue uint64 `mapstructure:"GlobalQueue"` + + // FreeGasAddress is the default free gas address + FreeGasAddress string `mapstructure:"FreeGasAddress"` } diff --git a/pool/pool.go b/pool/pool.go index 32b44b77c7..0157b2c0ab 100644 --- a/pool/pool.go +++ b/pool/pool.go @@ -18,6 +18,11 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) +const ( + // BridgeClaimMethodSignature for tracking BridgeClaimMethodSignature method + BridgeClaimMethodSignature = "0x2cffd02e" +) + var ( // ErrNotFound indicates an object has not been found for the search criteria used ErrNotFound = errors.New("object not found") @@ -29,6 +34,9 @@ var ( // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced // with a different one without the required price bump. ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") + + // FreeClaimAddress is the default free gas address + FreeClaimAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" ) // Pool is an implementation of the Pool interface @@ -45,6 +53,7 @@ type Pool struct { startTimestamp time.Time gasPrices GasPrices gasPricesMux *sync.RWMutex + l2BridgeAddr common.Address } type preExecutionResponse struct { @@ -63,7 +72,7 @@ type GasPrices struct { } // NewPool creates and initializes an instance of Pool -func NewPool(cfg Config, s storage, st stateInterface, chainID uint64, eventLog *event.EventLog) *Pool { +func NewPool(cfg Config, s storage, st stateInterface, l2BridgeAddr common.Address, chainID uint64, eventLog *event.EventLog) *Pool { startTimestamp := time.Now() p := &Pool{ cfg: cfg, @@ -76,8 +85,11 @@ func NewPool(cfg Config, s storage, st stateInterface, chainID uint64, eventLog eventLog: eventLog, gasPrices: GasPrices{0, 0}, gasPricesMux: new(sync.RWMutex), + l2BridgeAddr: l2BridgeAddr, } + FreeClaimAddress = cfg.FreeGasAddress + p.refreshBlockedAddresses() go func(cfg *Config, p *Pool) { for { @@ -157,7 +169,7 @@ func (p *Pool) StartPollingMinSuggestedGasPrice(ctx context.Context) { // AddTx adds a transaction to the pool with the pending state func (p *Pool) AddTx(ctx context.Context, tx types.Transaction, ip string) error { - poolTx := NewTransaction(tx, ip, false) + poolTx := NewTransaction(tx, ip, false, p) if err := p.validateTx(ctx, *poolTx); err != nil { return err } @@ -213,7 +225,7 @@ func (p *Pool) StoreTx(ctx context.Context, tx types.Transaction, ip string, isW } } - poolTx := NewTransaction(tx, ip, isWIP) + poolTx := NewTransaction(tx, ip, isWIP, p) poolTx.ZKCounters = preExecutionResponse.usedZkCounters return p.storage.AddTx(ctx, *poolTx) @@ -387,11 +399,13 @@ func (p *Pool) validateTx(ctx context.Context, poolTx Transaction) error { } // Reject transactions with a gas price lower than the minimum gas price - p.minSuggestedGasPriceMux.RLock() - gasPriceCmp := poolTx.GasPrice().Cmp(p.minSuggestedGasPrice) - p.minSuggestedGasPriceMux.RUnlock() - if gasPriceCmp == -1 { - return ErrGasPrice + if from != common.HexToAddress(FreeClaimAddress) || !poolTx.IsClaims { + p.minSuggestedGasPriceMux.RLock() + gasPriceCmp := poolTx.GasPrice().Cmp(p.minSuggestedGasPrice) + p.minSuggestedGasPriceMux.RUnlock() + if gasPriceCmp == -1 { + return ErrGasPrice + } } // Transactor should have enough funds to cover the costs diff --git a/pool/pool_test.go 
b/pool/pool_test.go index 39751375ef..bba01edcff 100644 --- a/pool/pool_test.go +++ b/pool/pool_test.go @@ -200,7 +200,7 @@ func Test_AddTx_OversizedData(t *testing.T) { require.NoError(t, err) const chainID = 2576980377 - p := pool.NewPool(cfg, s, st, chainID, eventLog) + p := pool.NewPool(cfg, s, st, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID, eventLog) b := make([]byte, cfg.MaxTxBytesSize+1) to := common.HexToAddress(operations.DefaultSequencerAddress) @@ -649,7 +649,7 @@ func Test_SetAndGetGasPrice(t *testing.T) { require.NoError(t, err) eventLog := event.NewEventLog(event.Config{}, eventStorage) - p := pool.NewPool(cfg, s, nil, chainID.Uint64(), eventLog) + p := pool.NewPool(cfg, s, nil, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID.Uint64(), eventLog) nBig, err := rand.Int(rand.Reader, big.NewInt(0).SetUint64(math.MaxUint64)) require.NoError(t, err) @@ -674,7 +674,7 @@ func TestDeleteGasPricesHistoryOlderThan(t *testing.T) { require.NoError(t, err) eventLog := event.NewEventLog(event.Config{}, eventStorage) - p := pool.NewPool(cfg, s, nil, chainID.Uint64(), eventLog) + p := pool.NewPool(cfg, s, nil, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID.Uint64(), eventLog) ctx := context.Background() @@ -1788,7 +1788,7 @@ func Test_AddTx_NonceTooHigh(t *testing.T) { } func setupPool(t *testing.T, cfg pool.Config, s *pgpoolstorage.PostgresPoolStorage, st *state.State, chainID uint64, ctx context.Context, eventLog *event.EventLog) *pool.Pool { - p := pool.NewPool(cfg, s, st, chainID, eventLog) + p := pool.NewPool(cfg, s, st, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID, eventLog) err := p.SetGasPrices(ctx, gasPrice.Uint64(), l1GasPrice.Uint64()) require.NoError(t, err) diff --git a/pool/transaction.go b/pool/transaction.go index b958b2c268..961eafc29a 100644 --- a/pool/transaction.go +++ b/pool/transaction.go @@ -1,6 +1,7 @@ package pool import ( + "strings" "time" "github.com/0xPolygonHermez/zkevm-node/state" @@ -45,10 +46,11 @@ type Transaction struct { IsWIP bool IP string FailedReason *string + IsClaims bool } // NewTransaction creates a new transaction -func NewTransaction(tx types.Transaction, ip string, isWIP bool) *Transaction { +func NewTransaction(tx types.Transaction, ip string, isWIP bool, p *Pool) *Transaction { poolTx := Transaction{ Transaction: tx, Status: TxStatusPending, @@ -57,5 +59,25 @@ func NewTransaction(tx types.Transaction, ip string, isWIP bool) *Transaction { IP: ip, } + poolTx.IsClaims = poolTx.IsClaimTx(p.l2BridgeAddr, p.cfg.FreeClaimGasLimit) + return &poolTx } + +// IsClaimTx checks, if tx is a claim tx +func (tx *Transaction) IsClaimTx(l2BridgeAddr common.Address, freeClaimGasLimit uint64) bool { + if tx.To() == nil { + return false + } + + txGas := tx.Gas() + if txGas > freeClaimGasLimit { + return false + } + + if *tx.To() == l2BridgeAddr && + strings.HasPrefix("0x"+common.Bytes2Hex(tx.Data()), BridgeClaimMethodSignature) { + return true + } + return false +} diff --git a/sequencer/dbmanager.go b/sequencer/dbmanager.go index b47de338ef..465e2ce103 100644 --- a/sequencer/dbmanager.go +++ b/sequencer/dbmanager.go @@ -135,7 +135,7 @@ func (d *dbManager) loadFromPool() { } func (d *dbManager) addTxToWorker(tx pool.Transaction) error { - txTracker, err := d.worker.NewTxTracker(tx.Transaction, tx.ZKCounters, tx.IP) + txTracker, err := d.worker.NewTxTracker(tx, tx.ZKCounters, tx.IP) if err != nil { return err } diff --git a/sequencer/finalizer.go 
b/sequencer/finalizer.go index 88437fb638..148bf536cc 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "strconv" "sync" "sync/atomic" "time" @@ -332,7 +333,10 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { tx := f.worker.GetBestFittingTx(f.batch.remainingResources) metrics.WorkerProcessingTime(time.Since(start)) + metrics.GetLogStatistics().CumulativeTiming(metrics.GetTx, time.Since(start)) + if tx != nil { + metrics.GetLogStatistics().CumulativeCounting(metrics.TxCounter) log.Debugf("processing tx: %s", tx.Hash.Hex()) // reset the count of effective GasPrice process attempts (since the tx may have been tried to be processed before) @@ -344,20 +348,25 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { if err != nil { if err == ErrEffectiveGasPriceReprocess { log.Info("reprocessing tx because of effective gas price calculation: %s", tx.Hash.Hex()) + metrics.GetLogStatistics().CumulativeCounting(metrics.ReprocessingTxCounter) continue } else { log.Errorf("failed to process transaction in finalizeBatches, Err: %v", err) + metrics.GetLogStatistics().CumulativeCounting(metrics.FailTxCounter) break } } + metrics.GetLogStatistics().CumulativeValue(metrics.BatchGas, int64(tx.Gas)) break } + f.sharedResourcesMux.Unlock() } else { // wait for new txs // log.Debugf("no transactions to be processed. Sleeping for %v", f.cfg.SleepDuration.Duration) if f.cfg.SleepDuration.Duration > 0 { time.Sleep(f.cfg.SleepDuration.Duration) + metrics.GetLogStatistics().CumulativeCounting(metrics.GetTxPauseCounter) } } @@ -369,10 +378,18 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { if f.isDeadlineEncountered() { log.Infof("closing batch %d because deadline was encountered.", f.batch.batchNumber) + metrics.GetLogStatistics().SetTag(metrics.BatchCloseReason, "deadline") f.finalizeBatch(ctx) + log.Infof(metrics.GetLogStatistics().Summary()) + metrics.GetLogStatistics().ResetStatistics() + metrics.GetLogStatistics().UpdateTimestamp(metrics.NewRound, time.Now()) } else if f.isBatchFull() || f.isBatchAlmostFull() { log.Infof("closing batch %d because it's almost full.", f.batch.batchNumber) + metrics.GetLogStatistics().SetTag(metrics.BatchCloseReason, "full") f.finalizeBatch(ctx) + log.Infof(metrics.GetLogStatistics().Summary()) + metrics.GetLogStatistics().ResetStatistics() + metrics.GetLogStatistics().UpdateTimestamp(metrics.NewRound, time.Now()) } if err := ctx.Err(); err != nil { @@ -412,8 +429,10 @@ func (f *finalizer) isBatchFull() bool { // finalizeBatch retries to until successful closes the current batch and opens a new one, potentially processing forced batches between the batch is closed and the resulting new empty batch func (f *finalizer) finalizeBatch(ctx context.Context) { start := time.Now() + metrics.GetLogStatistics().SetTag(metrics.FinalizeBatchNumber, strconv.Itoa(int(f.batch.batchNumber))) defer func() { metrics.ProcessingTime(time.Since(start)) + metrics.GetLogStatistics().CumulativeTiming(metrics.FinalizeBatchTiming, time.Since(start)) }() var err error @@ -495,6 +514,7 @@ func (f *finalizer) newWIPBatch(ctx context.Context) (*WipBatch, error) { } // Reprocess full batch as sanity check + tsReprocessFullBatch := time.Now() if f.cfg.SequentialReprocessFullBatch { // Do the full batch reprocess now _, err := f.reprocessFullBatch(ctx, f.batch.batchNumber, f.batch.initialStateRoot, f.batch.stateRoot) @@ -508,14 +528,18 @@ func (f *finalizer) newWIPBatch(ctx context.Context) (*WipBatch, error) { 
_, _ = f.reprocessFullBatch(ctx, f.batch.batchNumber, f.batch.initialStateRoot, f.batch.stateRoot) }() } + metrics.GetLogStatistics().CumulativeTiming(metrics.FinalizeBatchReprocessFullBatch, time.Since(tsReprocessFullBatch)) // Close the current batch + tsCloseBatch := time.Now() err = f.closeBatch(ctx) if err != nil { return nil, fmt.Errorf("failed to close batch, err: %w", err) } + metrics.GetLogStatistics().CumulativeTiming(metrics.FinalizeBatchCloseBatch, time.Since(tsCloseBatch)) // Metadata for the next batch + tsOpenBatch := time.Now() stateRoot := f.batch.stateRoot lastBatchNumber := f.batch.batchNumber @@ -544,6 +568,7 @@ func (f *finalizer) newWIPBatch(ctx context.Context) (*WipBatch, error) { f.processRequest.GlobalExitRoot = batch.globalExitRoot f.processRequest.Transactions = make([]byte, 0, 1) } + metrics.GetLogStatistics().CumulativeTiming(metrics.FinalizeBatchOpenBatch, time.Since(tsOpenBatch)) return batch, err } @@ -558,6 +583,9 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) (errW start := time.Now() defer func() { metrics.ProcessingTime(time.Since(start)) + if tx != nil { + metrics.GetLogStatistics().CumulativeTiming(metrics.ProcessingTxTiming, time.Since(start)) + } }() if f.batch.isEmpty() { @@ -624,6 +652,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) (errW } log.Infof("processTransaction: single tx. Batch.BatchNumber: %d, BatchNumber: %d, OldStateRoot: %s, txHash: %s, GER: %s", f.batch.batchNumber, f.processRequest.BatchNumber, f.processRequest.OldStateRoot, hashStr, f.processRequest.GlobalExitRoot.String()) + tsCommit := time.Now() processBatchResponse, err := f.executor.ProcessBatch(ctx, f.processRequest, true) if err != nil && errors.Is(err, runtime.ErrExecutorDBError) { log.Errorf("failed to process transaction: %s", err) @@ -643,10 +672,15 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) (errW log.Errorf("failed to update status to invalid in the pool for tx: %s, err: %s", tx.Hash.String(), err) } else { metrics.TxProcessed(metrics.TxProcessedLabelInvalid, 1) + metrics.GetLogStatistics().CumulativeCounting(metrics.ProcessingInvalidTxCounter) } return nil, err } + if tx != nil { + metrics.GetLogStatistics().CumulativeTiming(metrics.ProcessingTxCommit, time.Since(tsCommit)) + } + tsProcessResponse := time.Now() oldStateRoot := f.batch.stateRoot if len(processBatchResponse.Responses) > 0 && tx != nil { errWg, err = f.handleProcessTransactionResponse(ctx, tx, processBatchResponse, oldStateRoot) @@ -660,6 +694,10 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) (errW f.batch.localExitRoot = processBatchResponse.NewLocalExitRoot log.Infof("processTransaction: data loaded in memory. 
batch.batchNumber: %d, batchNumber: %d, result.NewStateRoot: %s, result.NewLocalExitRoot: %s, oldStateRoot: %s", f.batch.batchNumber, f.processRequest.BatchNumber, processBatchResponse.NewStateRoot.String(), processBatchResponse.NewLocalExitRoot.String(), oldStateRoot.String()) + if tx != nil { + metrics.GetLogStatistics().CumulativeTiming(metrics.ProcessingTxResponse, time.Since(tsProcessResponse)) + } + return nil, nil } diff --git a/sequencer/interfaces.go b/sequencer/interfaces.go index 8625828aae..0583c1637f 100644 --- a/sequencer/interfaces.go +++ b/sequencer/interfaces.go @@ -90,7 +90,7 @@ type workerInterface interface { AddPendingTxToStore(txHash common.Hash, addr common.Address) DeletePendingTxToStore(txHash common.Hash, addr common.Address) HandleL2Reorg(txHashes []common.Hash) - NewTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) + NewTxTracker(tx pool.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) AddForcedTx(txHash common.Hash, addr common.Address) DeleteForcedTx(txHash common.Hash, addr common.Address) } diff --git a/sequencer/metrics/logstatistics.go b/sequencer/metrics/logstatistics.go new file mode 100644 index 0000000000..141649d523 --- /dev/null +++ b/sequencer/metrics/logstatistics.go @@ -0,0 +1,40 @@ +package metrics + +import ( + "time" +) + +type LogTag string + +type LogStatistics interface { + CumulativeCounting(tag LogTag) + CumulativeValue(tag LogTag, value int64) + CumulativeTiming(tag LogTag, duration time.Duration) + SetTag(tag LogTag, value string) + Summary() string + ResetStatistics() + + UpdateTimestamp(tag LogTag, tm time.Time) +} + +const ( + TxCounter LogTag = "TxCounter" + GetTx LogTag = "GetTx" + GetTxPauseCounter LogTag = "GetTxPauseCounter" + BatchCloseReason LogTag = "BatchCloseReason" + ReprocessingTxCounter LogTag = "ReProcessingTxCounter" + FailTxCounter LogTag = "FailTxCounter" + NewRound LogTag = "NewRound" + BatchGas LogTag = "BatchGas" + + ProcessingTxTiming LogTag = "ProcessingTxTiming" + ProcessingInvalidTxCounter LogTag = "ProcessingInvalidTxCounter" + ProcessingTxCommit LogTag = "ProcessingTxCommit" + ProcessingTxResponse LogTag = "ProcessingTxResponse" + + FinalizeBatchTiming LogTag = "FinalizeBatchTiming" + FinalizeBatchNumber LogTag = "FinalizeBatchNumber" + FinalizeBatchReprocessFullBatch LogTag = "FinalizeBatchReprocessFullBatch" + FinalizeBatchCloseBatch LogTag = "FinalizeBatchCloseBatch" + FinalizeBatchOpenBatch LogTag = "FinalizeBatchOpenBatch" +) diff --git a/sequencer/metrics/logstatisticsimpl.go b/sequencer/metrics/logstatisticsimpl.go new file mode 100644 index 0000000000..d74dd87596 --- /dev/null +++ b/sequencer/metrics/logstatisticsimpl.go @@ -0,0 +1,85 @@ +package metrics + +import ( + "strconv" + "sync" + "time" +) + +var instance *logStatisticsInstance +var once sync.Once + +func GetLogStatistics() LogStatistics { + once.Do(func() { + instance = &logStatisticsInstance{} + instance.init() + }) + return instance +} + +type logStatisticsInstance struct { + timestamp map[LogTag]time.Time + statistics map[LogTag]int64 // value maybe the counter or time.Duration(ms) + tags map[LogTag]string +} + +func (l *logStatisticsInstance) init() { + l.timestamp = make(map[LogTag]time.Time) + l.statistics = make(map[LogTag]int64) + l.tags = make(map[LogTag]string) +} + +func (l *logStatisticsInstance) CumulativeCounting(tag LogTag) { + l.statistics[tag]++ +} + +func (l *logStatisticsInstance) CumulativeValue(tag LogTag, value int64) { + l.statistics[tag] += value +} + +func 
(l *logStatisticsInstance) CumulativeTiming(tag LogTag, duration time.Duration) { + l.statistics[tag] += duration.Milliseconds() +} + +func (l *logStatisticsInstance) SetTag(tag LogTag, value string) { + l.tags[tag] = value +} + +func (l *logStatisticsInstance) UpdateTimestamp(tag LogTag, tm time.Time) { + l.timestamp[tag] = tm +} + +func (l *logStatisticsInstance) ResetStatistics() { + l.statistics = make(map[LogTag]int64) + l.tags = make(map[LogTag]string) +} + +func (l *logStatisticsInstance) Summary() string { + batchTotalDuration := "-" + if key, ok := l.timestamp[NewRound]; ok { + batchTotalDuration = strconv.Itoa(int(time.Since(key).Milliseconds())) + } + processTxTiming := "ProcessTx<" + strconv.Itoa(int(l.statistics[ProcessingTxTiming])) + "ms, " + + "Commit<" + strconv.Itoa(int(l.statistics[ProcessingTxCommit])) + "ms>, " + + "ProcessResponse<" + strconv.Itoa(int(l.statistics[ProcessingTxResponse])) + "ms>>, " + + finalizeBatchTiming := "FinalizeBatch<" + strconv.Itoa(int(l.statistics[FinalizeBatchTiming])) + "ms, " + + "ReprocessFullBatch<" + strconv.Itoa(int(l.statistics[FinalizeBatchReprocessFullBatch])) + "ms>, " + + "CloseBatch<" + strconv.Itoa(int(l.statistics[FinalizeBatchCloseBatch])) + "ms>, " + + "OpenBatch<" + strconv.Itoa(int(l.statistics[FinalizeBatchOpenBatch])) + "ms>>, " + + result := "Batch<" + l.tags[FinalizeBatchNumber] + ">, " + + "TotalDuration<" + batchTotalDuration + "ms>, " + + "GasUsed<" + strconv.Itoa(int(l.statistics[BatchGas])) + ">, " + + "Tx<" + strconv.Itoa(int(l.statistics[TxCounter])) + ">, " + + "GetTx<" + strconv.Itoa(int(l.statistics[GetTx])) + "ms>, " + + "GetTxPause<" + strconv.Itoa(int(l.statistics[GetTxPauseCounter])) + ">, " + + "ReprocessTx<" + strconv.Itoa(int(l.statistics[ReprocessingTxCounter])) + ">, " + + "FailTx<" + strconv.Itoa(int(l.statistics[FailTxCounter])) + ">, " + + "InvalidTx<" + strconv.Itoa(int(l.statistics[ProcessingInvalidTxCounter])) + ">, " + + processTxTiming + + finalizeBatchTiming + + "BatchCloseReason<" + l.tags[BatchCloseReason] + ">" + + return result +} diff --git a/sequencer/metrics/logstatisticsimpl_test.go b/sequencer/metrics/logstatisticsimpl_test.go new file mode 100644 index 0000000000..c2ec294811 --- /dev/null +++ b/sequencer/metrics/logstatisticsimpl_test.go @@ -0,0 +1,51 @@ +package metrics + +import ( + "testing" + "time" +) + +func Test_logStatisticsInstance_Summary(t *testing.T) { + type fields struct { + timestamp map[LogTag]time.Time + statistics map[LogTag]int64 + tags map[LogTag]string + } + tests := []struct { + name string + fields fields + want string + }{ + // TODO: Add test cases. 
+ {"1", fields{ + timestamp: map[LogTag]time.Time{NewRound: time.Now().Add(-time.Second)}, + statistics: map[LogTag]int64{ + BatchGas: 111111, + TxCounter: 10, + GetTx: time.Second.Milliseconds(), + GetTxPauseCounter: 2, + ReprocessingTxCounter: 3, + FailTxCounter: 1, + ProcessingInvalidTxCounter: 2, + ProcessingTxTiming: time.Second.Milliseconds() * 30, + ProcessingTxCommit: time.Second.Milliseconds() * 10, + ProcessingTxResponse: time.Second.Milliseconds() * 15, + FinalizeBatchTiming: time.Second.Milliseconds() * 50, + FinalizeBatchReprocessFullBatch: time.Second.Milliseconds() * 20, + FinalizeBatchCloseBatch: time.Second.Milliseconds() * 10, + FinalizeBatchOpenBatch: time.Second.Milliseconds() * 10, + }, + tags: map[LogTag]string{BatchCloseReason: "deadline", FinalizeBatchNumber: "123"}, + }, "test"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := &logStatisticsInstance{ + timestamp: tt.fields.timestamp, + statistics: tt.fields.statistics, + tags: tt.fields.tags, + } + t.Log(l.Summary()) + }) + } +} diff --git a/sequencer/mock_worker.go b/sequencer/mock_worker.go index 8e515c7c25..9532148429 100644 --- a/sequencer/mock_worker.go +++ b/sequencer/mock_worker.go @@ -4,6 +4,7 @@ package sequencer import ( context "context" + "github.com/0xPolygonHermez/zkevm-node/pool" big "math/big" common "github.com/ethereum/go-ethereum/common" @@ -109,16 +110,16 @@ func (_m *WorkerMock) MoveTxToNotReady(txHash common.Hash, from common.Address, } // NewTxTracker provides a mock function with given fields: tx, counters, ip -func (_m *WorkerMock) NewTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) { +func (_m *WorkerMock) NewTxTracker(tx pool.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) { ret := _m.Called(tx, counters, ip) var r0 *TxTracker var r1 error if rf, ok := ret.Get(0).(func(types.Transaction, state.ZKCounters, string) (*TxTracker, error)); ok { - return rf(tx, counters, ip) + return rf(tx.Transaction, counters, ip) } if rf, ok := ret.Get(0).(func(types.Transaction, state.ZKCounters, string) *TxTracker); ok { - r0 = rf(tx, counters, ip) + r0 = rf(tx.Transaction, counters, ip) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*TxTracker) @@ -126,7 +127,7 @@ func (_m *WorkerMock) NewTxTracker(tx types.Transaction, counters state.ZKCounte } if rf, ok := ret.Get(1).(func(types.Transaction, state.ZKCounters, string) error); ok { - r1 = rf(tx, counters, ip) + r1 = rf(tx.Transaction, counters, ip) } else { r1 = ret.Error(1) } diff --git a/sequencer/txtracker.go b/sequencer/txtracker.go index 3a380d2301..025fb1ddf6 100644 --- a/sequencer/txtracker.go +++ b/sequencer/txtracker.go @@ -1,12 +1,12 @@ package sequencer import ( + "github.com/0xPolygonHermez/zkevm-node/pool" "math/big" "time" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" ) // TxTracker is a struct that contains all the tx data needed to be managed by the worker @@ -33,7 +33,8 @@ type TxTracker struct { } // newTxTracker creates and inti a TxTracker -func newTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) { +func newTxTracker(ptx pool.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) { + tx := ptx.Transaction addr, err := state.GetSender(tx) if err != nil { return nil, err diff --git a/sequencer/worker.go b/sequencer/worker.go index 0e27aace09..a9345b7f80 100644 --- a/sequencer/worker.go +++ 
b/sequencer/worker.go @@ -3,6 +3,7 @@ package sequencer import ( "context" "fmt" + "github.com/0xPolygonHermez/zkevm-node/pool" "math/big" "runtime" "sync" @@ -11,7 +12,6 @@ import ( "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" ) // Worker represents the worker component of the sequencer @@ -34,7 +34,7 @@ func NewWorker(state stateInterface) *Worker { } // NewTxTracker creates and inits a TxTracker -func (w *Worker) NewTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) { +func (w *Worker) NewTxTracker(tx pool.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) { return newTxTracker(tx, counters, ip) } diff --git a/sonar-project.properties b/sonar-project.properties index 6ddc88abf2..e3c458b17d 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -1 +1 @@ -sonar.projectKey=zkevm-node +sonar.projectKey=xgon-node diff --git a/test/config/grafana/dashboard-dockers.json b/test/config/grafana/dashboard-dockers.json index 555a5c09a4..cd08ad86b3 100644 --- a/test/config/grafana/dashboard-dockers.json +++ b/test/config/grafana/dashboard-dockers.json @@ -573,14 +573,14 @@ "current": { "selected": true, "text": [ - "zkevm-json-rpc", - "zkevm-pool-db", - "zkevm-sequencer" + "xgon-json-rpc", + "xgon-pool-db", + "xgon-sequencer" ], "value": [ - "zkevm-json-rpc", - "zkevm-pool-db", - "zkevm-sequencer" + "xgon-json-rpc", + "xgon-pool-db", + "xgon-sequencer" ] }, "datasource": { diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml index 88926ff512..4b18d633ee 100644 --- a/test/config/test.node.config.toml +++ b/test/config/test.node.config.toml @@ -9,7 +9,7 @@ Outputs = ["stderr"] User = "state_user" Password = "state_password" Name = "state_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 @@ -27,13 +27,13 @@ PollMinAllowedGasPriceInterval = "15s" User = "pool_user" Password = "pool_password" Name = "pool_db" - Host = "zkevm-pool-db" + Host = "xgon-pool-db" Port = "5432" EnableLog = false MaxConns = 200 [Etherman] -URL = "http://zkevm-mock-l1-network:8545" +URL = "http://xgon-mock-l1-network:8545" ForkIDChunkSize = 20000 MultiGasProvider = false [Etherscan] @@ -129,10 +129,10 @@ DefaultGasPriceWei = 1000000000 MaxGasPriceWei = 0 [MTClient] -URI = "zkevm-prover:50061" +URI = "xgon-prover:50061" [Executor] -URI = "zkevm-prover:50071" +URI = "xgon-prover:50071" MaxGRPCMessageSize = 100000000 [Metrics] @@ -148,7 +148,7 @@ ProfilingEnabled = true User = "event_user" Password = "event_password" Name = "event_db" - Host = "zkevm-event-db" + Host = "xgon-event-db" Port = "5432" EnableLog = false MaxConns = 200 @@ -157,7 +157,7 @@ ProfilingEnabled = true User = "prover_user" Password = "prover_pass" Name = "prover_db" -Host = "zkevm-state-db" +Host = "xgon-state-db" Port = "5432" EnableLog = false MaxConns = 200 diff --git a/test/dbutils/dbutils.go b/test/dbutils/dbutils.go index 2db5b20145..9c42b6e1eb 100644 --- a/test/dbutils/dbutils.go +++ b/test/dbutils/dbutils.go @@ -11,7 +11,7 @@ import ( // InitOrResetState will initializes the State db running the migrations or // will reset all the known data and rerun the migrations func InitOrResetState(cfg db.Config) error { - return initOrReset(cfg, "zkevm-state-db") + return initOrReset(cfg, "xgon-state-db") } // InitOrResetPool will initializes the Pool db running the migrations or 
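The sequencer changes above thread a process-wide LogStatistics collector through the finalizer so that each closed batch logs a single summary line. The following is a minimal, hypothetical sketch of how those helpers fit together, mirroring the calls added to sequencer/finalizer.go; the import path is assumed from the module path used elsewhere in this diff, and the gas value and batch number are placeholder inputs for illustration only.

package main

import (
	"fmt"
	"time"

	"github.com/0xPolygonHermez/zkevm-node/sequencer/metrics"
)

func main() {
	// GetLogStatistics returns the process-wide singleton shared by the finalizer.
	stats := metrics.GetLogStatistics()
	stats.UpdateTimestamp(metrics.NewRound, time.Now())

	// Inside the finalize loop, per transaction: record fetch time, count the tx,
	// and accumulate its gas into the batch total.
	start := time.Now()
	// ... fetch and process one tx here ...
	stats.CumulativeTiming(metrics.GetTx, time.Since(start))
	stats.CumulativeCounting(metrics.TxCounter)
	stats.CumulativeValue(metrics.BatchGas, 21000) // placeholder gas value

	// When the batch is closed: tag it, log the summary, then reset for the next round.
	stats.SetTag(metrics.FinalizeBatchNumber, "123") // placeholder batch number
	stats.SetTag(metrics.BatchCloseReason, "deadline")
	fmt.Println(stats.Summary())
	stats.ResetStatistics()
	stats.UpdateTimestamp(metrics.NewRound, time.Now())
}

Summary() renders the accumulated counters and timings (TxCounter, BatchGas, ProcessingTx*, FinalizeBatch*) into the single log line emitted by the finalizer, and ResetStatistics() clears them so each batch round starts from zero.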
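A condensed sketch of the L2 gas-price arithmetic introduced in gasprice/fixed.go earlier in this diff: the Kafka processor keeps the latest L2 coin price, and UpdateGasPriceAvg divides GasPriceUsdt by that price, scales to wei, and truncates the result to its three leading digits before clamping between DefaultGasPriceWei and MaxGasPriceWei. The program below only reproduces that arithmetic with the GasPriceUsdt and DefaultL2CoinPrice values used in the new tests; it is an illustration under those assumed inputs, not code taken from the diff.

package main

import (
	"fmt"
	"math/big"
	"strconv"
)

func main() {
	gasPriceUsdt := 0.001 // assumed example input, as in the new gasprice tests
	l2CoinPrice := 40.0   // DefaultL2CoinPrice, used until Kafka delivers a fresher price

	// USDT per gas unit divided by the coin price gives the price in the native coin,
	// then scaled by 1e18 (OKBWei) to express it in wei.
	res := new(big.Float).Mul(big.NewFloat(gasPriceUsdt/l2CoinPrice), big.NewFloat(1e18))
	result := new(big.Int)
	res.Int(result) // 25000000000000 wei

	// Keep the three leading digits and zero the rest, as fixed.go does.
	s := result.String()
	if len(s) > 3 {
		s = s[:3] + fmt.Sprintf("%0"+strconv.Itoa(len(s)-3)+"d", 0)
	}
	truncated, _ := new(big.Int).SetString(s, 10)
	fmt.Println(truncated) // 25000000000000, the l2GasPrice expected by TestUpdateGasPriceFixed
}

In fixed.go the truncated value is additionally raised to DefaultGasPriceWei or capped at MaxGasPriceWei when configured, and then stored in the pool via SetGasPrices together with the current L1 gas price.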
diff --git a/test/docker-compose.yml b/test/docker-compose.yml
index 1c52d5e3ff..6131e6c892 100644
--- a/test/docker-compose.yml
+++ b/test/docker-compose.yml
@@ -1,7 +1,7 @@
 version: "3.5"
 networks:
   default:
-    name: zkevm
+    name: Xgon
 
 services:
   grafana:
@@ -13,8 +13,8 @@ services:
       - ./config/grafana/dashboard-dockers.json:/etc/grafana/provisioning/dashboards/dashboard-dockers.json:ro
       - ./config/grafana/dashboard-node.json:/etc/grafana/provisioning/dashboards/dashboard-node.json:ro
     environment:
-      - GF_SECURITY_ADMIN_USER=zkevm
-      - GF_SECURITY_ADMIN_PASSWORD=zkevm
+      - GF_SECURITY_ADMIN_USER=Xgon
+      - GF_SECURITY_ADMIN_PASSWORD=Xgon
     ports:
       - 3000:3000
     depends_on:
@@ -34,7 +34,7 @@ services:
       - POSTGRES_DB=grafana
     depends_on:
      - grafana-db
-      - zkevm-json-rpc
+      - xgon-json-rpc
 
   grafana-db:
     container_name: grafana-db
@@ -46,26 +46,26 @@ services:
       - POSTGRES_PASSWORD=password
       - POSTGRES_DB=grafana
 
-  zkevm-sequencer:
-    container_name: zkevm-sequencer
-    image: zkevm-node
+  xgon-sequencer:
+    container_name: xgon-sequencer
+    image: xgon-node
     ports:
       - 9092:9091 # needed if metrics enabled
       - 6060:6060
     environment:
-      - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db
-      - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
+      - ZKEVM_NODE_STATEDB_HOST=xgon-state-db
+      - ZKEVM_NODE_POOL_DB_HOST=xgon-pool-db
     volumes:
       - ./config/test.node.config.toml:/app/config.toml
       - ./config/test.genesis.config.json:/app/genesis.json
     command:
       - "/bin/sh"
       - "-c"
-      - "/app/zkevm-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components sequencer"
+      - "/app/xgon-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components sequencer"
 
-  zkevm-sequence-sender:
-    container_name: zkevm-sequence-sender
-    image: zkevm-node
+  xgon-sequence-sender:
+    container_name: xgon-sequence-sender
+    image: xgon-node
     environment:
       - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db
       - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
@@ -77,34 +77,34 @@ services:
     command:
       - "/bin/sh"
       - "-c"
-      - "/app/zkevm-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components sequence-sender"
+      - "/app/xgon-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components sequence-sender"
 
-  zkevm-json-rpc:
-    container_name: zkevm-json-rpc
-    image: zkevm-node
+  xgon-json-rpc:
+    container_name: xgon-json-rpc
+    image: xgon-node
     ports:
       - 8123:8123
       - 8133:8133 # needed if WebSockets enabled
       - 9091:9091 # needed if metrics enabled
     environment:
-      - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db
-      - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
+      - ZKEVM_NODE_STATEDB_HOST=xgon-state-db
+      - ZKEVM_NODE_POOL_DB_HOST=xgon-pool-db
     volumes:
       - ./config/test.node.config.toml:/app/config.toml
       - ./config/test.genesis.config.json:/app/genesis.json
     command:
       - "/bin/sh"
       - "-c"
-      - "/app/zkevm-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components rpc"
+      - "/app/xgon-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components rpc"
 
-  zkevm-aggregator:
-    container_name: zkevm-aggregator
-    image: zkevm-node
+  xgon-aggregator:
+    container_name: xgon-aggregator
+    image: xgon-node
     ports:
       - 50081:50081
       - 9093:9091 # needed if metrics enabled
     environment:
-      - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db
+      - ZKEVM_NODE_STATEDB_HOST=xgon-state-db
       - ZKEVM_NODE_AGGREGATOR_SENDER_ADDRESS=0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266
     volumes:
       - ./config/test.node.config.toml:/app/config.toml
@@ -112,30 +112,30 @@ services:
     command:
       - "/bin/sh"
       - "-c"
-      - "/app/zkevm-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components aggregator"
+      - "/app/xgon-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components aggregator"
 
-  zkevm-sync:
-    container_name: zkevm-sync
-    image: zkevm-node
+  xgon-sync:
+    container_name: xgon-sync
+    image: xgon-node
     ports:
       - 9095:9091 # needed if metrics enabled
     environment:
-      - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db
+      - ZKEVM_NODE_STATEDB_HOST=xgon-state-db
     volumes:
       - ./config/test.node.config.toml:/app/config.toml
       - ./config/test.genesis.config.json:/app/genesis.json
     command:
       - "/bin/sh"
       - "-c"
-      - "/app/zkevm-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components synchronizer"
+      - "/app/xgon-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components synchronizer"
 
-  zkevm-eth-tx-manager:
-    container_name: zkevm-eth-tx-manager
-    image: zkevm-node
+  xgon-eth-tx-manager:
+    container_name: xgon-eth-tx-manager
+    image: xgon-node
     ports:
       - 9094:9091 # needed if metrics enabled
     environment:
-      - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db
+      - ZKEVM_NODE_STATEDB_HOST=xgon-state-db
     volumes:
       - ./sequencer.keystore:/pk/sequencer.keystore
       - ./aggregator.keystore:/pk/aggregator.keystore
@@ -144,13 +144,13 @@ services:
     command:
       - "/bin/sh"
       - "-c"
-      - "/app/zkevm-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components eth-tx-manager"
+      - "/app/xgon-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components eth-tx-manager"
 
-  zkevm-l2gaspricer:
-    container_name: zkevm-l2gaspricer
-    image: zkevm-node
+  xgon-l2gaspricer:
+    container_name: xgon-l2gaspricer
+    image: xgon-node
     environment:
-      - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
+      - ZKEVM_NODE_POOL_DB_HOST=xgon-pool-db
     volumes:
       - ./test.keystore:/pk/keystore
       - ./config/test.node.config.toml:/app/config.toml
@@ -158,10 +158,10 @@ services:
     command:
       - "/bin/sh"
       - "-c"
-      - "/app/zkevm-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components l2gaspricer"
+      - "/app/xgon-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components l2gaspricer"
 
-  zkevm-state-db:
-    container_name: zkevm-state-db
+  xgon-state-db:
+    container_name: xgon-state-db
     image: postgres:15
     deploy:
       resources:
@@ -182,8 +182,8 @@ services:
       - "-N"
       - "500"
 
-  zkevm-pool-db:
-    container_name: zkevm-pool-db
+  xgon-pool-db:
+    container_name: xgon-pool-db
     image: postgres:15
     deploy:
       resources:
@@ -202,8 +202,8 @@ services:
       - "-N"
       - "500"
 
-  zkevm-event-db:
-    container_name: zkevm-event-db
+  xgon-event-db:
+    container_name: xgon-event-db
     image: postgres:15
     deploy:
       resources:
@@ -224,9 +224,9 @@ services:
       - "-N"
       - "500"
 
-  zkevm-explorer-l1:
-    container_name: zkevm-explorer-l1
-    image: hermeznetwork/zkevm-explorer:latest
+  xgon-explorer-l1:
+    container_name: xgon-explorer-l1
+    image: hermeznetwork/xgon-explorer:latest
     ports:
       - 4000:4000
     environment:
@@ -234,8 +234,8 @@ services:
       - SUBNETWORK=Local Ethereum
       - COIN=ETH
       - ETHEREUM_JSONRPC_VARIANT=geth
-      - ETHEREUM_JSONRPC_HTTP_URL=http://zkevm-mock-l1-network:8545
-      - DATABASE_URL=postgres://l1_explorer_user:l1_explorer_password@zkevm-explorer-l1-db:5432/l1_explorer_db
+      - ETHEREUM_JSONRPC_HTTP_URL=http://xgon-mock-l1-network:8545
+      - DATABASE_URL=postgres://l1_explorer_user:l1_explorer_password@xgon-explorer-l1-db:5432/l1_explorer_db
       - ECTO_USE_SSL=false
       - MIX_ENV=prod
     command:
@@ -243,8 +243,8 @@ services:
       - "-c"
       - "mix do ecto.create, ecto.migrate; mix phx.server"
 
-  zkevm-explorer-l1-db:
-    container_name: zkevm-explorer-l1-db
+  xgon-explorer-l1-db:
+    container_name: xgon-explorer-l1-db
     image: postgres:15
     ports:
       - 5436:5432
@@ -257,9 +257,9 @@ services:
       - "-N"
       - "500"
 
-  zkevm-explorer-l2:
-    container_name: zkevm-explorer-l2
-    image: hermeznetwork/zkevm-explorer:latest
+  xgon-explorer-l2:
+    container_name: xgon-explorer-l2
+    image: hermeznetwork/xgon-explorer:latest
     ports:
       - 4001:4000
     environment:
@@ -267,8 +267,8 @@ services:
       - SUBNETWORK=Polygon Hermez
       - COIN=ETH
      - ETHEREUM_JSONRPC_VARIANT=geth
-      - ETHEREUM_JSONRPC_HTTP_URL=http://zkevm-explorer-json-rpc:8124
-      - DATABASE_URL=postgres://l2_explorer_user:l2_explorer_password@zkevm-explorer-l2-db:5432/l2_explorer_db
+      - ETHEREUM_JSONRPC_HTTP_URL=http://xgon-explorer-json-rpc:8124
+      - DATABASE_URL=postgres://l2_explorer_user:l2_explorer_password@xgon-explorer-l2-db:5432/l2_explorer_db
       - ECTO_USE_SSL=false
       - MIX_ENV=prod
       - LOGO=/images/blockscout_logo.svg
@@ -278,15 +278,15 @@ services:
       - "-c"
       - "mix do ecto.create, ecto.migrate; mix phx.server"
 
-  zkevm-explorer-json-rpc:
-    container_name: zkevm-explorer-json-rpc
-    image: zkevm-node
+  xgon-explorer-json-rpc:
+    container_name: xgon-explorer-json-rpc
+    image: xgon-node
     ports:
       - 8124:8124
       - 8134:8134 # needed if WebSockets enabled
     environment:
-      - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db
-      - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
+      - ZKEVM_NODE_STATEDB_HOST=xgon-state-db
+      - ZKEVM_NODE_POOL_DB_HOST=xgon-pool-db
       - ZKEVM_NODE_RPC_PORT=8124
       - ZKEVM_NODE_RPC_WEBSOCKETS_PORT=8134
     volumes:
@@ -295,10 +295,10 @@ services:
     command:
       - "/bin/sh"
       - "-c"
-      - "/app/zkevm-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components rpc --http.api eth,net,debug,zkevm,txpool,web3"
+      - "/app/xgon-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components rpc --http.api eth,net,debug,Xgon,txpool,web3"
 
-  zkevm-explorer-l2-db:
-    container_name: zkevm-explorer-l2-db
+  xgon-explorer-l2-db:
+    container_name: xgon-explorer-l2-db
     image: postgres:15
     ports:
       - 5437:5432
@@ -308,9 +308,9 @@ services:
       - POSTGRES_DB=l2_explorer_db
     command: [ "postgres", "-N", "500" ]
 
-  zkevm-mock-l1-network:
-    container_name: zkevm-mock-l1-network
-    image: hermeznetwork/geth-zkevm-contracts:v2.0.0-RC1-fork.5-geth1.12.0
+  xgon-mock-l1-network:
+    container_name: xgon-mock-l1-network
+    image: hermeznetwork/geth-xgon-contracts:v2.0.0-RC1-fork.5-geth1.12.0
     ports:
       - 8545:8545
       - 8546:8546
@@ -336,9 +336,9 @@ services:
       - "full"
       - "--rpc.allow-unprotected-txs"
 
-  zkevm-prover:
-    container_name: zkevm-prover
-    image: hermeznetwork/zkevm-prover:v2.2.0
+  xgon-prover:
+    container_name: xgon-prover
+    image: hermeznetwork/xgon-prover:v2.2.0
     ports:
       # - 50051:50051 # Prover
       - 50052:50052 # Mock prover
@@ -360,11 +360,11 @@ services:
     command: >
       /app/zkprover-mock server --statedb-port 43061 --executor-port 43071 --test-vector-path /app/testvectors
 
-  zkevm-approve:
-    container_name: zkevm-approve
-    image: zkevm-node
+  xgon-approve:
+    container_name: xgon-approve
+    image: xgon-node
     environment:
-      - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db
+      - ZKEVM_NODE_STATEDB_HOST=xgon-state-db
     volumes:
       - ./sequencer.keystore:/pk/keystore
       - ./config/test.node.config.toml:/app/config.toml
@@ -372,10 +372,10 @@ services:
     command:
- "/bin/sh" - "-c" - - "/app/zkevm-node approve --network custom --custom-network-file /app/genesis.json --key-store-path /pk/keystore --pw testonly --am 115792089237316195423570985008687907853269984665640564039457584007913129639935 -y --cfg /app/config.toml" + - "/app/xgon-node approve --network custom --custom-network-file /app/genesis.json --key-store-path /pk/keystore --pw testonly --am 115792089237316195423570985008687907853269984665640564039457584007913129639935 -y --cfg /app/config.toml" - zkevm-permissionless-db: - container_name: zkevm-permissionless-db + xgon-permissionless-db: + container_name: xgon-permissionless-db image: postgres:15 deploy: resources: @@ -396,9 +396,9 @@ services: - "-N" - "500" - zkevm-permissionless-node: - container_name: zkevm-permissionless-node - image: zkevm-node + xgon-permissionless-node: + container_name: xgon-permissionless-node + image: xgon-node ports: - 8125:8125 environment: @@ -406,26 +406,26 @@ services: - ZKEVM_NODE_STATEDB_USER=test_user - ZKEVM_NODE_STATEDB_PASSWORD=test_password - ZKEVM_NODE_STATEDB_NAME=state_db - - ZKEVM_NODE_STATEDB_HOST=zkevm-permissionless-db + - ZKEVM_NODE_STATEDB_HOST=xgon-permissionless-db - ZKEVM_NODE_POOL_DB_USER=test_user - ZKEVM_NODE_POOL_DB_PASSWORD=test_password - ZKEVM_NODE_POOL_DB_NAME=pool_db - - ZKEVM_NODE_POOL_DB_HOST=zkevm-permissionless-db + - ZKEVM_NODE_POOL_DB_HOST=xgon-permissionless-db - ZKEVM_NODE_RPC_PORT=8125 - - ZKEVM_NODE_RPC_SEQUENCERNODEURI=http://zkevm-json-rpc:8123 - - ZKEVM_NODE_MTCLIENT_URI=zkevm-permissionless-prover:50061 - - ZKEVM_NODE_EXECUTOR_URI=zkevm-permissionless-prover:50071 + - ZKEVM_NODE_RPC_SEQUENCERNODEURI=http://xgon-json-rpc:8123 + - ZKEVM_NODE_MTCLIENT_URI=xgon-permissionless-prover:50061 + - ZKEVM_NODE_EXECUTOR_URI=xgon-permissionless-prover:50071 volumes: - ./config/test.node.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json command: - "/bin/sh" - "-c" - - "/app/zkevm-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components \"rpc,synchronizer\"" + - "/app/xgon-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components \"rpc,synchronizer\"" - zkevm-permissionless-prover: - container_name: zkevm-permissionless-prover - image: hermeznetwork/zkevm-prover:v2.2.0 + xgon-permissionless-prover: + container_name: xgon-permissionless-prover + image: hermeznetwork/xgon-prover:v2.2.0 ports: # - 50058:50058 # Prover - 50059:50052 # Mock prover @@ -436,9 +436,9 @@ services: command: > zkProver -c /usr/src/app/config.json - zkevm-metrics: + xgon-metrics: image: prom/prometheus:v2.39.1 - container_name: zkevm-metrics + container_name: xgon-metrics restart: unless-stopped ports: - 9090:9090 @@ -448,14 +448,14 @@ services: volumes: - ../config/metrics/prometheus:/etc/prometheus - zkevm-sh: - container_name: zkevm-sh - image: zkevm-node + xgon-sh: + container_name: xgon-sh + image: xgon-node stdin_open: true tty: true environment: - - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db - - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db + - ZKEVM_NODE_STATEDB_HOST=xgon-state-db + - ZKEVM_NODE_POOL_DB_HOST=xgon-pool-db volumes: - ./config/test.node.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json diff --git a/test/scripts/postgres/run.sh b/test/scripts/postgres/run.sh index f0dcb1b95e..aee4a9915a 100644 --- a/test/scripts/postgres/run.sh +++ b/test/scripts/postgres/run.sh @@ -17,8 +17,8 @@ main(){ echo "${script_contents}" > "${script_file_path}" - docker cp 
"${script_file_path}" zkevm-state-db:"${script_file_path}" - docker exec zkevm-state-db bash -c "chmod a+x ${script_file_path} && psql ${DBNAME} ${DBUSER} -v ON_ERROR_STOP=ON --single-transaction -f ${script_file_path}" + docker cp "${script_file_path}" xgon-state-db:"${script_file_path}" + docker exec xgon-state-db bash -c "chmod a+x ${script_file_path} && psql ${DBNAME} ${DBUSER} -v ON_ERROR_STOP=ON --single-transaction -f ${script_file_path}" echo "Done" done diff --git a/tools/executor/README.md b/tools/executor/README.md index 990bf2cd9e..7ab0622fb3 100644 --- a/tools/executor/README.md +++ b/tools/executor/README.md @@ -70,7 +70,7 @@ In case some vector doesn't use the default genesis: ```bash make run-db make run-zkprover -docker-compose up -d zkevm-sync +docker-compose up -d xgon-sync ``` 2. Get the entries of the merkletree in JSON format: `PGPASSWORD=prover_pass psql -h 127.0.0.1 -p 5432 -U prover_user -d prover_db -c "select row_to_json(t) from (select encode(hash, 'hex') as hash, encode(data, 'hex') as data from state.merkletree) t" > newGenesis.json`