diff --git a/build/dockerfiles/coordinator-api.Dockerfile.dockerignore b/build/dockerfiles/coordinator-api.Dockerfile.dockerignore index e67a9b0efe..3fcfcdc46d 100644 --- a/build/dockerfiles/coordinator-api.Dockerfile.dockerignore +++ b/build/dockerfiles/coordinator-api.Dockerfile.dockerignore @@ -4,3 +4,5 @@ docs/ l2geth/ rpc-gateway/ *target/* + +permissionless-batches/conf/ \ No newline at end of file diff --git a/build/dockerfiles/coordinator-cron.Dockerfile.dockerignore b/build/dockerfiles/coordinator-cron.Dockerfile.dockerignore index e67a9b0efe..3fcfcdc46d 100644 --- a/build/dockerfiles/coordinator-cron.Dockerfile.dockerignore +++ b/build/dockerfiles/coordinator-cron.Dockerfile.dockerignore @@ -4,3 +4,5 @@ docs/ l2geth/ rpc-gateway/ *target/* + +permissionless-batches/conf/ \ No newline at end of file diff --git a/build/dockerfiles/db_cli.Dockerfile.dockerignore b/build/dockerfiles/db_cli.Dockerfile.dockerignore index e67a9b0efe..3fcfcdc46d 100644 --- a/build/dockerfiles/db_cli.Dockerfile.dockerignore +++ b/build/dockerfiles/db_cli.Dockerfile.dockerignore @@ -4,3 +4,5 @@ docs/ l2geth/ rpc-gateway/ *target/* + +permissionless-batches/conf/ \ No newline at end of file diff --git a/build/dockerfiles/gas_oracle.Dockerfile.dockerignore b/build/dockerfiles/gas_oracle.Dockerfile.dockerignore index 8734d3f9b6..3fcfcdc46d 100644 --- a/build/dockerfiles/gas_oracle.Dockerfile.dockerignore +++ b/build/dockerfiles/gas_oracle.Dockerfile.dockerignore @@ -1,5 +1,8 @@ assets/ +contracts/ docs/ l2geth/ rpc-gateway/ -*target/* \ No newline at end of file +*target/* + +permissionless-batches/conf/ \ No newline at end of file diff --git a/build/dockerfiles/recovery_permissionless_batches.Dockerfile b/build/dockerfiles/recovery_permissionless_batches.Dockerfile new file mode 100644 index 0000000000..317ec1450c --- /dev/null +++ b/build/dockerfiles/recovery_permissionless_batches.Dockerfile @@ -0,0 +1,30 @@ +# Download Go dependencies +FROM 
scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base + +WORKDIR /src +COPY go.work* ./ +COPY ./rollup/go.* ./rollup/ +COPY ./common/go.* ./common/ +COPY ./coordinator/go.* ./coordinator/ +COPY ./database/go.* ./database/ +COPY ./tests/integration-test/go.* ./tests/integration-test/ +COPY ./bridge-history-api/go.* ./bridge-history-api/ +RUN go mod download -x + +# Build rollup_relayer +FROM base as builder + +RUN --mount=target=. \ + --mount=type=cache,target=/root/.cache/go-build \ + cd /src/rollup/cmd/permissionless_batches/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer + +# Pull rollup_relayer into a second stage deploy ubuntu container +FROM ubuntu:20.04 + +RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y + +ENV CGO_LDFLAGS="-ldl" + +COPY --from=builder /bin/rollup_relayer /bin/ +WORKDIR /app +ENTRYPOINT ["rollup_relayer"] \ No newline at end of file diff --git a/build/dockerfiles/recovery_permissionless_batches.Dockerfile.dockerignore b/build/dockerfiles/recovery_permissionless_batches.Dockerfile.dockerignore new file mode 100644 index 0000000000..3fcfcdc46d --- /dev/null +++ b/build/dockerfiles/recovery_permissionless_batches.Dockerfile.dockerignore @@ -0,0 +1,8 @@ +assets/ +contracts/ +docs/ +l2geth/ +rpc-gateway/ +*target/* + +permissionless-batches/conf/ \ No newline at end of file diff --git a/build/dockerfiles/rollup_relayer.Dockerfile.dockerignore b/build/dockerfiles/rollup_relayer.Dockerfile.dockerignore index 8734d3f9b6..3fcfcdc46d 100644 --- a/build/dockerfiles/rollup_relayer.Dockerfile.dockerignore +++ b/build/dockerfiles/rollup_relayer.Dockerfile.dockerignore @@ -1,5 +1,8 @@ assets/ +contracts/ docs/ l2geth/ rpc-gateway/ -*target/* \ No newline at end of file +*target/* + +permissionless-batches/conf/ \ No newline at end of file diff --git a/go.work b/go.work index b42509f9b4..bc205704bd 100644 --- a/go.work +++ b/go.work @@ -3,10 +3,11 @@ go 1.22 toolchain go1.22.2 use ( - 
./bridge-history-api + //./bridge-history-api ./common ./coordinator ./database ./rollup ./tests/integration-test + //../go-ethereum ) diff --git a/go.work.sum b/go.work.sum index f252c743d6..1173bc3c6a 100644 --- a/go.work.sum +++ b/go.work.sum @@ -718,8 +718,10 @@ github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/compose-spec/compose-go v1.20.0 h1:h4ZKOst1EF/DwZp7dWkb+wbTVE4nEyT9Lc89to84Ol4= github.com/compose-spec/compose-go v1.20.0/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM= +github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= github.com/container-orchestrated-devices/container-device-interface v0.6.1 h1:mz77uJoP8im/4Zins+mPqt677ZMaflhoGaYrRAl5jvA= github.com/container-orchestrated-devices/container-device-interface v0.6.1/go.mod h1:40T6oW59rFrL/ksiSs7q45GzjGlbvxnA4xaK6cyq+kA= github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY= @@ -1167,6 +1169,7 @@ github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20L github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A= github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod 
h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= github.com/lestrrat-go/blackmagic v1.0.0 h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4= diff --git a/permissionless-batches/.gitignore b/permissionless-batches/.gitignore new file mode 100644 index 0000000000..ac3d45c877 --- /dev/null +++ b/permissionless-batches/.gitignore @@ -0,0 +1 @@ +conf/ \ No newline at end of file diff --git a/permissionless-batches/README.md b/permissionless-batches/README.md new file mode 100644 index 0000000000..42481fd031 --- /dev/null +++ b/permissionless-batches/README.md @@ -0,0 +1,172 @@ +# Permissionless Batches +Permissionless batches aka enforced batches is a feature that provides guarantee to users that they can exit Scroll even if the operator is down or censoring. +It allows anyone to take over and submit a batch (permissionless batch submission) together with a proof after a certain time period has passed without a batch being finalized on L1. + +Once permissionless batch mode is activated, the operator can no longer submit batches in a permissioned way. Only the security council can deactivate permissionless batch mode and reinstate the operator as the only batch submitter. +There are two types of situations to consider: +- `Permissionless batch mode is activated:` This means that finalization halted for some time. Now anyone can submit batches utilizing the [batch production toolkit](#batch-production-toolkit). +- `Permissionless batch mode is deactivated:` This means that the security council has decided to reinstate the operator as the only batch submitter. The operator needs to [recover](#operator-recovery) the sequencer and relayer to resume batch submission and the valid L2 chain. + + +## Batch production toolkit +The batch production toolkit is a set of tools that allow anyone to submit a batch in permissionless mode. It consists of three main components: +1. l2geth state recovery from L1 +2. l2geth block production +3. 
production, proving and submission of batch with `docker-compose.yml` + +### Prerequisites +- Unix-like OS, 32GB RAM +- Docker +- [l2geth](https://github.com/scroll-tech/go-ethereum/) or [Docker image](https://hub.docker.com/r/scrolltech/l2geth) of corresponding version [TODO link list with versions](#batch-production-toolkit). +- access to an Ethereum L1 RPC node (beacon node and execution client) +- ability to run a prover or access to a proving service (e.g. Sindri) +- L1 account with funds to pay for the batch submission + +### 1. l2geth state recovery from L1 +Once permissionless mode is activated there's no blocks being produced and propagated on L2. The first step is to recover the latest state of the L2 chain from L1. This is done by running l2geth in recovery mode. +More information about l2geth recovery (aka L1 follower mode) can be found [here TODO: put correct link once released](https://github.com/scroll-tech/scroll-documentation/pull/374). + +Running l2geth in recovery mode requires following configuration: +- `--scroll` or `--scroll-sepolia` - enables Scroll Mainnet or Sepolia mode +- `--da.blob.beaconnode` - L1 RPC beacon node +- `--l1.endpoint` - L1 RPC execution client +- `--da.sync=true` - enables syncing with L1 +- `--da.recovery` - enables recovery mode +- `--da.recovery.initiall1block` - initial L1 block (commit tx of initial batch) +- `--da.recovery.initialbatch` - batch where to start recovery from. Can be found on [Scrollscan Explorer](https://scrollscan.com/batches). 
+- `--da.recovery.l2endblock` - until which L2 block recovery should run (optional) + +```bash +./build/bin/geth --scroll<-sepolia> \ +--datadir "tmp/datadir" \ +--gcmode archive \ +--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,net,web3,debug,scroll" --http.vhosts "*" \ +--da.blob.beaconnode "" \ +--l1.endpoint "" \ +--da.sync=true --da.recovery --da.recovery.initiall1block "" --da.recovery.initialbatch "" --da.recovery.l2endblock "" \ +--verbosity 3 +``` + +### 2. l2geth block production +After the state is recovered, the next step is to produce blocks on L2. This is done by running l2geth in block production mode. +As a prerequisite, the state recovery must be completed and the latest state of the L2 chain must be available. + +You also need to generate a keystore e.g. with [Clef](https://geth.ethereum.org/docs/fundamentals/account-management) to be able to sign blocks. +This key is not used for any funds, but required for block production to work. Once you generated blocks you can safely discard it. + +Running l2geth in block production mode requires following configuration: +- `--scroll` or `--scroll-sepolia` - enables Scroll Mainnet or Sepolia mode +- `--da.blob.beaconnode` - L1 RPC beacon node +- `--l1.endpoint` - L1 RPC execution client +- `--da.sync=true` - enables syncing with L1 +- `--da.recovery` - enables recovery mode +- `--da.recovery.produceblocks` - enables block production +- `--miner.etherbase '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' --mine` - enables mining. 
the address is not used, but required for mining to work +- `--miner.gaslimit 1 --miner.gasprice 1 --miner.maxaccountsnum 100 --rpc.gascap 0 --gpo.ignoreprice 1` - gas limits for block production + +```bash +./build/bin/geth --scroll<-sepolia> \ +--datadir "tmp/datadir" \ +--gcmode archive \ +--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,net,web3,debug,scroll" --http.vhosts "*" \ +--da.blob.beaconnode "" \ +--l1.endpoint "" \ +--da.sync=true --da.recovery --da.recovery.produceblocks \ +--miner.gaslimit 1 --miner.gasprice 1 --miner.maxaccountsnum 100 --rpc.gascap 0 --gpo.ignoreprice 1 \ +--miner.etherbase '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' --mine \ +--ccc \ +--verbosity 3 +``` + +### 3. production, proving and submission of batch with `docker-compose.yml` +After the blocks are produced, the next step is to produce a batch, prove it and submit it to L1. This is done by running the `docker-compose.yml` in the `permissionless-batches` folder. + + +#### Producing a batch +To produce a batch you need to run the `batch-production-submission` profile in `docker-compose.yml`. + +1. Fill `conf/genesis.json` with the latest genesis state from the L2 chain. The genesis for the current fork can be found here: [TODO link list with versions](#batch-production-toolkit) +2. Make sure that `l2geth` with your locally produced blocks is running and reachable from the Docker network (e.g. `http://host.docker.internal:8545`) +3. Fill in required fields in `conf/relayer/config.json` + + +Run with `docker compose --profile batch-production-submission up`. +This will produce chunks, a batch and bundle which will be proven in the next step. +`Success! You're ready to generate proofs!` indicates that everything is working correctly and the batch is ready to be proven. + +#### Proving a batch +To prove the chunk, batch and bundle you just generated you need to run the `proving` profile in `docker-compose.yml`. + +1. 
Make sure `verifier` `low_version_circuit` and `high_version_circuit` in `conf/coordinator/config.json` are correct for the latest fork: [TODO link list with versions](#batch-production-toolkit) +2. Download the latest `assets` and `params` for the circuit from [TODO link list with versions](#batch-production-toolkit) into `conf/coordinator/assets` and `conf/coordinator/params` respectively. +3. Fill in the required fields in `conf/proving-service/config.json`. It is recommended to use Sindri. You'll need to obtain credits and an API key from their [website](https://sindri.app/). +4. Alternatively, you can run your own prover: https://github.com/scroll-tech/scroll-prover. However, this requires more configuration. + +Run with `docker compose --profile proving up`. + + +#### Batch submission +To submit the batch you need to run the `batch-production-submission` profile in `docker-compose.yml`. + +1. Fill in required fields in `conf/relayer/config.json` for the sender config. + +Run with `docker compose --profile batch-production-submission up`. +This will submit the batch to L1 and finalize it. The transaction will be retried in case of failure. + +**Troubleshooting** +- in case the submission fails it will print the calldata for the transaction in an error message. You can use this with `cast call --trace --rpc-url "$SCROLL_L1_DEPLOYMENT_RPC" "$L1_SCROLL_CHAIN_PROXY_ADDR" ` to see what went wrong. + - `0x4df567b9: ErrorNotInEnforcedBatchMode`: permissionless batch mode is not activated, you can't submit a batch + - `0xa5d305cc: ErrorBatchIsEmpty`: no blob was provided. This is usually returned if you do the `cast call`, permissionless mode is activated but you didn't provide a blob in the transaction. + +## Operator recovery +Operator recovery needs to be run by the rollup operator to resume normal rollup operation after permissionless batch mode is deactivated. It consists of two main components: +1. l2geth recovery +2. 
Relayer recovery + +These steps are required to resume permissioned batch submission and the valid L2 chain. They will restore the entire history of the batches submitted during permissionless mode. + +### Prerequisites +- l2geth with the latest state of the L2 chain (before permissionless mode was activated) +- signer key for the sequencer according to Clique consensus +- relayer and coordinator are set up, running and up-to-date with the latest state of the L2 chain (before permissionless mode was activated) + +### l2geth recovery +Running l2geth in recovery mode requires following configuration: +- `--scroll` or `--scroll-sepolia` - enables Scroll Mainnet or Sepolia mode +- `--da.blob.beaconnode` - L1 RPC beacon node +- `--l1.endpoint` - L1 RPC execution client +- `--da.sync=true` - enables syncing with L1 +- `--da.recovery` - enables recovery mode +- `--da.recovery.signblocks` - enables signing blocks with the sequencer and configured key +- `--da.recovery.initiall1block` - initial L1 block (commit tx of initial batch) +- `--da.recovery.initialbatch` - batch where to start recovery from. Can be found on [Scrollscan Explorer](https://scrollscan.com/batches). +- `--da.recovery.l2endblock` - until which L2 block recovery should run (optional) + +```bash +./build/bin/geth --scroll<-sepolia> \ +--datadir "tmp/datadir" \ +--gcmode archive \ +--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,net,web3,debug,scroll" --http.vhosts "*" \ +--da.blob.beaconnode "" \ +--l1.endpoint "" \ +--da.sync=true --da.recovery --da.recovery.signblocks --da.recovery.initiall1block "" --da.recovery.initialbatch "" --da.recovery.l2endblock "" \ +--verbosity 3 +``` + +After the recovery is finished, start the sequencer in normal operation and continue issuing L2 blocks as normal. This will resume the L2 chain, allow the relayer (after running recovery) to create new batches and allow other L2 follower nodes to sync up the valid and signed L2 chain. 
+ +### Relayer recovery +Start the relayer with the following additional top-level configuration: +``` + "recovery_config": { + "enable": true + } +``` + +This will make the relayer recover all the chunks, batches and bundles that were submitted during permissionless mode. These batches are marked automatically as proven and finalized. +Once this process is finished, start the relayer normally without the recovery config to resume normal operation. +``` + "recovery_config": { + "enable": false + } +``` \ No newline at end of file diff --git a/permissionless-batches/conf/coordinator/assets/.gitkeep b/permissionless-batches/conf/coordinator/assets/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/permissionless-batches/conf/coordinator/config.json b/permissionless-batches/conf/coordinator/config.json new file mode 100644 index 0000000000..fd38cc25f4 --- /dev/null +++ b/permissionless-batches/conf/coordinator/config.json @@ -0,0 +1,38 @@ +{ + "prover_manager": { + "provers_per_session": 1, + "session_attempts": 5, + "bundle_collection_time_sec": 3600, + "batch_collection_time_sec": 3600, + "chunk_collection_time_sec": 3600, + "verifier": { + "mock_mode": false, + "low_version_circuit": { + "params_path": "./conf/params", + "assets_path": "./conf/assets", + "fork_name": "darwinV2", + "min_prover_version": "v4.4.55" + }, + "high_version_circuit": { + "params_path": "./conf/params", + "assets_path": "./conf/assets", + "fork_name": "darwinV2", + "min_prover_version": "v4.4.56" + } + } + }, + "db": { + "driver_name": "postgres", + "dsn": "postgres://db/scroll?sslmode=disable&user=postgres", + "maxOpenNum": 200, + "maxIdleNum": 20 + }, + "l2": { + "chain_id": 111 + }, + "auth": { + "secret": "prover secret key", + "challenge_expire_duration_sec": 3600, + "login_expire_duration_sec": 3600 + } +} diff --git a/permissionless-batches/conf/coordinator/params/.gitkeep b/permissionless-batches/conf/coordinator/params/.gitkeep new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/permissionless-batches/conf/genesis.json b/permissionless-batches/conf/genesis.json new file mode 100644 index 0000000000..bebd2dde38 --- /dev/null +++ b/permissionless-batches/conf/genesis.json @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/permissionless-batches/conf/proving-service/batch/.gitkeep b/permissionless-batches/conf/proving-service/batch/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/permissionless-batches/conf/proving-service/bundle/.gitkeep b/permissionless-batches/conf/proving-service/bundle/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/permissionless-batches/conf/proving-service/chunk/.gitkeep b/permissionless-batches/conf/proving-service/chunk/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/permissionless-batches/conf/proving-service/config.json b/permissionless-batches/conf/proving-service/config.json new file mode 100644 index 0000000000..de7fc9334c --- /dev/null +++ b/permissionless-batches/conf/proving-service/config.json @@ -0,0 +1,26 @@ +{ + "prover_name_prefix": "prover_", + "keys_dir": "/app/", + "db_path": "/app/", + "coordinator": { + "base_url": "http://coordinator:8390", + "retry_count": 3, + "retry_wait_time_sec": 5, + "connection_timeout_sec": 60 + }, + "l2geth": { + "endpoint": "" + }, + "prover": { + "circuit_type": 2, + "circuit_version": "v0.13.1", + "n_workers": 1, + "cloud": { + "base_url": "https://sindri.app/api/v1/", + "api_key": "", + "retry_count": 3, + "retry_wait_time_sec": 5, + "connection_timeout_sec": 60 + } + } +} \ No newline at end of file diff --git a/permissionless-batches/conf/relayer/config.json b/permissionless-batches/conf/relayer/config.json new file mode 100644 index 0000000000..3b06b6dcc3 --- /dev/null +++ b/permissionless-batches/conf/relayer/config.json @@ -0,0 +1,55 @@ +{ + "l1_config": { + "endpoint": "" + }, + "l2_config": { + "confirmations": "0x0", + "endpoint": "", + 
"relayer_config": { + "commit_sender_signer_config": { + "signer_type": "PrivateKey", + "private_key_signer_config": { + "private_key": "1414141414141414141414141414141414141414141414141414141414141414" + } + }, + "l1_commit_gas_limit_multiplier": 1.2 + }, + "chunk_proposer_config": { + "propose_interval_milliseconds": 100, + "max_block_num_per_chunk": 100, + "max_tx_num_per_chunk": 100, + "max_l1_commit_gas_per_chunk": 11234567, + "max_l1_commit_calldata_size_per_chunk": 112345, + "chunk_timeout_sec": 300, + "max_row_consumption_per_chunk": 1048319, + "gas_cost_increase_multiplier": 1.2, + "max_uncompressed_batch_bytes_size": 634880 + }, + "batch_proposer_config": { + "propose_interval_milliseconds": 1000, + "max_l1_commit_gas_per_batch": 11234567, + "max_l1_commit_calldata_size_per_batch": 112345, + "batch_timeout_sec": 300, + "gas_cost_increase_multiplier": 1.2, + "max_uncompressed_batch_bytes_size": 634880 + }, + "bundle_proposer_config": { + "max_batch_num_per_bundle": 20, + "bundle_timeout_sec": 36000 + } + }, + "db_config": { + "driver_name": "postgres", + "dsn": "postgres://db/scroll?sslmode=disable&user=postgres", + "maxOpenNum": 200, + "maxIdleNum": 20 + }, + "recovery_config": { + "enable": true, + "l1_block_height": , + "latest_finalized_batch": , + "l2_block_height_limit": , + "force_latest_finalized_batch": false, + "force_l1_message_count": 0 + } +} diff --git a/permissionless-batches/docker-compose.yml b/permissionless-batches/docker-compose.yml new file mode 100644 index 0000000000..dca785c1fd --- /dev/null +++ b/permissionless-batches/docker-compose.yml @@ -0,0 +1,121 @@ +name: permissionless-batches + +services: + relayer-batch-production: + build: + context: ../ + dockerfile: build/dockerfiles/recovery_permissionless_batches.Dockerfile + container_name: permissionless-batches-relayer + volumes: + - ./conf/relayer/config.json:/app/conf/config.json + - ./conf/genesis.json:/app/conf/genesis.json + command: "--config /app/conf/config.json" + 
profiles: + - batch-production-submission + depends_on: + db: + condition: service_healthy + + db: + image: postgres:17.0 + environment: + POSTGRES_HOST_AUTH_METHOD: trust + POSTGRES_USER: postgres + POSTGRES_DB: scroll + healthcheck: + test: [ "CMD-SHELL", "pg_isready -U postgres" ] + interval: 1s + timeout: 1s + retries: 10 + volumes: + - db_data:/var/lib/postgresql/data + ports: + - "5432:5432" + + coordinator: + build: + context: ../ + dockerfile: build/dockerfiles/coordinator-api.Dockerfile + volumes: + - ./conf/coordinator/config.json:/app/conf/config.json + - ./conf/genesis.json:/app/conf/genesis.json + command: "--config /app/conf/config.json --http.port 8390 --verbosity 5" + profiles: + - proving + depends_on: + db: + condition: service_healthy + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8390/coordinator/v1/challenge"] + interval: 1s + timeout: 1s + retries: 10 + start_period: 5m + + coordinator-cron: + build: + context: ../ + dockerfile: build/dockerfiles/coordinator-cron.Dockerfile + volumes: + - ./conf/coordinator/:/app/conf + command: "--config /app/conf/config.json --verbosity 3" + profiles: + - proving + depends_on: + db: + condition: service_healthy + + + proving-service-chunk: + image: scrolltech/sdk-cloud-prover:sindri-v0.0.5 + platform: linux/amd64 + command: "--config /app/config.json" + profiles: + - proving + environment: + PROVER_NAME_PREFIX: "sindri_chunk" + CIRCUIT_TYPE: 1 # 1 for chunk proving + N_WORKERS: 1 + volumes: + - ./conf/proving-service/chunk/:/app/ + - ./conf/proving-service/config.json:/app/config.json + depends_on: + coordinator: + condition: service_healthy + + proving-service-batch: + image: scrolltech/sdk-cloud-prover:sindri-v0.0.5 + platform: linux/amd64 + command: "--config /app/config.json" + profiles: + - proving + environment: + PROVER_NAME_PREFIX: "sindri_batch" + CIRCUIT_TYPE: 2 # 2 for batch proving + N_WORKERS: 1 + volumes: + - ./conf/proving-service/batch/:/app + - 
./conf/proving-service/config.json:/app/config.json + depends_on: + coordinator: + condition: service_healthy + + proving-service-bundle: + image: scrolltech/sdk-cloud-prover:sindri-v0.0.5 + platform: linux/amd64 + command: "--config /app/config.json" + profiles: + - proving + environment: + PROVER_NAME_PREFIX: "sindri_bundle" + CIRCUIT_TYPE: 3 # 3 for bundle proving + N_WORKERS: 1 + volumes: + - ./conf/proving-service/bundle/:/app + - ./conf/proving-service/config.json:/app/config.json + depends_on: + coordinator: + condition: service_healthy + +volumes: + db_data: \ No newline at end of file diff --git a/rollup/abi/bridge_abi_test.go b/rollup/abi/bridge_abi_test.go index 5bed6ebb96..2fde2f2552 100644 --- a/rollup/abi/bridge_abi_test.go +++ b/rollup/abi/bridge_abi_test.go @@ -5,9 +5,10 @@ import ( "math/big" "testing" + "github.com/stretchr/testify/assert" + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common/hexutil" - "github.com/stretchr/testify/assert" ) func TestPackCommitBatch(t *testing.T) { @@ -173,6 +174,7 @@ func TestPackSetL2BaseFee(t *testing.T) { } func TestPrintABISignatures(t *testing.T) { + fmt.Println("methods") // print all error signatures of ABI abi, err := ScrollChainMetaData.GetAbi() if err != nil { @@ -184,6 +186,7 @@ func TestPrintABISignatures(t *testing.T) { } fmt.Println("------------------------------") + fmt.Println("errors") for _, errors := range abi.Errors { fmt.Println(hexutil.Encode(errors.ID[:4]), errors.Sig, errors.Name) } diff --git a/rollup/cmd/permissionless_batches/app/app.go b/rollup/cmd/permissionless_batches/app/app.go new file mode 100644 index 0000000000..e0188303fa --- /dev/null +++ b/rollup/cmd/permissionless_batches/app/app.go @@ -0,0 +1,142 @@ +package app + +import ( + "context" + "fmt" + "os" + + "github.com/prometheus/client_golang/prometheus" + "github.com/scroll-tech/da-codec/encoding" + "github.com/urfave/cli/v2" + + "github.com/scroll-tech/go-ethereum/ethclient" + 
"github.com/scroll-tech/go-ethereum/log" + + "scroll-tech/common/database" + "scroll-tech/common/observability" + "scroll-tech/common/utils" + "scroll-tech/common/version" + "scroll-tech/rollup/internal/config" + "scroll-tech/rollup/internal/controller/permissionless_batches" + "scroll-tech/rollup/internal/controller/watcher" +) + +var app *cli.App + +func init() { + // Set up rollup-relayer app info. + app = cli.NewApp() + app.Action = action + app.Name = "permissionless-batches" + app.Usage = "The Scroll Rollup Relayer for permissionless batch production" + app.Version = version.Version + app.Flags = append(app.Flags, utils.CommonFlags...) + app.Flags = append(app.Flags, utils.RollupRelayerFlags...) + app.Commands = []*cli.Command{} + app.Before = func(ctx *cli.Context) error { + return utils.LogSetup(ctx) + } +} + +func action(ctx *cli.Context) error { + // Load config file. + cfgFile := ctx.String(utils.ConfigFileFlag.Name) + cfg, err := config.NewConfig(cfgFile) + if err != nil { + log.Crit("failed to load config file", "config file", cfgFile, "error", err) + } + + subCtx, cancel := context.WithCancel(ctx.Context) + defer cancel() + + // Sanity check config. Make sure the required fields are set. 
+ if cfg.RecoveryConfig == nil { + return fmt.Errorf("recovery config must be specified") + } + if cfg.RecoveryConfig.L1BeaconNodeEndpoint == "" { + return fmt.Errorf("L1 beacon node endpoint must be specified") + } + if cfg.RecoveryConfig.L1BlockHeight == 0 { + return fmt.Errorf("L1 block height must be specified") + } + if cfg.RecoveryConfig.LatestFinalizedBatch == 0 { + return fmt.Errorf("latest finalized batch must be specified") + } + + // init db connection + db, err := database.InitDB(cfg.DBConfig) + if err != nil { + log.Crit("failed to init db connection", "err", err) + } + defer func() { + if err = database.CloseDB(db); err != nil { + log.Crit("failed to close db connection", "error", err) + } + }() + + registry := prometheus.DefaultRegisterer + observability.Server(ctx, db) + + genesisPath := ctx.String(utils.Genesis.Name) + genesis, err := utils.ReadGenesis(genesisPath) + if err != nil { + log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err) + } + + minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name)) + chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry) + batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry) + bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry) + + // Init l2geth connection + l2client, err := ethclient.Dial(cfg.L2Config.Endpoint) + if err != nil { + return fmt.Errorf("failed to connect to L2geth at RPC=%s: %w", cfg.L2Config.Endpoint, err) + } + + l2Watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry) + + recovery := permissionless_batches.NewRecovery(subCtx, cfg, genesis, db, chunkProposer, batchProposer, 
bundleProposer, l2Watcher) + + if recovery.RecoveryNeeded() { + if err = recovery.Run(); err != nil { + return fmt.Errorf("failed to run recovery: %w", err) + } + log.Info("Success! You're ready to generate proofs!") + } else { + log.Info("No recovery needed, submitting batch and proof to L1...") + submitter, err := permissionless_batches.NewSubmitter(subCtx, db, cfg.L2Config.RelayerConfig, genesis.Config) + if err != nil { + return fmt.Errorf("failed to create submitter: %w", err) + } + if err = submitter.Submit(false); err != nil { + return fmt.Errorf("failed to submit batch: %w", err) + } + + log.Info("Transaction submitted to L1, waiting for confirmation...") + + // Catch CTRL-C to ensure a graceful shutdown. + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt) + + select { + case <-subCtx.Done(): + case confirmation := <-submitter.Sender().ConfirmChan(): + if confirmation.IsSuccessful { + log.Info("Transaction confirmed on L1, your permissionless batch is part of the ledger!", "tx hash", confirmation.TxHash) + } + case <-interrupt: + log.Info("CTRL-C received, shutting down...") + } + } + + return nil +} + +// Run rollup relayer cmd instance. 
+func Run() { + if err := app.Run(os.Args); err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/rollup/cmd/permissionless_batches/main.go b/rollup/cmd/permissionless_batches/main.go new file mode 100644 index 0000000000..95e157492b --- /dev/null +++ b/rollup/cmd/permissionless_batches/main.go @@ -0,0 +1,7 @@ +package main + +import "scroll-tech/rollup/cmd/permissionless_batches/app" + +func main() { + app.Run() +} diff --git a/rollup/cmd/rollup_relayer/app/app.go b/rollup/cmd/rollup_relayer/app/app.go index d6d1e8a763..9c23a6e69c 100644 --- a/rollup/cmd/rollup_relayer/app/app.go +++ b/rollup/cmd/rollup_relayer/app/app.go @@ -11,13 +11,13 @@ import ( "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rollup/l1" "github.com/urfave/cli/v2" "scroll-tech/common/database" "scroll-tech/common/observability" "scroll-tech/common/utils" "scroll-tech/common/version" - "scroll-tech/rollup/internal/config" "scroll-tech/rollup/internal/controller/relayer" "scroll-tech/rollup/internal/controller/watcher" @@ -108,6 +108,29 @@ func action(ctx *cli.Context) error { l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry) + if cfg.RecoveryConfig.Enable { + log.Info("Starting rollup-relayer in recovery mode", "version", version.Version) + + l1Client, err := ethclient.Dial(cfg.L1Config.Endpoint) + if err != nil { + return fmt.Errorf("failed to connect to L1 client: %w", err) + } + reader, err := l1.NewReader(context.Background(), l1.Config{ + ScrollChainAddress: genesis.Config.Scroll.L1Config.ScrollChainAddress, + L1MessageQueueAddress: genesis.Config.Scroll.L1Config.L1MessageQueueAddress, + }, l1Client) + if err != nil { + return fmt.Errorf("failed to create L1 reader: %w", err) + } + + fullRecovery := 
relayer.NewFullRecovery(subCtx, cfg, genesis, db, chunkProposer, batchProposer, bundleProposer, l2watcher, l1Client, reader) + if err = fullRecovery.RestoreFullPreviousState(); err != nil { + log.Crit("failed to restore full previous state", "error", err) + } + + return nil + } + // Watcher loop to fetch missing blocks go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) { number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations) @@ -115,7 +138,8 @@ func action(ctx *cli.Context) error { log.Error("failed to get block number", "err", loopErr) return } - l2watcher.TryFetchRunningMissingBlocks(number) + // errors are logged in the try method as well + _ = l2watcher.TryFetchRunningMissingBlocks(number) }) go utils.Loop(subCtx, time.Duration(cfg.L2Config.ChunkProposerConfig.ProposeIntervalMilliseconds)*time.Millisecond, chunkProposer.TryProposeChunk) diff --git a/rollup/internal/config/config.go b/rollup/internal/config/config.go index 0b9ce5ac36..ae6d294fce 100644 --- a/rollup/internal/config/config.go +++ b/rollup/internal/config/config.go @@ -18,9 +18,10 @@ import ( // Config load configuration items. 
type Config struct { - L1Config *L1Config `json:"l1_config"` - L2Config *L2Config `json:"l2_config"` - DBConfig *database.Config `json:"db_config"` + L1Config *L1Config `json:"l1_config"` + L2Config *L2Config `json:"l2_config"` + DBConfig *database.Config `json:"db_config"` + RecoveryConfig *RecoveryConfig `json:"recovery_config"` } type ConfigForReplay struct { diff --git a/rollup/internal/config/recovery.go b/rollup/internal/config/recovery.go new file mode 100644 index 0000000000..34b1f82062 --- /dev/null +++ b/rollup/internal/config/recovery.go @@ -0,0 +1,13 @@ +package config + +type RecoveryConfig struct { + Enable bool `json:"enable"` + + L1BeaconNodeEndpoint string `json:"l1_beacon_node_endpoint"` + L1BlockHeight uint64 `json:"l1_block_height"` + LatestFinalizedBatch uint64 `json:"latest_finalized_batch"` // the latest finalized batch number + ForceLatestFinalizedBatch bool `json:"force_latest_finalized_batch"` // whether to force usage of the latest finalized batch - mainly used for testing + + L2BlockHeightLimit uint64 `json:"l2_block_height_limit"` + ForceL1MessageCount uint64 `json:"force_l1_message_count"` +} diff --git a/rollup/internal/controller/permissionless_batches/minimal_recovery.go b/rollup/internal/controller/permissionless_batches/minimal_recovery.go new file mode 100644 index 0000000000..0e323ed238 --- /dev/null +++ b/rollup/internal/controller/permissionless_batches/minimal_recovery.go @@ -0,0 +1,457 @@ +package permissionless_batches + +import ( + "context" + "fmt" + + "github.com/scroll-tech/da-codec/encoding" + "gorm.io/gorm" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core" + "github.com/scroll-tech/go-ethereum/ethclient" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" + "github.com/scroll-tech/go-ethereum/rollup/l1" + + "scroll-tech/common/types" + + "scroll-tech/database/migrate" + "scroll-tech/rollup/internal/config" + 
"scroll-tech/rollup/internal/controller/watcher" + "scroll-tech/rollup/internal/orm" +) + +const ( + // defaultFakeRestoredChunkIndex is the default index of the last restored fake chunk. It is used to be able to generate new chunks pretending that we have already processed some chunks. + defaultFakeRestoredChunkIndex uint64 = 1337 + // defaultFakeRestoredBundleIndex is the default index of the last restored fake bundle. It is used to be able to generate new bundles pretending that we have already processed some bundles. + defaultFakeRestoredBundleIndex uint64 = 1 +) + +type MinimalRecovery struct { + ctx context.Context + cfg *config.Config + genesis *core.Genesis + db *gorm.DB + chunkORM *orm.Chunk + batchORM *orm.Batch + bundleORM *orm.Bundle + + chunkProposer *watcher.ChunkProposer + batchProposer *watcher.BatchProposer + bundleProposer *watcher.BundleProposer + l2Watcher *watcher.L2WatcherClient +} + +func NewRecovery(ctx context.Context, cfg *config.Config, genesis *core.Genesis, db *gorm.DB, chunkProposer *watcher.ChunkProposer, batchProposer *watcher.BatchProposer, bundleProposer *watcher.BundleProposer, l2Watcher *watcher.L2WatcherClient) *MinimalRecovery { + return &MinimalRecovery{ + ctx: ctx, + cfg: cfg, + genesis: genesis, + db: db, + chunkORM: orm.NewChunk(db), + batchORM: orm.NewBatch(db), + bundleORM: orm.NewBundle(db), + chunkProposer: chunkProposer, + batchProposer: batchProposer, + bundleProposer: bundleProposer, + l2Watcher: l2Watcher, + } +} + +func (r *MinimalRecovery) RecoveryNeeded() bool { + chunk, err := r.chunkORM.GetLatestChunk(r.ctx) + if err != nil || chunk == nil { + return true + } + if chunk.Index <= defaultFakeRestoredChunkIndex { + return true + } + + batch, err := r.batchORM.GetLatestBatch(r.ctx) + if err != nil { + return true + } + if batch.Index <= r.cfg.RecoveryConfig.LatestFinalizedBatch { + return true + } + + bundle, err := r.bundleORM.GetLatestBundle(r.ctx) + if err != nil { + return true + } + if bundle.Index <= 
defaultFakeRestoredBundleIndex { + return true + } + + return false +} + +func (r *MinimalRecovery) Run() error { + // Make sure we start from a clean state. + if err := r.resetDB(); err != nil { + return fmt.Errorf("failed to reset DB: %w", err) + } + + // Restore minimal previous state required to be able to create new chunks, batches and bundles. + restoredFinalizedChunk, restoredFinalizedBatch, restoredFinalizedBundle, err := r.restoreMinimalPreviousState() + if err != nil { + return fmt.Errorf("failed to restore minimal previous state: %w", err) + } + + // Fetch and insert the missing blocks from the last block in the latestFinalizedBatch to the latest L2 block. + fromBlock := restoredFinalizedChunk.EndBlockNumber + toBlock, err := r.fetchL2Blocks(fromBlock, r.cfg.RecoveryConfig.L2BlockHeightLimit) + if err != nil { + return fmt.Errorf("failed to fetch L2 blocks: %w", err) + } + + // Create chunks for L2 blocks. + log.Info("Creating chunks for L2 blocks", "from", fromBlock, "to", toBlock) + + var latestChunk *orm.Chunk + var count int + for { + if err = r.chunkProposer.ProposeChunk(); err != nil { + return fmt.Errorf("failed to propose chunk: %w", err) + } + count++ + + latestChunk, err = r.chunkORM.GetLatestChunk(r.ctx) + if err != nil { + return fmt.Errorf("failed to get latest latestFinalizedChunk: %w", err) + } + + log.Info("Chunk created", "index", latestChunk.Index, "hash", latestChunk.Hash, "StartBlockNumber", latestChunk.StartBlockNumber, "EndBlockNumber", latestChunk.EndBlockNumber, "TotalL1MessagesPoppedBefore", latestChunk.TotalL1MessagesPoppedBefore) + + // We have created chunks for all available L2 blocks. 
+ if latestChunk.EndBlockNumber >= toBlock { + break + } + } + + log.Info("Chunks created", "count", count, "latest Chunk", latestChunk.Index, "hash", latestChunk.Hash, "StartBlockNumber", latestChunk.StartBlockNumber, "EndBlockNumber", latestChunk.EndBlockNumber, "TotalL1MessagesPoppedBefore", latestChunk.TotalL1MessagesPoppedBefore, "PrevL1MessageQueueHash", latestChunk.PrevL1MessageQueueHash, "PostL1MessageQueueHash", latestChunk.PostL1MessageQueueHash) + + // Create batch for the created chunks. We only allow 1 batch it needs to be submitted (and finalized) with a proof in a single step. + log.Info("Creating batch for chunks", "from", restoredFinalizedChunk.Index+1, "to", latestChunk.Index) + + r.batchProposer.TryProposeBatch() + latestBatch, err := r.batchORM.GetLatestBatch(r.ctx) + if err != nil { + return fmt.Errorf("failed to get latest latestFinalizedBatch: %w", err) + } + + // Sanity check that the batch was created correctly: + // 1. should be a new batch + // 2. should contain all chunks created + if restoredFinalizedBatch.Index+1 != latestBatch.Index { + return fmt.Errorf("batch was not created correctly, expected %d but got %d", restoredFinalizedBatch.Index+1, latestBatch.Index) + } + + firstChunkInBatch, err := r.chunkORM.GetChunkByIndex(r.ctx, latestBatch.StartChunkIndex) + if err != nil { + return fmt.Errorf("failed to get first chunk in batch: %w", err) + } + lastChunkInBatch, err := r.chunkORM.GetChunkByIndex(r.ctx, latestBatch.EndChunkIndex) + if err != nil { + return fmt.Errorf("failed to get last chunk in batch: %w", err) + } + + // Make sure that the batch contains all previously created chunks and thus all blocks. If not the user will need to + // produce another batch (running the application again) starting from the end block of the last chunk in the batch + 1. + if latestBatch.EndChunkIndex != latestChunk.Index { + log.Warn("Produced batch does not contain all chunks and blocks. 
You'll need to produce another batch starting from end block+1.", "starting block", firstChunkInBatch.StartBlockNumber, "end block", lastChunkInBatch.EndBlockNumber, "latest block", latestChunk.EndBlockNumber) + } + + log.Info("Batch created", "index", latestBatch.Index, "hash", latestBatch.Hash, "StartChunkIndex", latestBatch.StartChunkIndex, "EndChunkIndex", latestBatch.EndChunkIndex, "starting block", firstChunkInBatch.StartBlockNumber, "ending block", lastChunkInBatch.EndBlockNumber, "PrevL1MessageQueueHash", latestBatch.PrevL1MessageQueueHash, "PostL1MessageQueueHash", latestBatch.PostL1MessageQueueHash) + + if err = r.bundleProposer.UpdateDBBundleInfo([]*orm.Batch{latestBatch}, encoding.CodecVersion(latestBatch.CodecVersion)); err != nil { + return fmt.Errorf("failed to create bundle: %w", err) + } + + latestBundle, err := r.bundleORM.GetLatestBundle(r.ctx) + if err != nil { + return fmt.Errorf("failed to get latest bundle: %w", err) + } + + // Sanity check that the bundle was created correctly: + // 1. should be a new bundle + // 2. should only contain 1 batch, the one we created + if restoredFinalizedBundle.Index == latestBundle.Index { + return fmt.Errorf("bundle was not created correctly") + } + if latestBundle.StartBatchIndex != latestBatch.Index || latestBundle.EndBatchIndex != latestBatch.Index { + return fmt.Errorf("bundle does not contain the correct batch: %d != %d", latestBundle.StartBatchIndex, latestBatch.Index) + } + + log.Info("Bundle created", "index", latestBundle.Index, "hash", latestBundle.Hash, "StartBatchIndex", latestBundle.StartBatchIndex, "EndBatchIndex", latestBundle.EndBatchIndex, "starting block", firstChunkInBatch.StartBlockNumber, "ending block", lastChunkInBatch.EndBlockNumber) + + return nil +} + +// restoreMinimalPreviousState restores the minimal previous state required to be able to create new chunks, batches and bundles. 
+func (r *MinimalRecovery) restoreMinimalPreviousState() (*orm.Chunk, *orm.Batch, *orm.Bundle, error) { + log.Info("Restoring previous state with", "L1 block height", r.cfg.RecoveryConfig.L1BlockHeight, "latest finalized batch", r.cfg.RecoveryConfig.LatestFinalizedBatch) + + l1Client, err := ethclient.Dial(r.cfg.L1Config.Endpoint) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to connect to L1 client: %w", err) + } + reader, err := l1.NewReader(r.ctx, l1.Config{ + ScrollChainAddress: r.genesis.Config.Scroll.L1Config.ScrollChainAddress, + L1MessageQueueAddress: r.genesis.Config.Scroll.L1Config.L1MessageQueueV2Address, + }, l1Client) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to create L1 reader: %w", err) + } + + // 1. Sanity check user input: Make sure that the user's L1 block height is not higher than the latest finalized block number. + latestFinalizedL1Block, err := reader.GetLatestFinalizedBlockNumber() + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get latest finalized L1 block number: %w", err) + } + if r.cfg.RecoveryConfig.L1BlockHeight > latestFinalizedL1Block { + return nil, nil, nil, fmt.Errorf("specified L1 block height is higher than the latest finalized block number: %d > %d", r.cfg.RecoveryConfig.L1BlockHeight, latestFinalizedL1Block) + } + + log.Info("Latest finalized L1 block number", "latest finalized L1 block", latestFinalizedL1Block) + + // 2. Make sure that the specified batch is indeed finalized on the L1 rollup contract and is the latest finalized batch. 
+	var latestFinalizedBatchIndex uint64
+	if r.cfg.RecoveryConfig.ForceLatestFinalizedBatch {
+		latestFinalizedBatchIndex = r.cfg.RecoveryConfig.LatestFinalizedBatch
+	} else {
+		latestFinalizedBatchIndex, err = reader.LatestFinalizedBatchIndex(latestFinalizedL1Block)
+		if err != nil {
+			return nil, nil, nil, fmt.Errorf("failed to get latest finalized batch: %w", err)
+		}
+		if r.cfg.RecoveryConfig.LatestFinalizedBatch != latestFinalizedBatchIndex {
+			return nil, nil, nil, fmt.Errorf("batch %d is not the latest finalized batch: %d", r.cfg.RecoveryConfig.LatestFinalizedBatch, latestFinalizedBatchIndex)
+		}
+	}
+
+	// Find the commit event for the latest finalized batch.
+	var batchCommitEvent *l1.CommitBatchEvent
+	err = reader.FetchRollupEventsInRangeWithCallback(r.cfg.RecoveryConfig.L1BlockHeight, latestFinalizedL1Block, func(event l1.RollupEvent) bool {
+		if event.Type() == l1.CommitEventType && event.BatchIndex().Uint64() == latestFinalizedBatchIndex {
+			batchCommitEvent = event.(*l1.CommitBatchEvent)
+			// We found the commit event for the batch, stop searching.
+			return false
+		}
+
+		// Continue until we find the commit event for the batch.
+		return true
+	})
+	if err != nil || batchCommitEvent == nil {
+		return nil, nil, nil, fmt.Errorf("commit event not found for batch %d (fetch rollup events err: %v)", latestFinalizedBatchIndex, err)
+	}
+
+	log.Info("Found commit event for batch", "batch", batchCommitEvent.BatchIndex(), "hash", batchCommitEvent.BatchHash(), "L1 block height", batchCommitEvent.BlockNumber(), "L1 tx hash", batchCommitEvent.TxHash())
+
+	// 3. Fetch commit tx data for latest finalized batch and decode it. 
+	daBatch, daBlobPayload, err := r.decodeLatestFinalizedBatch(reader, batchCommitEvent)
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("failed to decode latest finalized batch: %w", err)
+	}
+	log.Debug("Decoded latest finalized batch", "daBatch", daBatch, "daBlobPayload", daBlobPayload)
+
+	blocksInBatch := daBlobPayload.Blocks()
+
+	if len(blocksInBatch) == 0 {
+		return nil, nil, nil, fmt.Errorf("no blocks in batch %d", batchCommitEvent.BatchIndex())
+	}
+	lastBlockInBatch := blocksInBatch[len(blocksInBatch)-1]
+
+	log.Info("Last L2 block in batch", "batch", batchCommitEvent.BatchIndex(), "L2 block", lastBlockInBatch, "PostL1MessageQueueHash", daBlobPayload.PostL1MessageQueueHash())
+
+	// 4. Get the L1 messages count after the latest finalized batch.
+	var l1MessagesCount uint64
+	if r.cfg.RecoveryConfig.ForceL1MessageCount == 0 {
+		l1MessagesCount, err = reader.NextUnfinalizedL1MessageQueueIndex(latestFinalizedL1Block)
+		if err != nil {
+			return nil, nil, nil, fmt.Errorf("failed to get L1 messages count: %w", err)
+		}
+	} else {
+		l1MessagesCount = r.cfg.RecoveryConfig.ForceL1MessageCount
+	}
+
+	log.Info("L1 messages count after latest finalized batch", "batch", batchCommitEvent.BatchIndex(), "count", l1MessagesCount)
+
+	// 5. Insert minimal state to DB. 
+	chunk, err := r.chunkORM.InsertPermissionlessChunk(r.ctx, defaultFakeRestoredChunkIndex, daBatch.Version(), daBlobPayload, l1MessagesCount)
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("failed to insert chunk raw: %w", err)
+	}
+
+	log.Info("Inserted last finalized chunk to DB", "chunk", chunk.Index, "hash", chunk.Hash, "StartBlockNumber", chunk.StartBlockNumber, "EndBlockNumber", chunk.EndBlockNumber, "TotalL1MessagesPoppedBefore", chunk.TotalL1MessagesPoppedBefore)
+
+	batch, err := r.batchORM.InsertPermissionlessBatch(r.ctx, batchCommitEvent.BatchIndex(), batchCommitEvent.BatchHash(), daBatch.Version(), chunk)
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("failed to insert batch raw: %w", err)
+	}
+
+	log.Info("Inserted last finalized batch to DB", "batch", batch.Index, "hash", batch.Hash)
+
+	var bundle *orm.Bundle
+	err = r.db.Transaction(func(dbTX *gorm.DB) error {
+		bundle, err = r.bundleORM.InsertBundle(r.ctx, []*orm.Batch{batch}, encoding.CodecVersion(batch.CodecVersion), dbTX)
+		if err != nil {
+			return fmt.Errorf("failed to insert bundle: %w", err)
+		}
+		if err = r.bundleORM.UpdateProvingStatus(r.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); err != nil {
+			return fmt.Errorf("failed to update proving status: %w", err)
+		}
+		if err = r.bundleORM.UpdateRollupStatus(r.ctx, bundle.Hash, types.RollupFinalized, dbTX); err != nil {
+			return fmt.Errorf("failed to update rollup status: %w", err)
+		}
+
+		log.Info("Inserted last finalized bundle to DB", "bundle", bundle.Index, "hash", bundle.Hash, "StartBatchIndex", bundle.StartBatchIndex, "EndBatchIndex", bundle.EndBatchIndex)
+
+		return nil
+	})
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("failed to insert bundle: %w", err)
+	}
+	return chunk, batch, bundle, nil
+}
+
+func (r *MinimalRecovery) decodeLatestFinalizedBatch(reader *l1.Reader, event *l1.CommitBatchEvent) (encoding.DABatch, encoding.DABlobPayload, error) {
+	blockHeader, err := 
reader.FetchBlockHeaderByNumber(event.BlockNumber()) + if err != nil { + return nil, nil, fmt.Errorf("failed to get header by number, err: %w", err) + } + + args, err := reader.FetchCommitTxData(event) + if err != nil { + return nil, nil, fmt.Errorf("failed to fetch commit tx data: %w", err) + } + + codecVersion := encoding.CodecVersion(args.Version) + if codecVersion < encoding.CodecV7 { + return nil, nil, fmt.Errorf("codec version %d is not supported", codecVersion) + } + + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, nil, fmt.Errorf("failed to get codec: %w", err) + } + + // Since we only store the last batch hash committed in a single tx in the contracts we can also only ever + // finalize a last batch of a tx. This means we can assume here that the batch given in the event is the last batch + // that was committed in the tx. + + if event.BatchIndex().Uint64()+1 < uint64(len(args.BlobHashes)) { + return nil, nil, fmt.Errorf("batch index %d+1 is lower than the number of blobs %d", event.BatchIndex().Uint64(), len(args.BlobHashes)) + } + firstBatchIndex := event.BatchIndex().Uint64() + 1 - uint64(len(args.BlobHashes)) + + var targetBatch encoding.DABatch + var targetBlobVersionedHash common.Hash + parentBatchHash := args.ParentBatchHash + for i, blobVersionedHash := range args.BlobHashes { + batchIndex := firstBatchIndex + uint64(i) + + calculatedBatch, err := codec.NewDABatchFromParams(batchIndex, blobVersionedHash, parentBatchHash) + if err != nil { + return nil, nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", event.BatchIndex().Uint64(), err) + } + parentBatchHash = calculatedBatch.Hash() + + if batchIndex == event.BatchIndex().Uint64() { + if calculatedBatch.Hash() != event.BatchHash() { + return nil, nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex(), event.BatchHash().String(), calculatedBatch.Hash().String()) + } + // We found the 
batch we are looking for, break out of the loop. + targetBatch = calculatedBatch + targetBlobVersionedHash = blobVersionedHash + break + } + } + + if targetBatch == nil { + return nil, nil, fmt.Errorf("target batch with index %d could not be found and decoded", event.BatchIndex()) + } + + // sanity check that this is indeed the last batch in the tx + if targetBatch.Hash() != args.LastBatchHash { + return nil, nil, fmt.Errorf("last batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex(), args.LastBatchHash.String(), targetBatch.Hash().String()) + } + + // TODO: add support for multiple blob clients + blobClient := blob_client.NewBlobClients() + if r.cfg.RecoveryConfig.L1BeaconNodeEndpoint != "" { + client, err := blob_client.NewBeaconNodeClient(r.cfg.RecoveryConfig.L1BeaconNodeEndpoint) + if err != nil { + return nil, nil, fmt.Errorf("failed to create beacon node client: %w", err) + } + blobClient.AddBlobClient(client) + } + + blob, err := blobClient.GetBlobByVersionedHashAndBlockTime(r.ctx, targetBlobVersionedHash, blockHeader.Time) + if err != nil { + return nil, nil, fmt.Errorf("failed to get blob by versioned hash and block time for batch %d: %w", event.BatchIndex(), err) + } + + daBlobPayload, err := codec.DecodeBlob(blob) + if err != nil { + return nil, nil, fmt.Errorf("failed to decode blob for batch %d: %w", event.BatchIndex(), err) + } + + return targetBatch, daBlobPayload, nil +} + +func (r *MinimalRecovery) fetchL2Blocks(fromBlock uint64, l2BlockHeightLimit uint64) (uint64, error) { + if l2BlockHeightLimit > 0 && fromBlock > l2BlockHeightLimit { + return 0, fmt.Errorf("fromBlock (latest finalized L2 block) is higher than specified L2BlockHeightLimit: %d > %d", fromBlock, l2BlockHeightLimit) + } + + log.Info("Fetching L2 blocks with", "fromBlock", fromBlock, "l2BlockHeightLimit", l2BlockHeightLimit) + + // Fetch and insert the missing blocks from the last block in the batch to the latest L2 block. 
+ latestL2Block, err := r.l2Watcher.Client.BlockNumber(r.ctx) + if err != nil { + return 0, fmt.Errorf("failed to get latest L2 block number: %w", err) + } + + log.Info("Latest L2 block number", "latest L2 block", latestL2Block) + + if l2BlockHeightLimit > latestL2Block { + return 0, fmt.Errorf("l2BlockHeightLimit is higher than the latest L2 block number, not all blocks are available in L2geth: %d > %d", l2BlockHeightLimit, latestL2Block) + } + + toBlock := latestL2Block + if l2BlockHeightLimit > 0 { + toBlock = l2BlockHeightLimit + } + + err = r.l2Watcher.GetAndStoreBlocks(r.ctx, fromBlock, toBlock) + if err != nil { + return 0, fmt.Errorf("failed to get and store blocks: %w", err) + } + + log.Info("Fetched L2 blocks from", "fromBlock", fromBlock, "toBlock", toBlock) + + return toBlock, nil +} + +func (r *MinimalRecovery) resetDB() error { + sqlDB, err := r.db.DB() + if err != nil { + return fmt.Errorf("failed to get db connection: %w", err) + } + + // reset and init DB + var v int64 + err = migrate.Rollback(sqlDB, &v) + if err != nil { + return fmt.Errorf("failed to rollback db: %w", err) + } + + err = migrate.Migrate(sqlDB) + if err != nil { + return fmt.Errorf("failed to migrate db: %w", err) + } + + return nil +} diff --git a/rollup/internal/controller/permissionless_batches/submitter.go b/rollup/internal/controller/permissionless_batches/submitter.go new file mode 100644 index 0000000000..9e4d201edb --- /dev/null +++ b/rollup/internal/controller/permissionless_batches/submitter.go @@ -0,0 +1,268 @@ +package permissionless_batches + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/prometheus/client_golang/prometheus" + "github.com/scroll-tech/da-codec/encoding" + "gorm.io/gorm" + + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/crypto/kzg4844" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/params" + 
"github.com/scroll-tech/go-ethereum/rpc" + + "scroll-tech/common/types" + "scroll-tech/common/types/message" + bridgeAbi "scroll-tech/rollup/abi" + "scroll-tech/rollup/internal/config" + "scroll-tech/rollup/internal/controller/sender" + "scroll-tech/rollup/internal/orm" +) + +type Submitter struct { + ctx context.Context + + db *gorm.DB + l2BlockOrm *orm.L2Block + chunkOrm *orm.Chunk + batchOrm *orm.Batch + bundleOrm *orm.Bundle + + cfg *config.RelayerConfig + + finalizeSender *sender.Sender + l1RollupABI *abi.ABI + + chainCfg *params.ChainConfig +} + +func NewSubmitter(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig) (*Submitter, error) { + registry := prometheus.DefaultRegisterer + finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderSignerConfig, "permissionless_batches_submitter", "finalize_sender", types.SenderTypeFinalizeBatch, db, registry) + if err != nil { + return nil, fmt.Errorf("new finalize sender failed, err: %w", err) + } + + return &Submitter{ + ctx: ctx, + db: db, + l2BlockOrm: orm.NewL2Block(db), + chunkOrm: orm.NewChunk(db), + batchOrm: orm.NewBatch(db), + bundleOrm: orm.NewBundle(db), + cfg: cfg, + finalizeSender: finalizeSender, + l1RollupABI: bridgeAbi.ScrollChainABI, + chainCfg: chainCfg, + }, nil + +} + +func (s *Submitter) Sender() *sender.Sender { + return s.finalizeSender +} + +func (s *Submitter) Submit(withProof bool) error { + // Check if the bundle is already finalized + bundle, err := s.bundleOrm.GetLatestBundle(s.ctx) + if err != nil { + return fmt.Errorf("error loading latest bundle: %w", err) + } + + if bundle.Index != defaultFakeRestoredBundleIndex+1 { + return fmt.Errorf("unexpected bundle index %d with hash %s, expected %d", bundle.Index, bundle.Hash, defaultFakeRestoredBundleIndex+1) + } + + if types.RollupStatus(bundle.RollupStatus) == types.RollupFinalized { + return fmt.Errorf("bundle %d %s is already finalized. 
nothing to do", bundle.Index, bundle.Hash) + } + + if bundle.StartBatchIndex != bundle.EndBatchIndex { + return fmt.Errorf("bundle %d %s has unexpected batch indices (should only contain a single batch): start %d, end %d", bundle.Index, bundle.Hash, bundle.StartBatchIndex, bundle.EndBatchIndex) + } + if bundle.StartBatchHash != bundle.EndBatchHash { + return fmt.Errorf("bundle %d %s has unexpected batch hashes (should only contain a single batch): start %s, end %s", bundle.Index, bundle.Hash, bundle.StartBatchHash, bundle.EndBatchHash) + } + + batch, err := s.batchOrm.GetBatchByIndex(s.ctx, bundle.StartBatchIndex) + if err != nil { + return fmt.Errorf("failed to load batch %d: %w", bundle.StartBatchIndex, err) + } + if batch == nil { + return fmt.Errorf("batch %d not found", bundle.StartBatchIndex) + } + if batch.Hash != bundle.StartBatchHash { + return fmt.Errorf("bundle %d %s has unexpected batch hash: %s", bundle.Index, bundle.Hash, batch.Hash) + } + + log.Info("submitting batch", "index", batch.Index, "hash", batch.Hash) + + endChunk, err := s.chunkOrm.GetChunkByIndex(s.ctx, batch.EndChunkIndex) + if err != nil || endChunk == nil { + return fmt.Errorf("failed to get end chunk with index %d of batch: %w", batch.EndChunkIndex, err) + } + + var aggProof message.BundleProof + if withProof { + firstChunk, err := s.chunkOrm.GetChunkByIndex(s.ctx, batch.StartChunkIndex) + if err != nil || firstChunk == nil { + return fmt.Errorf("failed to get first chunk %d of batch: %w", batch.StartChunkIndex, err) + } + + hardForkName := encoding.GetHardforkName(s.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime) + + aggProof, err = s.bundleOrm.GetVerifiedProofByHash(s.ctx, bundle.Hash, hardForkName) + if err != nil { + return fmt.Errorf("failed to get verified proof by bundle index: %d, err: %w", bundle.Index, err) + } + + if err = aggProof.SanityCheck(); err != nil { + return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", bundle.Index, err) + } 
+	}
+
+	var calldata []byte
+	var blob *kzg4844.Blob
+	switch encoding.CodecVersion(bundle.CodecVersion) {
+	case encoding.CodecV7:
+		calldata, blob, err = s.constructCommitAndFinalizeCalldataAndBlob(batch, endChunk, aggProof)
+		if err != nil {
+			return fmt.Errorf("failed to construct CommitAndFinalize calldata and blob, bundle index: %v, batch index: %v, err: %w", bundle.Index, batch.Index, err)
+		}
+	default:
+		return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
+	}
+	// Submit the commitAndFinalize transaction with the constructed calldata and blob.
+	log.Debug("constructed commitAndFinalize blob", "size", len(blob))
+	txHash, err := s.finalizeSender.SendTransaction("commitAndFinalize-"+bundle.Hash, &s.cfg.RollupContractAddress, calldata, []*kzg4844.Blob{blob}, 0)
+	if err != nil {
+		//fmt.Println("blob", common.Bytes2Hex(blob[:]))
+		log.Error("commitAndFinalize in layer1 failed", "with proof", withProof, "index", bundle.Index,
+			"batch index", bundle.StartBatchIndex,
+			"RollupContractAddress", s.cfg.RollupContractAddress, "err", err, "calldata", common.Bytes2Hex(calldata))
+
+		var rpcError rpc.DataError
+		if errors.As(err, &rpcError) {
+			log.Error("rpc.DataError ", "error", rpcError.Error(), "message", rpcError.ErrorData())
+		}
+
+		return fmt.Errorf("commitAndFinalize failed, bundle index: %d, err: %w", bundle.Index, err)
+	}
+
+	log.Info("commitAndFinalize in layer1", "with proof", withProof, "batch index", bundle.StartBatchIndex, "tx hash", txHash.String())
+
+	// Updating rollup status in database. 
+ err = s.db.Transaction(func(dbTX *gorm.DB) error { + if err = s.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(s.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil { + log.Warn("UpdateFinalizeTxHashAndRollupStatusByBundleHash failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err) + return err + } + + if err = s.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(s.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil { + log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err) + return err + } + + return nil + }) + if err != nil { + log.Warn("failed to update rollup status of bundle and batches", "err", err) + return err + } + + // Updating the proving status when finalizing without proof, thus the coordinator could omit this task. + // it isn't a necessary step, so don't put in a transaction with UpdateFinalizeTxHashAndRollupStatus + if !withProof { + txErr := s.db.Transaction(func(dbTX *gorm.DB) error { + if updateErr := s.bundleOrm.UpdateProvingStatus(s.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil { + return updateErr + } + if updateErr := s.batchOrm.UpdateProvingStatusByBundleHash(s.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil { + return updateErr + } + for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ { + tmpBatch, getErr := s.batchOrm.GetBatchByIndex(s.ctx, batchIndex) + if getErr != nil { + return getErr + } + if updateErr := s.chunkOrm.UpdateProvingStatusByBatchHash(s.ctx, tmpBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil { + return updateErr + } + } + return nil + }) + if txErr != nil { + log.Error("Updating chunk and batch proving status when finalizing without proof failure", "bundleHash", bundle.Hash, "err", txErr) + } + } + + return nil +} + +func (s *Submitter) 
constructCommitAndFinalizeCalldataAndBlob(batch *orm.Batch, endChunk *orm.Chunk, aggProof message.BundleProof) ([]byte, *kzg4844.Blob, error) { + // Create the FinalizeStruct tuple as an abi-compatible struct + finalizeStruct := struct { + BatchHeader []byte + TotalL1MessagesPoppedOverall *big.Int + PostStateRoot common.Hash + WithdrawRoot common.Hash + ZkProof []byte + }{ + BatchHeader: batch.BatchHeader, + TotalL1MessagesPoppedOverall: new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore + endChunk.TotalL1MessagesPoppedInChunk), + PostStateRoot: common.HexToHash(batch.StateRoot), + WithdrawRoot: common.HexToHash(batch.WithdrawRoot), + } + if aggProof != nil { + finalizeStruct.ZkProof = aggProof.Proof() + } + + calldata, err := s.l1RollupABI.Pack("commitAndFinalizeBatch", uint8(batch.CodecVersion), common.HexToHash(batch.ParentBatchHash), finalizeStruct) + if err != nil { + return nil, nil, fmt.Errorf("failed to pack commitAndFinalizeBatch: %w", err) + } + + chunks, err := s.chunkOrm.GetChunksInRange(s.ctx, batch.StartChunkIndex, batch.EndChunkIndex) + if err != nil { + return nil, nil, fmt.Errorf("failed to get chunks in range for batch %d: %w", batch.Index, err) + } + if chunks[len(chunks)-1].Index != batch.EndChunkIndex { + return nil, nil, fmt.Errorf("unexpected last chunk index %d, expected %d", chunks[len(chunks)-1].Index, batch.EndChunkIndex) + } + + var batchBlocks []*encoding.Block + for _, c := range chunks { + blocks, err := s.l2BlockOrm.GetL2BlocksInRange(s.ctx, c.StartBlockNumber, c.EndBlockNumber) + if err != nil { + return nil, nil, fmt.Errorf("failed to get blocks in range for batch %d: %w", batch.Index, err) + } + + batchBlocks = append(batchBlocks, blocks...) 
+ } + + encodingBatch := &encoding.Batch{ + Index: batch.Index, + ParentBatchHash: common.HexToHash(batch.ParentBatchHash), + PrevL1MessageQueueHash: common.HexToHash(batch.PrevL1MessageQueueHash), + PostL1MessageQueueHash: common.HexToHash(batch.PostL1MessageQueueHash), + Blocks: batchBlocks, + } + + codec, err := encoding.CodecFromVersion(encoding.CodecVersion(batch.CodecVersion)) + if err != nil { + return nil, nil, fmt.Errorf("failed to get codec from version %d, err: %w", batch.CodecVersion, err) + } + + daBatch, err := codec.NewDABatch(encodingBatch) + if err != nil { + return nil, nil, fmt.Errorf("failed to create DA batch: %w", err) + } + + return calldata, daBatch.Blob(), nil +} diff --git a/rollup/internal/controller/relayer/full_recovery.go b/rollup/internal/controller/relayer/full_recovery.go new file mode 100644 index 0000000000..acffc772b4 --- /dev/null +++ b/rollup/internal/controller/relayer/full_recovery.go @@ -0,0 +1,354 @@ +package relayer + +import ( + "context" + "fmt" + + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core" + "github.com/scroll-tech/go-ethereum/ethclient" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rollup/l1" + "gorm.io/gorm" + + "scroll-tech/common/types" + "scroll-tech/rollup/internal/config" + "scroll-tech/rollup/internal/controller/watcher" + "scroll-tech/rollup/internal/orm" + butils "scroll-tech/rollup/internal/utils" +) + +type FullRecovery struct { + ctx context.Context + cfg *config.Config + genesis *core.Genesis + db *gorm.DB + blockORM *orm.L2Block + chunkORM *orm.Chunk + batchORM *orm.Batch + bundleORM *orm.Bundle + + chunkProposer *watcher.ChunkProposer + batchProposer *watcher.BatchProposer + bundleProposer *watcher.BundleProposer + l2Watcher *watcher.L2WatcherClient + l1Client *ethclient.Client + l1Reader *l1.Reader +} + +func NewFullRecovery(ctx context.Context, cfg *config.Config, genesis 
*core.Genesis, db *gorm.DB, chunkProposer *watcher.ChunkProposer, batchProposer *watcher.BatchProposer, bundleProposer *watcher.BundleProposer, l2Watcher *watcher.L2WatcherClient, l1Client *ethclient.Client, l1Reader *l1.Reader) *FullRecovery { + return &FullRecovery{ + ctx: ctx, + cfg: cfg, + genesis: genesis, + db: db, + blockORM: orm.NewL2Block(db), + chunkORM: orm.NewChunk(db), + batchORM: orm.NewBatch(db), + bundleORM: orm.NewBundle(db), + + chunkProposer: chunkProposer, + batchProposer: batchProposer, + bundleProposer: bundleProposer, + l2Watcher: l2Watcher, + l1Client: l1Client, + l1Reader: l1Reader, + } +} + +// RestoreFullPreviousState restores the full state from L1. +// The DB state should be clean: the latest batch in the DB should be finalized on L1. This function will +// restore all batches between the latest finalized batch in the DB and the latest finalized batch on L1. +func (f *FullRecovery) RestoreFullPreviousState() error { + log.Info("Restoring full previous state") + + // 1. Get latest finalized batch stored in DB + latestDBBatch, err := f.batchORM.GetLatestBatch(f.ctx) + if err != nil { + return fmt.Errorf("failed to get latest batch from DB: %w", err) + } + + log.Info("Latest finalized batch in DB", "batch", latestDBBatch.Index, "hash", latestDBBatch.Hash) + + // 2. Get latest finalized L1 block + latestFinalizedL1Block, err := f.l1Reader.GetLatestFinalizedBlockNumber() + if err != nil { + return fmt.Errorf("failed to get latest finalized L1 block number: %w", err) + } + + log.Info("Latest finalized L1 block number", "latest finalized L1 block", latestFinalizedL1Block) + + // 3. 
Get latest finalized batch from contract (at latest finalized L1 block)
+	latestFinalizedBatchContract, err := f.l1Reader.LatestFinalizedBatchIndex(latestFinalizedL1Block)
+	if err != nil {
+		return fmt.Errorf("failed to get latest finalized batch: %w", err)
+	}
+
+	log.Info("Latest finalized batch from L1 contract", "latest finalized batch", latestFinalizedBatchContract, "at latest finalized L1 block", latestFinalizedL1Block)
+
+	// 4. Get batches one by one, from the latest batch stored in DB to the latest finalized batch on L1.
+	receipt, err := f.l1Client.TransactionReceipt(f.ctx, common.HexToHash(latestDBBatch.CommitTxHash))
+	if err != nil {
+		return fmt.Errorf("failed to get transaction receipt of latest DB batch commit transaction: %w", err)
+	}
+	fromBlock := receipt.BlockNumber.Uint64()
+
+	log.Info("Fetching rollup events from L1", "from block", fromBlock, "to block", latestFinalizedL1Block, "from batch", latestDBBatch.Index, "to batch", latestFinalizedBatchContract)
+
+	commitsHeapMap := common.NewHeapMap[uint64, *l1.CommitBatchEvent](func(event *l1.CommitBatchEvent) uint64 {
+		return event.BatchIndex().Uint64()
+	})
+	batchEventsHeap := common.NewHeap[*batchEvents]()
+	var bundles [][]*batchEvents
+
+	err = f.l1Reader.FetchRollupEventsInRangeWithCallback(fromBlock, latestFinalizedL1Block, func(event l1.RollupEvent) bool {
+		// We're only interested in batches that are newer than the latest finalized batch in the DB.
+		if event.BatchIndex().Uint64() <= latestDBBatch.Index {
+			return true
+		}
+
+		switch event.Type() {
+		case l1.CommitEventType:
+			commitEvent := event.(*l1.CommitBatchEvent)
+			commitsHeapMap.Push(commitEvent)
+
+		case l1.FinalizeEventType:
+			finalizeEvent := event.(*l1.FinalizeBatchEvent)
+
+			var bundle []*batchEvents
+
+			// With bundles, all batches committed up to and including this finalized batch are finalized in the same bundle.
+			for commitsHeapMap.Len() > 0 {
+				commitEvent := commitsHeapMap.Peek()
+				if commitEvent.BatchIndex().Uint64() > finalizeEvent.BatchIndex().Uint64() {
+					break
+				}
+
+				bEvents := newBatchEvents(commitEvent, finalizeEvent)
+				commitsHeapMap.Pop()
+				batchEventsHeap.Push(bEvents)
+				bundle = append(bundle, bEvents)
+			}
+
+			bundles = append(bundles, bundle)
+
+			// Stop fetching rollup events if we reached the latest finalized batch.
+			if finalizeEvent.BatchIndex().Uint64() >= latestFinalizedBatchContract {
+				return false
+			}
+
+		case l1.RevertEventType:
+			// We ignore reverted batches.
+			commitsHeapMap.RemoveByKey(event.BatchIndex().Uint64())
+		}
+
+		return true
+	})
+	if err != nil {
+		return fmt.Errorf("failed to fetch rollup events: %w", err)
+	}
+
+	// 5. Process all finalized batches: fetch L2 blocks and reproduce chunks and batches.
+	for batchEventsHeap.Len() > 0 {
+		nextBatch := batchEventsHeap.Pop().Value()
+		if err = f.processFinalizedBatch(nextBatch); err != nil {
+			return fmt.Errorf("failed to process finalized batch %d %s: %w", nextBatch.commit.BatchIndex(), nextBatch.commit.BatchHash(), err)
+		}
+
+		log.Info("Processed finalized batch", "batch", nextBatch.commit.BatchIndex(), "hash", nextBatch.commit.BatchHash())
+	}
+
+	// 6. Create bundles if needed.
+ for _, bundle := range bundles { + var dbBatches []*orm.Batch + var lastBatchInBundle *orm.Batch + + for _, batch := range bundle { + dbBatch, err := f.batchORM.GetBatchByIndex(f.ctx, batch.commit.BatchIndex().Uint64()) + if err != nil { + return fmt.Errorf("failed to get batch by index for bundle generation: %w", err) + } + // Bundles are only supported for codec version 3 and above. + if encoding.CodecVersion(dbBatch.CodecVersion) < encoding.CodecV3 { + break + } + + dbBatches = append(dbBatches, dbBatch) + lastBatchInBundle = dbBatch + } + + if len(dbBatches) == 0 { + continue + } + + err = f.db.Transaction(func(dbTX *gorm.DB) error { + newBundle, err := f.bundleORM.InsertBundle(f.ctx, dbBatches, encoding.CodecVersion(lastBatchInBundle.CodecVersion), dbTX) + if err != nil { + return fmt.Errorf("failed to insert bundle to DB: %w", err) + } + if err = f.batchORM.UpdateBundleHashInRange(f.ctx, newBundle.StartBatchIndex, newBundle.EndBatchIndex, newBundle.Hash, dbTX); err != nil { + return fmt.Errorf("failed to update bundle_hash %s for batches (%d to %d): %w", newBundle.Hash, newBundle.StartBatchIndex, newBundle.EndBatchIndex, err) + } + + if err = f.bundleORM.UpdateFinalizeTxHashAndRollupStatus(f.ctx, newBundle.Hash, lastBatchInBundle.FinalizeTxHash, types.RollupFinalized, dbTX); err != nil { + return fmt.Errorf("failed to update finalize tx hash and rollup status for bundle %s: %w", newBundle.Hash, err) + } + + if err = f.bundleORM.UpdateProvingStatus(f.ctx, newBundle.Hash, types.ProvingTaskVerified, dbTX); err != nil { + return fmt.Errorf("failed to update proving status for bundle %s: %w", newBundle.Hash, err) + } + + return nil + }) + if err != nil { + return fmt.Errorf("failed to insert bundle in DB transaction: %w", err) + } + } + + return nil +} + +func (f *FullRecovery) processFinalizedBatch(nextBatch *batchEvents) error { + log.Info("Processing finalized batch", "batch", nextBatch.commit.BatchIndex(), "hash", nextBatch.commit.BatchHash()) + + // 5.1. 
Fetch commit tx data for batch (via commit event). + args, err := f.l1Reader.FetchCommitTxData(nextBatch.commit) + if err != nil { + return fmt.Errorf("failed to fetch commit tx data: %w", err) + } + + codec, err := encoding.CodecFromVersion(encoding.CodecVersion(args.Version)) + if err != nil { + return fmt.Errorf("failed to get codec: %w", err) + } + + daChunksRawTxs, err := codec.DecodeDAChunksRawTx(args.Chunks) + if err != nil { + return fmt.Errorf("failed to decode DA chunks: %w", err) + } + lastChunk := daChunksRawTxs[len(daChunksRawTxs)-1] + lastBlockInBatch := lastChunk.Blocks[len(lastChunk.Blocks)-1].Number() + + log.Info("Fetching L2 blocks from l2geth", "batch", nextBatch.commit.BatchIndex(), "last L2 block in batch", lastBlockInBatch) + + // 5.2. Fetch L2 blocks for the entire batch. + if err = f.l2Watcher.TryFetchRunningMissingBlocks(lastBlockInBatch); err != nil { + return fmt.Errorf("failed to fetch L2 blocks: %w", err) + } + + // 5.3. Reproduce chunks. + daChunks := make([]*encoding.Chunk, 0, len(daChunksRawTxs)) + dbChunks := make([]*orm.Chunk, 0, len(daChunksRawTxs)) + for _, daChunkRawTxs := range daChunksRawTxs { + start := daChunkRawTxs.Blocks[0].Number() + end := daChunkRawTxs.Blocks[len(daChunkRawTxs.Blocks)-1].Number() + + blocks, err := f.blockORM.GetL2BlocksInRange(f.ctx, start, end) + if err != nil { + return fmt.Errorf("failed to get L2 blocks in range: %w", err) + } + + log.Info("Reproducing chunk", "start block", start, "end block", end) + + var chunk encoding.Chunk + for _, block := range blocks { + chunk.Blocks = append(chunk.Blocks, block) + } + + metrics, err := butils.CalculateChunkMetrics(&chunk, codec.Version()) + if err != nil { + return fmt.Errorf("failed to calculate chunk metrics: %w", err) + } + + err = f.db.Transaction(func(dbTX *gorm.DB) error { + dbChunk, err := f.chunkORM.InsertChunk(f.ctx, &chunk, codec.Version(), *metrics, dbTX) + if err != nil { + return fmt.Errorf("failed to insert chunk to DB: %w", err) + } + if 
err := f.blockORM.UpdateChunkHashInRange(f.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil { + return fmt.Errorf("failed to update chunk_hash for l2_blocks (chunk hash: %s, start block: %d, end block: %d): %w", dbChunk.Hash, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, err) + } + + if err = f.chunkORM.UpdateProvingStatus(f.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil { + return fmt.Errorf("failed to update proving status for chunk %s: %w", dbChunk.Hash, err) + } + + daChunks = append(daChunks, &chunk) + dbChunks = append(dbChunks, dbChunk) + + log.Info("Inserted chunk", "index", dbChunk.Index, "hash", dbChunk.Hash, "start block", dbChunk.StartBlockNumber, "end block", dbChunk.EndBlockNumber) + + return nil + }) + if err != nil { + return fmt.Errorf("failed to insert chunk in DB transaction: %w", err) + } + } + + // 5.4 Reproduce batch. + dbParentBatch, err := f.batchORM.GetLatestBatch(f.ctx) + if err != nil { + return fmt.Errorf("failed to get latest batch from DB: %w", err) + } + + var batch encoding.Batch + batch.Index = dbParentBatch.Index + 1 + batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash) + batch.TotalL1MessagePoppedBefore = dbChunks[0].TotalL1MessagesPoppedBefore + + for _, chunk := range daChunks { + batch.Chunks = append(batch.Chunks, chunk) + } + + metrics, err := butils.CalculateBatchMetrics(&batch, codec.Version()) + if err != nil { + return fmt.Errorf("failed to calculate batch metrics: %w", err) + } + + err = f.db.Transaction(func(dbTX *gorm.DB) error { + dbBatch, err := f.batchORM.InsertBatch(f.ctx, &batch, codec.Version(), *metrics, dbTX) + if err != nil { + return fmt.Errorf("failed to insert batch to DB: %w", err) + } + if err = f.chunkORM.UpdateBatchHashInRange(f.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex, dbBatch.Hash, dbTX); err != nil { + return fmt.Errorf("failed to update batch_hash for chunks (batch hash: %s, start chunk: %d, end chunk: %d): %w", 
dbBatch.Hash, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex, err) + } + + if err = f.batchORM.UpdateProvingStatus(f.ctx, dbBatch.Hash, types.ProvingTaskVerified, dbTX); err != nil { + return fmt.Errorf("failed to update proving status for batch %s: %w", dbBatch.Hash, err) + } + if err = f.batchORM.UpdateRollupStatusCommitAndFinalizeTxHash(f.ctx, dbBatch.Hash, types.RollupFinalized, nextBatch.commit.TxHash().Hex(), nextBatch.finalize.TxHash().Hex(), dbTX); err != nil { + return fmt.Errorf("failed to update rollup status for batch %s: %w", dbBatch.Hash, err) + } + + log.Info("Inserted batch", "index", dbBatch.Index, "hash", dbBatch.Hash, "start chunk", dbBatch.StartChunkIndex, "end chunk", dbBatch.EndChunkIndex) + + return nil + }) + if err != nil { + return fmt.Errorf("failed to insert batch in DB transaction: %w", err) + } + + return nil +} + +type batchEvents struct { + commit *l1.CommitBatchEvent + finalize *l1.FinalizeBatchEvent +} + +func newBatchEvents(commit *l1.CommitBatchEvent, finalize *l1.FinalizeBatchEvent) *batchEvents { + if commit.BatchIndex().Uint64() > finalize.BatchIndex().Uint64() { + panic(fmt.Sprintf("commit and finalize batch index mismatch: %d != %d", commit.BatchIndex().Uint64(), finalize.BatchIndex().Uint64())) + } + + return &batchEvents{ + commit: commit, + finalize: finalize, + } +} + +func (e *batchEvents) CompareTo(other *batchEvents) int { + return e.commit.BatchIndex().Cmp(other.commit.BatchIndex()) +} diff --git a/rollup/internal/controller/relayer/l2_relayer.go b/rollup/internal/controller/relayer/l2_relayer.go index 15ac84445e..8277fcb6a5 100644 --- a/rollup/internal/controller/relayer/l2_relayer.go +++ b/rollup/internal/controller/relayer/l2_relayer.go @@ -12,6 +12,8 @@ import ( "github.com/go-resty/resty/v2" "github.com/prometheus/client_golang/prometheus" "github.com/scroll-tech/da-codec/encoding" + "gorm.io/gorm" + "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" gethTypes 
"github.com/scroll-tech/go-ethereum/core/types" @@ -20,7 +22,6 @@ import ( "github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/params" - "gorm.io/gorm" "scroll-tech/common/types" "scroll-tech/common/types/message" diff --git a/rollup/internal/controller/watcher/bundle_proposer.go b/rollup/internal/controller/watcher/bundle_proposer.go index bb180cd61e..1cb78778bf 100644 --- a/rollup/internal/controller/watcher/bundle_proposer.go +++ b/rollup/internal/controller/watcher/bundle_proposer.go @@ -9,9 +9,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/scroll-tech/da-codec/encoding" + "gorm.io/gorm" + "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/params" - "gorm.io/gorm" "scroll-tech/rollup/internal/config" "scroll-tech/rollup/internal/orm" @@ -99,7 +100,7 @@ func (p *BundleProposer) TryProposeBundle() { } } -func (p *BundleProposer) updateDBBundleInfo(batches []*orm.Batch, codecVersion encoding.CodecVersion) error { +func (p *BundleProposer) UpdateDBBundleInfo(batches []*orm.Batch, codecVersion encoding.CodecVersion) error { if len(batches) == 0 { return nil } @@ -194,7 +195,7 @@ func (p *BundleProposer) proposeBundle() error { p.bundleFirstBlockTimeoutReached.Inc() p.bundleBatchesNum.Set(float64(len(batches))) - return p.updateDBBundleInfo(batches, codecVersion) + return p.UpdateDBBundleInfo(batches, codecVersion) } currentTimeSec := uint64(time.Now().Unix()) @@ -208,7 +209,7 @@ func (p *BundleProposer) proposeBundle() error { p.bundleFirstBlockTimeoutReached.Inc() p.bundleBatchesNum.Set(float64(len(batches))) - return p.updateDBBundleInfo(batches, codecVersion) + return p.UpdateDBBundleInfo(batches, codecVersion) } log.Debug("pending batches are not enough and do not contain a timeout batch") diff --git a/rollup/internal/controller/watcher/chunk_proposer.go 
b/rollup/internal/controller/watcher/chunk_proposer.go index 03db91e43d..2706fa49be 100644 --- a/rollup/internal/controller/watcher/chunk_proposer.go +++ b/rollup/internal/controller/watcher/chunk_proposer.go @@ -188,7 +188,7 @@ func (p *ChunkProposer) SetReplayDB(replayDB *gorm.DB) { // TryProposeChunk tries to propose a new chunk. func (p *ChunkProposer) TryProposeChunk() { p.chunkProposerCircleTotal.Inc() - if err := p.proposeChunk(); err != nil { + if err := p.ProposeChunk(); err != nil { p.proposeChunkFailureTotal.Inc() log.Error("propose new chunk failed", "err", err) return @@ -268,7 +268,7 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en return nil } -func (p *ChunkProposer) proposeChunk() error { +func (p *ChunkProposer) ProposeChunk() error { // unchunkedBlockHeight >= 1, assuming genesis batch with chunk 0, block 0 is committed. unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx) if err != nil { diff --git a/rollup/internal/controller/watcher/l2_watcher.go b/rollup/internal/controller/watcher/l2_watcher.go index 7c75a31981..55ec90b1d1 100644 --- a/rollup/internal/controller/watcher/l2_watcher.go +++ b/rollup/internal/controller/watcher/l2_watcher.go @@ -59,12 +59,12 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat const blocksFetchLimit = uint64(10) // TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks. 
-func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) { +func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) error { w.metrics.fetchRunningMissingBlocksTotal.Inc() heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx) if err != nil { log.Error("failed to GetL2BlocksLatestHeight", "err", err) - return + return fmt.Errorf("failed to GetL2BlocksLatestHeight: %w", err) } // Fetch and store block traces for missing blocks @@ -75,16 +75,18 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) { to = blockHeight } - if err = w.getAndStoreBlocks(w.ctx, from, to); err != nil { + if err = w.GetAndStoreBlocks(w.ctx, from, to); err != nil { log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err) - return + return fmt.Errorf("fail to getAndStoreBlockTraces: %w", err) } w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to)) w.metrics.rollupL2BlocksFetchedGap.Set(float64(blockHeight - to)) } + + return nil } -func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64) error { +func (w *L2WatcherClient) GetAndStoreBlocks(ctx context.Context, from, to uint64) error { var blocks []*encoding.Block for number := from; number <= to; number++ { log.Debug("retrieving block", "height", number) diff --git a/rollup/internal/orm/batch.go b/rollup/internal/orm/batch.go index 2d83c4791d..a79429f0e4 100644 --- a/rollup/internal/orm/batch.go +++ b/rollup/internal/orm/batch.go @@ -5,12 +5,15 @@ import ( "encoding/json" "errors" "fmt" + "math/big" "time" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/go-ethereum/log" "gorm.io/gorm" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/log" + "scroll-tech/common/types" "scroll-tech/common/types/message" "scroll-tech/common/utils" @@ -328,6 +331,52 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer return &newBatch, nil } +func (o *Batch) 
InsertPermissionlessBatch(ctx context.Context, batchIndex *big.Int, batchHash common.Hash, codecVersion encoding.CodecVersion, chunk *Chunk) (*Batch, error) { + now := time.Now() + newBatch := &Batch{ + Index: batchIndex.Uint64(), + Hash: batchHash.Hex(), + StartChunkIndex: chunk.Index, + StartChunkHash: chunk.Hash, + EndChunkIndex: chunk.Index, + EndChunkHash: chunk.Hash, + PrevL1MessageQueueHash: chunk.PrevL1MessageQueueHash, + PostL1MessageQueueHash: chunk.PostL1MessageQueueHash, + BatchHeader: []byte{1, 2, 3}, + CodecVersion: int16(codecVersion), + EnableCompress: false, + ProvingStatus: int16(types.ProvingTaskVerified), + ProvedAt: &now, + RollupStatus: int16(types.RollupFinalized), + FinalizedAt: &now, + } + + db := o.db.WithContext(ctx) + db = db.Model(&Batch{}) + + if err := db.Create(newBatch).Error; err != nil { + return nil, fmt.Errorf("Batch.InsertPermissionlessBatch error: %w", err) + } + + return newBatch, nil +} + +// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch. +func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { + updateFields := make(map[string]interface{}) + updateFields["oracle_status"] = int(status) + updateFields["oracle_tx_hash"] = txHash + + db := o.db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash) + } + return nil +} + // UpdateProvingStatus updates the proving status of a batch. 
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { updateFields := make(map[string]interface{}) @@ -356,6 +405,29 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ return nil } +func (o *Batch) UpdateRollupStatusCommitAndFinalizeTxHash(ctx context.Context, hash string, status types.RollupStatus, commitTxHash string, finalizeTxHash string, dbTX ...*gorm.DB) error { + updateFields := make(map[string]interface{}) + updateFields["commit_tx_hash"] = commitTxHash + updateFields["committed_at"] = utils.NowUTC() + updateFields["finalize_tx_hash"] = finalizeTxHash + updateFields["finalized_at"] = utils.NowUTC() + + updateFields["rollup_status"] = int(status) + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Batch.UpdateRollupStatusCommitAndFinalizeTxHash error: %w, batch hash: %v, status: %v, commitTxHash: %v, finalizeTxHash: %v", err, hash, status.String(), commitTxHash, finalizeTxHash) + } + return nil +} + // UpdateRollupStatus updates the rollup status of a batch. func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error { updateFields := make(map[string]interface{}) diff --git a/rollup/internal/orm/bundle.go b/rollup/internal/orm/bundle.go index 044434838d..fb79648153 100644 --- a/rollup/internal/orm/bundle.go +++ b/rollup/internal/orm/bundle.go @@ -59,8 +59,8 @@ func (*Bundle) TableName() string { return "bundle" } -// getLatestBundle retrieves the latest bundle from the database. -func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) { +// GetLatestBundle retrieves the latest bundle from the database. 
+func (o *Bundle) GetLatestBundle(ctx context.Context) (*Bundle, error) { db := o.db.WithContext(ctx) db = db.Model(&Bundle{}) db = db.Order("index desc") @@ -70,7 +70,7 @@ func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) { if errors.Is(err, gorm.ErrRecordNotFound) { return nil, nil } - return nil, fmt.Errorf("getLatestBundle error: %w", err) + return nil, fmt.Errorf("GetLatestBundle error: %w", err) } return &latestBundle, nil } @@ -106,7 +106,7 @@ func (o *Bundle) GetBundles(ctx context.Context, fields map[string]interface{}, // GetFirstUnbundledBatchIndex retrieves the first unbundled batch index. func (o *Bundle) GetFirstUnbundledBatchIndex(ctx context.Context) (uint64, error) { // Get the latest bundle - latestBundle, err := o.getLatestBundle(ctx) + latestBundle, err := o.GetLatestBundle(ctx) if err != nil { return 0, fmt.Errorf("Bundle.GetFirstUnbundledBatchIndex error: %w", err) } @@ -237,14 +237,18 @@ func (o *Bundle) UpdateProvingStatus(ctx context.Context, hash string, status ty // UpdateRollupStatus updates the rollup status for a bundle. // only used in unit tests. 
-func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error { +func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error { updateFields := make(map[string]interface{}) updateFields["rollup_status"] = int(status) if status == types.RollupFinalized { updateFields["finalized_at"] = utils.NowUTC() } - db := o.db.WithContext(ctx) + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) db = db.Model(&Bundle{}) db = db.Where("hash", hash) diff --git a/rollup/internal/orm/chunk.go b/rollup/internal/orm/chunk.go index f8f6c0da08..fda5269409 100644 --- a/rollup/internal/orm/chunk.go +++ b/rollup/internal/orm/chunk.go @@ -7,9 +7,12 @@ import ( "time" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/go-ethereum/log" "gorm.io/gorm" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/crypto" + "github.com/scroll-tech/go-ethereum/log" + "scroll-tech/common/types" "scroll-tech/common/utils" @@ -278,6 +281,48 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer return &newChunk, nil } +func (o *Chunk) InsertPermissionlessChunk(ctx context.Context, index uint64, codecVersion encoding.CodecVersion, daBlobPayload encoding.DABlobPayload, totalL1MessagePoppedBefore uint64) (*Chunk, error) { + // Create some unique identifier. It is not really used for anything except in DB. + var chunkBytes []byte + for _, block := range daBlobPayload.Blocks() { + blockBytes := block.Encode() + chunkBytes = append(chunkBytes, blockBytes...) 
+ } + hash := crypto.Keccak256Hash(chunkBytes) + + numBlocks := len(daBlobPayload.Blocks()) + emptyHash := common.Hash{}.Hex() + newChunk := &Chunk{ + Index: index, + Hash: hash.Hex(), + StartBlockNumber: daBlobPayload.Blocks()[0].Number(), + StartBlockHash: emptyHash, + EndBlockNumber: daBlobPayload.Blocks()[numBlocks-1].Number(), + EndBlockHash: emptyHash, + StartBlockTime: daBlobPayload.Blocks()[0].Timestamp(), + TotalL1MessagesPoppedInChunk: 0, // this needs to be 0 so that the calculation of the total L1 messages popped before for the next chunk is correct + TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore, + PrevL1MessageQueueHash: daBlobPayload.PrevL1MessageQueueHash().Hex(), + PostL1MessageQueueHash: daBlobPayload.PostL1MessageQueueHash().Hex(), + ParentChunkHash: emptyHash, + StateRoot: emptyHash, + ParentChunkStateRoot: emptyHash, + WithdrawRoot: emptyHash, + CodecVersion: int16(codecVersion), + EnableCompress: false, + ProvingStatus: int16(types.ProvingTaskVerified), + } + + db := o.db.WithContext(ctx) + db = db.Model(&Chunk{}) + + if err := db.Create(newChunk).Error; err != nil { + return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash) + } + + return newChunk, nil +} + // InsertTestChunkForProposerTool inserts a new chunk into the database only for analysis usage by proposer tool. func (o *Chunk) InsertTestChunkForProposerTool(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, totalL1MessagePoppedBefore uint64, dbTX ...*gorm.DB) (*Chunk, error) { if chunk == nil || len(chunk.Blocks) == 0 {