version: 2.1
parameters:
ci_builder_image:
type: string
default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:v0.49.0
ci_builder_rust_image:
type: string
default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder-rust:latest
base_image:
type: string
default: default
# The dispatch parameters are used to manually dispatch pipelines that normally only run post-merge on develop
# from the CircleCI UI. Example configuration:
# when:
# or:
# - equal: [ "develop", <<pipeline.git.branch>> ]
# - equal: [ true, <<pipeline.parameters.main_dispatch>> ]
# Add a new `*_dispatch` parameter for any pipeline you want manual dispatch for.
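# For illustration only, this is how the two halves fit together for a hypothetical
# `example_feature_dispatch` parameter (not declared below); the parameter is declared here
# and then referenced from the workflow's `when` clause:
#   parameters:
#     example_feature_dispatch:
#       type: boolean
#       default: false
#   workflows:
#     example-feature:
#       when:
#         or:
#           - equal: [ "develop", <<pipeline.git.branch>> ]
#           - equal: [ true, <<pipeline.parameters.example_feature_dispatch>> ]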
main_dispatch:
type: boolean
default: true # default to running main in case the manual run cancelled an automatic run
fault_proofs_dispatch:
type: boolean
default: false
reproducibility_dispatch:
type: boolean
default: false
kontrol_dispatch:
type: boolean
default: false
sdk_dispatch:
type: boolean
default: false
docker_publish_dispatch:
type: boolean
default: false
orbs:
go: circleci/[email protected]
gcp-cli: circleci/[email protected]
slack: circleci/[email protected]
shellcheck: circleci/[email protected]
commands:
gcp-oidc-authenticate:
description: "Authenticate with GCP using a CircleCI OIDC token."
parameters:
project_id:
type: env_var_name
default: GCP_PROJECT_ID
workload_identity_pool_id:
type: env_var_name
default: GCP_WIP_ID
workload_identity_pool_provider_id:
type: env_var_name
default: GCP_WIP_PROVIDER_ID
service_account_email:
type: env_var_name
default: GCP_SERVICE_ACCOUNT_EMAIL
gcp_cred_config_file_path:
type: string
default: /home/circleci/gcp_cred_config.json
oidc_token_file_path:
type: string
default: /home/circleci/oidc_token.json
steps:
- run:
name: "Create OIDC credential configuration"
command: |
# Store OIDC token in temp file
echo $CIRCLE_OIDC_TOKEN > << parameters.oidc_token_file_path >>
# Create a credential configuration for the generated OIDC ID Token
gcloud iam workload-identity-pools create-cred-config \
"projects/${<< parameters.project_id >>}/locations/global/workloadIdentityPools/${<< parameters.workload_identity_pool_id >>}/providers/${<< parameters.workload_identity_pool_provider_id >>}"\
--output-file="<< parameters.gcp_cred_config_file_path >>" \
--service-account="${<< parameters.service_account_email >>}" \
--credential-source-file=<< parameters.oidc_token_file_path >>
- run:
name: "Authenticate with GCP using OIDC"
command: |
# Configure gcloud to leverage the generated credential configuration
gcloud auth login --brief --cred-file "<< parameters.gcp_cred_config_file_path >>"
# Configure ADC
echo "export GOOGLE_APPLICATION_CREDENTIALS='<< parameters.gcp_cred_config_file_path >>'" | tee -a "$BASH_ENV"
check-changed:
description: "Conditionally halts a step if certain modules change"
parameters:
patterns:
type: string
description: "Comma-separated list of dependencies"
no_go_deps:
type: string
default: ""
description: "If set, does not trigger on `go.mod` / `go.sum` changes."
steps:
- run:
name: "Check for changes"
environment:
CHECK_CHANGED_NO_GO_DEPS: "<<parameters.no_go_deps>>"
command: |
cd ops/check-changed
pip3 install -r requirements.txt
python3 main.py "<<parameters.patterns>>"
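# Example usage from a job's steps (the pattern list shown is illustrative): the step compares the
# comma-separated patterns against the changed paths and halts the job early when nothing relevant
# changed; set no_go_deps to any non-empty string to ignore go.mod / go.sum changes:
#   - checkout
#   - check-changed:
#       patterns: cannon,op-preimage,go.mod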
notify-failures-on-develop:
description: "Notify Slack"
parameters:
channel:
type: string
default: C03N11M0BBN
mentions:
type: string
default: ""
steps:
- slack/notify:
channel: << parameters.channel >>
event: fail
template: basic_fail_1
branch_pattern: develop
mentions: "<< parameters.mentions >>"
jobs:
cannon-go-lint-and-test:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: medium
steps:
- checkout
- check-changed:
patterns: cannon,packages/contracts-bedrock/src/cannon,op-preimage,go.mod
- attach_workspace:
at: "."
- run:
name: prep Cannon results dir
command: mkdir -p /tmp/test-results
- run:
name: build Cannon example binaries
command: make elf # only compile ELF binaries with Go; we do not have MIPS GCC for creating the debug dumps.
working_directory: cannon/example
- run:
name: Cannon Go lint
command: |
make lint
working_directory: cannon
- run:
name: Cannon Go tests
command: |
mkdir -p /testlogs
gotestsum --format=testname --junitfile=/tmp/test-results/cannon.xml --jsonfile=/testlogs/log.json \
-- -parallel=2 -coverpkg=github.com/ethereum-optimism/optimism/cannon/... -coverprofile=coverage.out ./...
working_directory: cannon
- run:
name: upload Cannon coverage
command: codecov --verbose --clean --flags cannon-go-tests
- store_test_results:
path: /tmp/test-results
- store_artifacts:
path: /testlogs
when: always
cannon-build-test-vectors:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: medium
steps:
- checkout
- check-changed:
patterns: cannon/mipsevm/tests/open_mips_tests/test
- run:
name: Build MIPS test vectors
command: python3 maketests.py && git diff --exit-code
working_directory: cannon/mipsevm/tests/open_mips_tests
pnpm-monorepo:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: xlarge
steps:
- checkout
- run:
name: "Check L1 geth version"
command: ./ops/scripts/geth-version-checker.sh || (echo "geth version is wrong, update ci-builder"; false)
- run:
name: git submodules
command: make submodules
- restore_cache:
name: Restore PNPM Package Cache
keys:
- pnpm-packages-v2-{{ checksum "pnpm-lock.yaml" }}
- restore_cache:
name: Restore Go modules cache
key: gomod-{{ checksum "go.sum" }}
# Fetch node_modules into the pnpm store.
# This caches node_modules based on pnpm-lock.yaml, so other steps can install them instantly with `pnpm install --prefer-offline`.
# --prefer-offline installs node_modules by reading from the cache when it exists rather than fetching from the network.
# pnpm installs node_modules by adding symlinks instead of copying files, which is why --prefer-offline is close to instant.
# This caching strategy only keys on the pnpm lockfile, so we don't have to keep the cache key in sync with our packages.
# For more information see https://pnpm.io/cli/fetch
- run:
name: Fetch dependencies
command: pnpm fetch --frozen-lockfile --prefer-offline
- save_cache:
name: Save PNPM Package Cache
key: pnpm-packages-v2-{{ checksum "pnpm-lock.yaml" }}
paths:
- "node_modules"
- run:
name: Install dependencies
command: pnpm install:ci:offline
- run:
name: print forge version
command: forge --version
- run:
name: Build monorepo
environment:
FOUNDRY_PROFILE: ci
command: pnpm build
- run:
name: Generate L2OO allocs
command: DEVNET_L2OO="true" make devnet-allocs
- run:
name: Copy L2OO allocs to .devnet-l2oo
command: cp -r .devnet/ .devnet-l2oo/
- run:
name: Generate Plasma allocs
command: DEVNET_PLASMA="true" make devnet-allocs
- run:
name: Copy Plasma allocs to .devnet-plasma
command: cp -r .devnet/ .devnet-plasma/
- run:
name: Generate Generic Plasma allocs
command: DEVNET_PLASMA="true" GENERIC_PLASMA="true" make devnet-allocs
- run:
name: Copy Generic Plasma allocs to .devnet-plasma-generic
command: cp -r .devnet/ .devnet-plasma-generic/
- run:
name: Generate default allocs
command: make devnet-allocs
- persist_to_workspace:
root: "."
paths:
- "packages/**/dist"
- "packages/contracts-bedrock/cache"
- "packages/contracts-bedrock/artifacts"
- "packages/contracts-bedrock/forge-artifacts"
- "packages/contracts-bedrock/tsconfig.tsbuildinfo"
- "packages/contracts-bedrock/tsconfig.build.tsbuildinfo"
- ".devnet/allocs-l1.json"
- ".devnet/allocs-l2-delta.json"
- ".devnet/allocs-l2-ecotone.json"
- ".devnet/allocs-l2-fjord.json"
- ".devnet/addresses.json"
- ".devnet-l2oo/allocs-l1.json"
- ".devnet-l2oo/addresses.json"
- ".devnet-l2oo/allocs-l2-delta.json"
- ".devnet-l2oo/allocs-l2-ecotone.json"
- ".devnet-l2oo/allocs-l2-fjord.json"
- ".devnet-plasma/allocs-l1.json"
- ".devnet-plasma/addresses.json"
- ".devnet-plasma/allocs-l2-delta.json"
- ".devnet-plasma/allocs-l2-ecotone.json"
- ".devnet-plasma/allocs-l2-fjord.json"
- ".devnet-plasma-generic/allocs-l1.json"
- ".devnet-plasma-generic/addresses.json"
- ".devnet-plasma-generic/allocs-l2-delta.json"
- ".devnet-plasma-generic/allocs-l2-ecotone.json"
- ".devnet-plasma-generic/allocs-l2-fjord.json"
- "packages/contracts-bedrock/deploy-config/devnetL1.json"
- "packages/contracts-bedrock/deployments/devnetL1"
- notify-failures-on-develop
docker-build:
environment:
DOCKER_BUILDKIT: 1
parameters:
docker_tags:
description: Docker image tags, comma-separated
type: string
docker_name:
description: "Docker buildx bake target"
type: string
default: ""
registry:
description: Docker registry
type: string
default: "us-docker.pkg.dev"
repo:
description: Docker repo
type: string
default: "oplabs-tools-artifacts/images"
save_image_tag:
description: Save docker image with given tag
type: string
default: ""
platforms:
description: Platforms to build for, comma-separated
type: string
default: "linux/amd64"
publish:
description: Publish the docker image (multi-platform, all tags)
type: boolean
default: false
release:
description: Run the release script
type: boolean
default: false
resource_class:
description: Docker resource class
type: string
default: medium
machine:
image: <<pipeline.parameters.base_image>>
resource_class: "<<parameters.resource_class>>"
docker_layer_caching: true # we rely on this for faster builds, and actively warm it up for builds with common stages
steps:
- checkout
- attach_workspace:
at: /tmp/docker_images
- run:
command: mkdir -p /tmp/docker_images
- when:
condition:
or:
- "<<parameters.publish>>"
- "<<parameters.release>>"
steps:
- gcp-cli/install
- when:
condition:
or:
- "<<parameters.publish>>"
- "<<parameters.release>>"
steps:
- gcp-oidc-authenticate
- run:
name: Build
command: |
# Check to see if DOCKER_HUB_READ_ONLY_TOKEN is set (i.e. we are running in the main repo, not a fork) before attempting to use secrets.
# Building should work without this read only login, but may get rate limited.
if [[ -v DOCKER_HUB_READ_ONLY_TOKEN ]]; then
echo "$DOCKER_HUB_READ_ONLY_TOKEN" | docker login -u "$DOCKER_HUB_READ_ONLY_USER" --password-stdin
fi
export REGISTRY="<<parameters.registry>>"
export REPOSITORY="<<parameters.repo>>"
export IMAGE_TAGS="$(echo -ne "<<parameters.docker_tags>>" | sed "s/[^a-zA-Z0-9\n,]/-/g")"
export GIT_COMMIT="$(git rev-parse HEAD)"
export GIT_DATE="$(git show -s --format='%ct')"
export PLATFORMS="<<parameters.platforms>>"
echo "Checking git tags pointing at $GIT_COMMIT:"
tags_at_commit=$(git tag --points-at $GIT_COMMIT)
echo "Tags at commit:\n$tags_at_commit"
filtered_tags=$(echo "$tags_at_commit" | grep "^<<parameters.docker_name>>/" || true)
echo "Filtered tags: $filtered_tags"
if [ -z "$filtered_tags" ]; then
export GIT_VERSION="untagged"
else
sorted_tags=$(echo "$filtered_tags" | sed "s/<<parameters.docker_name>>\///" | sort -V)
echo "Sorted tags: $sorted_tags"
# prefer full release tag over "-rc" release candidate tag if both exist
full_release_tag=$(echo "$sorted_tags" | grep -v -- "-rc" || true)
if [ -z "$full_release_tag" ]; then
export GIT_VERSION=$(echo "$sorted_tags" | tail -n 1)
else
export GIT_VERSION=$(echo "$full_release_tag" | tail -n 1)
fi
fi
echo "Setting GIT_VERSION=$GIT_VERSION"
# Create, start (bootstrap) and use a *named* docker builder
# This allows us to cross-build multi-platform,
# and naming allows us to use the DLC (docker-layer-cache)
docker buildx create --driver=docker-container --name=buildx-build --bootstrap --use
DOCKER_OUTPUT_DESTINATION=""
if [ "<<parameters.publish>>" == "true" ]; then
gcloud auth configure-docker <<parameters.registry>>
echo "Building for platforms $PLATFORMS and then publishing to registry"
DOCKER_OUTPUT_DESTINATION="--push"
if [ "<<parameters.save_image_tag>>" != "" ]; then
echo "ERROR: cannot save image to docker when publishing to registry"
exit 1
fi
else
if [ "<<parameters.save_image_tag>>" == "" ]; then
echo "Running $PLATFORMS build without destination (cache warm-up)"
DOCKER_OUTPUT_DESTINATION=""
elif [[ $PLATFORMS == *,* ]]; then
echo "ERROR: cannot perform multi-arch (platforms: $PLATFORMS) build while also loading the result into regular docker"
exit 1
else
echo "Running single-platform $PLATFORMS build and loading into docker"
DOCKER_OUTPUT_DESTINATION="--load"
fi
fi
# Let them cook!
docker buildx bake \
--progress plain \
--builder=buildx-build \
-f docker-bake.hcl \
$DOCKER_OUTPUT_DESTINATION \
<<parameters.docker_name>>
no_output_timeout: 45m
- when:
condition: "<<parameters.publish>>"
steps:
- notify-failures-on-develop
- when:
condition: "<<parameters.save_image_tag>>"
steps:
- run:
name: Save
command: |
IMAGE_NAME="<<parameters.registry>>/<<parameters.repo>>/<<parameters.docker_name>>:<<parameters.save_image_tag>>"
docker save -o /tmp/docker_images/<<parameters.docker_name>>.tar $IMAGE_NAME
- persist_to_workspace:
root: /tmp/docker_images
paths: # only write the one file, to avoid concurrent workspace-file additions
- "<<parameters.docker_name>>.tar"
- when:
condition: "<<parameters.release>>"
steps:
- run:
name: Tag
command: |
./ops/scripts/ci-docker-tag-op-stack-release.sh <<parameters.registry>>/<<parameters.repo>> $CIRCLE_TAG $CIRCLE_SHA1
- when:
condition:
or:
- and:
- "<<parameters.publish>>"
- "<<parameters.release>>"
- and:
- "<<parameters.publish>>"
- equal: [develop, << pipeline.git.branch >>]
steps:
- gcp-oidc-authenticate:
service_account_email: GCP_SERVICE_ATTESTOR_ACCOUNT_EMAIL
- run:
name: Sign
command: |
git clone https://github.com/ethereum-optimism/binary_signer
cd binary_signer/signer
git checkout tags/v1.0.3
IMAGE_PATH="<<parameters.registry>>/<<parameters.repo>>/<<parameters.docker_name>>:<<pipeline.git.revision>>"
echo $IMAGE_PATH
pip3 install -r requirements.txt
python3 ./sign_image.py --command="sign"\
--attestor-project-name="$ATTESTOR_PROJECT_NAME"\
--attestor-name="$ATTESTOR_NAME"\
--image-path="$IMAGE_PATH"\
--signer-logging-level="INFO"\
--attestor-key-id="//cloudkms.googleapis.com/v1/projects/$ATTESTOR_PROJECT_NAME/locations/global/keyRings/$ATTESTOR_NAME-key-ring/cryptoKeys/$ATTESTOR_NAME-key/cryptoKeyVersions/1"
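# Illustrative invocation of this job from a workflow (the job name and tag values here are
# placeholders; actual invocations live in the workflows section of this config):
#   - docker-build:
#       name: op-node-docker-build
#       docker_name: op-node
#       docker_tags: <<pipeline.git.revision>>
#       platforms: "linux/amd64,linux/arm64"
#       publish: true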
# Verify newly published images (built on AMD machine) will run on ARM
check-cross-platform:
docker:
- image: cimg/base:current
resource_class: arm.medium
parameters:
registry:
description: Docker registry
type: string
default: "us-docker.pkg.dev"
repo:
description: Docker repo
type: string
default: "oplabs-tools-artifacts/images"
op_component:
description: "Name of op-stack component (e.g. op-node)"
type: string
default: ""
docker_tag:
description: "Tag of docker image"
type: string
default: "<<pipeline.git.revision>>"
steps:
- setup_remote_docker
- run:
name: "Verify Image Platform"
command: |
image_name="<<parameters.registry>>/<<parameters.repo>>/<<parameters.op_component>>:<<parameters.docker_tag>>"
echo "Retrieving Docker image manifest: $image_name"
MANIFEST=$(docker manifest inspect $image_name)
echo "Verifying 'linux/arm64' is supported..."
SUPPORTED_PLATFORM=$(echo "$MANIFEST" | jq -r '.manifests[] | select(.platform.architecture == "arm64" and .platform.os == "linux")')
echo "$SUPPORTED_PLATFORM"
if [ -z "$SUPPORTED_PLATFORM" ]; then
echo "Platform 'linux/arm64' not supported by this image"
exit 1
fi
- run:
name: "Pull and run docker image"
command: |
image_name="<<parameters.registry>>/<<parameters.repo>>/<<parameters.op_component>>:<<parameters.docker_tag>>"
docker pull $image_name || exit 1
docker run $image_name <<parameters.op_component>> --version || exit 1
contracts-bedrock-coverage:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: xlarge
steps:
- checkout
- run:
name: git submodules
command: make submodules
- check-changed:
patterns: contracts-bedrock,op-node
- run:
name: print forge version
command: forge --version
working_directory: packages/contracts-bedrock
# We do not use the pre-built contracts because forge coverage uses different optimizer settings
- run:
name: test and generate coverage
command: pnpm coverage:lcov
no_output_timeout: 18m
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
- run:
name: upload coverage
command: codecov --verbose --clean --flags contracts-bedrock-tests
environment:
FOUNDRY_PROFILE: ci
contracts-bedrock-tests:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: xlarge
steps:
- checkout
- run:
name: git submodules
command: make submodules
- check-changed:
patterns: contracts-bedrock,op-node
- run:
name: print forge version
command: forge --version
working_directory: packages/contracts-bedrock
- run:
name: run tests
command: pnpm test
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
no_output_timeout: 15m
contracts-bedrock-checks:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: xlarge
steps:
- checkout
- run:
name: git submodules
command: make submodules
- restore_cache:
name: Restore PNPM Package Cache
keys:
- pnpm-packages-v2-{{ checksum "pnpm-lock.yaml" }}
- attach_workspace: { at: "." }
- check-changed:
patterns: contracts-bedrock,op-node
- setup_remote_docker:
docker_layer_caching: true
# populate node modules from the cache
- run:
name: Install dependencies
command: pnpm install:ci
- run:
name: forge version
command: forge --version
- run:
# The solc warnings check must be the first step that builds the contracts so that the
# warnings are emitted here. On subsequent runs, forge reads artifacts from the cache,
# so the warnings would not appear again.
name: solc warnings check
command: |
forge build --deny-warnings || echo "export SOLC_WARNINGS_CHECK=1" >> "$BASH_ENV"
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
- run:
# Semver lock must come second because one of the later steps may modify the cache & force a contracts rebuild.
name: semver lock
command: |
pnpm semver-lock
git diff --exit-code semver-lock.json || echo "export SEMVER_LOCK_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: check deploy configs
command: pnpm validate-deploy-configs || echo "export DEPLOY_CONFIGS_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: lint
command: |
pnpm lint:check || echo "export LINT_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: gas snapshot
command: |
pnpm gas-snapshot --check || echo "export GAS_SNAPSHOT_STATUS=1" >> "$BASH_ENV"
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
no_output_timeout: 15m
- run:
name: invariant docs
command: |
pnpm autogen:invariant-docs
git diff --exit-code ./invariant-docs/*.md || echo "export INVARIANT_DOCS_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: snapshots
command: |
pnpm snapshots:check || echo "export SNAPSHOTS_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: size check
command: |
forge build --sizes --skip "/**/test/**" --skip "/**/scripts/**" || echo "export SIZE_CHECK=1" >> "$BASH_ENV"
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
- run:
name: check statuses
command: |
if [[ "$LINT_STATUS" -ne 0 ]]; then
echo "Linting failed, see job output for details."
FAILED=1
fi
if [[ "$SOLC_WARNINGS_CHECK" -ne 0 ]]; then
echo "Solidity emitted warnings, see job output for details."
FAILED=1
fi
if [[ "$GAS_SNAPSHOT_STATUS" -ne 0 ]]; then
echo "Gas snapshot failed, see job output for details."
FAILED=1
fi
if [[ "$SEMVER_LOCK_STATUS" -ne 0 ]]; then
echo "Semver lock failed, see job output for details."
FAILED=1
fi
if [[ "$INVARIANT_DOCS_STATUS" -ne 0 ]]; then
echo "Invariant docs failed, see job output for details."
FAILED=1
fi
if [[ "$DEPLOY_CONFIGS_STATUS" -ne 0 ]]; then
echo "Deploy config check failed, see job output for details."
FAILED=1
fi
if [[ "$SNAPSHOTS_STATUS" -ne 0 ]]; then
echo "Snapshots check failed, see job output for details."
FAILED=1
fi
if [[ "$SIZE_CHECK" -ne 0 ]]; then
echo "Contract(s) exceed size limit, see job output for details."
FAILED=1
fi
if [[ "$FAILED" -ne 0 ]]; then
exit 1
fi
contracts-bedrock-validate-spaces:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: medium
steps:
- checkout
- restore_cache:
name: Restore PNPM Package Cache
keys:
- pnpm-packages-v2-{{ checksum "pnpm-lock.yaml" }}
- attach_workspace: { at: "." }
- run:
name: Install dependencies
command: pnpm install:ci
- check-changed:
patterns: contracts-bedrock
- run:
name: validate spacers
command: pnpm validate-spacers
working_directory: packages/contracts-bedrock
js-lint-test:
parameters:
package_name:
description: Package name
type: string
dependencies:
description: Regex matching dependent packages
type: string
default: this-package-does-not-exist
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: large
steps:
- checkout
- attach_workspace: { at: "." }
- restore_cache:
name: Restore PNPM Package Cache
keys:
- pnpm-packages-v2-{{ checksum "pnpm-lock.yaml" }}
- check-changed:
patterns: <<parameters.package_name>>,<<parameters.dependencies>>
# populate node modules from the cache
- run:
name: Install dependencies
command: pnpm install:ci
- run:
name: Lint
command: pnpm lint && git diff --exit-code
working_directory: packages/<<parameters.package_name>>
- run:
name: Test
command: pnpm test
working_directory: packages/<<parameters.package_name>>
todo-issues:
machine:
image: <<pipeline.parameters.base_image>>
steps:
- checkout
- run:
name: Install ripgrep
command: sudo apt-get install -y ripgrep
- run:
name: Check TODO issues
command: ./ops/scripts/todo-checker.sh --verbose
- notify-failures-on-develop
fuzz-golang:
parameters:
package_name:
description: Go package name
type: string
on_changes:
description: changed pattern to fire fuzzer on
type: string
uses_artifacts:
description: should load in foundry artifacts
type: boolean
default: false
docker:
- image: <<pipeline.parameters.ci_builder_image>>
steps:
- checkout
- check-changed:
patterns: "<<parameters.package_name>>"
- when:
    condition: <<parameters.uses_artifacts>>
    steps:
      - attach_workspace:
          at: "."
- restore_cache:
name: Restore Go modules cache
key: gomod-{{ checksum "go.sum" }}
- restore_cache:
name: Restore Go build cache
key: golang-build-cache
- run:
name: Fuzz
command: make fuzz
working_directory: "<<parameters.package_name>>"
- save_cache:
key: golang-build-cache
paths:
- "/root/.cache/go-build"
l1-geth-version-check:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
steps:
- checkout
- run:
name: "Check L1 geth version"
command: ./ops/scripts/geth-version-checker.sh || (echo "geth version is wrong, update ci-builder"; false)
go-lint:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
steps:
- checkout
- restore_cache:
name: Restore Go modules cache
key: gomod-{{ checksum "go.sum" }}
- restore_cache:
key: golang-build-cache
- restore_cache:
key: golang-lint-cache
- run:
name: run Go linter
command: |
# Identify how many cores it defaults to
golangci-lint --help | grep concurrency
make lint-go
working_directory: .
- save_cache:
key: golang-build-cache
paths:
- "/root/.cache/go-build"
- save_cache:
key: golang-lint-cache
paths:
- "/root/.cache/golangci-lint"
go-test:
parameters:
module:
description: Go Module Name
type: string
docker:
- image: <<pipeline.parameters.ci_builder_image>> # only used to enable codecov.
resource_class: xlarge
steps:
- checkout
- restore_cache:
name: Restore Go modules cache
key: gomod-{{ checksum "go.sum" }}
- restore_cache:
keys:
- golang-build-cache-<<parameters.module>>
- golang-build-cache-
- run:
name: prep results dir
command: mkdir -p /tmp/test-results && mkdir -p /testlogs
- run:
name: run tests
command: |
gotestsum --format=testname --junitfile=/tmp/test-results/<<parameters.module>>.xml --jsonfile=/testlogs/log.json \
-- -parallel=8 -coverpkg=github.com/ethereum-optimism/optimism/... -coverprofile=coverage.out ./...
working_directory: <<parameters.module>>
- save_cache:
key: golang-build-cache-<<parameters.module>>
paths:
- "/root/.cache/go-build"
# TODO(CLI-148): Fix codecov
#- run:
#name: upload coverage
#command: codecov --verbose --clean --flags bedrock-go-tests
- store_test_results:
path: /tmp/test-results
- store_artifacts:
path: /testlogs
when: always
go-e2e-test:
parameters:
variant:
type: string
default: ''
module:
description: Go Module Name
type: string
target:
description: The make target to execute
type: string
parallelism:
description: Number of parallel test runs
type: integer
default: 6
notify:
description: Whether to notify on failure
type: boolean
default: false
mentions:
description: Slack user or group to mention when notifying of failures
type: string
default: ""
environment:
DEVNET_L2OO: 'false'
OP_E2E_USE_L2OO: 'false'
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: xlarge
parallelism: <<parameters.parallelism>>
steps:
- checkout
- when:
condition:
equal: ['-l2oo', <<parameters.variant>>]
steps:
- run:
name: Set DEVNET_L2OO = true
command: echo 'export DEVNET_L2OO=true' >> $BASH_ENV
- run:
name: Set OP_E2E_USE_L2OO = true
command: echo 'export OP_E2E_USE_L2OO=true' >> $BASH_ENV
- when:
condition:
equal: ['-plasma', <<parameters.variant>>]
steps:
- run:
name: Set OP_E2E_USE_PLASMA = true
command: echo 'export OP_E2E_USE_PLASMA=true' >> $BASH_ENV
- check-changed:
patterns: op-(.+),cannon,contracts-bedrock
- run:
name: prep results dir
command: mkdir -p /tmp/test-results
- restore_cache:
name: Restore Go modules cache
key: gomod-{{ checksum "go.sum" }}
- restore_cache:
name: Restore Go build cache
key: golang-build-cache
- attach_workspace:
at: /tmp/workspace
- run:
name: Load devnet-allocs
command: |
mkdir -p .devnet
cp /tmp/workspace/.devnet<<parameters.variant>>/allocs-l2-delta.json .devnet/allocs-l2-delta.json
cp /tmp/workspace/.devnet<<parameters.variant>>/allocs-l2-ecotone.json .devnet/allocs-l2-ecotone.json
cp /tmp/workspace/.devnet<<parameters.variant>>/allocs-l2-fjord.json .devnet/allocs-l2-fjord.json
cp /tmp/workspace/.devnet<<parameters.variant>>/allocs-l1.json .devnet/allocs-l1.json
cp /tmp/workspace/.devnet<<parameters.variant>>/addresses.json .devnet/addresses.json
cp /tmp/workspace/packages/contracts-bedrock/deploy-config/devnetL1.json packages/contracts-bedrock/deploy-config/devnetL1.json
cp -r /tmp/workspace/packages/contracts-bedrock/deployments/devnetL1 packages/contracts-bedrock/deployments/devnetL1
- run:
name: print go's available MIPS targets
command: go tool dist list | grep mips
- run:
name: run tests
no_output_timeout: 20m
command: |
mkdir -p /testlogs
# The below env var gets overridden when running make test-cannon, but we
# need to explicitly set it here to prevent Cannon from running when we don't
# want it to.
export OP_E2E_CANNON_ENABLED="false"
# Note: We don't use CircleCI test splits because we need to split by test name, not by package. There is an additional
# constraint that gotestsum does not currently (and likely never will) accept files from different packages when building.
JUNIT_FILE=/tmp/test-results/<<parameters.module>>_<<parameters.target>>.xml JSON_LOG_FILE=/testlogs/test.log make <<parameters.target>>
working_directory: <<parameters.module>>
- store_artifacts:
path: /testlogs
when: always
- store_artifacts:
path: /tmp/test-results
when: always
- store_test_results:
path: /tmp/test-results
- when:
condition: "<<parameters.notify>>"
steps:
- notify-failures-on-develop:
mentions: "<<parameters.mentions>>"
go-lint-test-build:
parameters:
binary_name:
description: Binary name to build
type: string
working_directory:
description: Working directory
type: string
build:
description: Whether or not to build the binary
type: boolean
default: true
dependencies:
description: Regex matching dependent packages
type: string
default: this-package-does-not-exist
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: medium
steps:
- checkout
- check-changed:
patterns: <<parameters.working_directory>>,<<parameters.dependencies>>
- run:
name: Lint
command: make lint
working_directory: <<parameters.working_directory>>
- run:
name: Test
command: |
mkdir -p /test-results
gotestsum --format=testname --junitfile /test-results/tests.xml --jsonfile /test-results/log.json -- -parallel=2
working_directory: <<parameters.working_directory>>
- store_test_results:
path: /test-results
- store_artifacts:
path: /testlogs
when: always
- when: