diff --git a/.env.aws.template b/.env.aws.template index f9b85ffb46..1bcab5cb48 100644 --- a/.env.aws.template +++ b/.env.aws.template @@ -15,7 +15,8 @@ DOMAIN_NAME= # These variables need to be specified but have default values PROVIDER=aws -PREFIX="e2b-" # prefix identifier for all resources +# prefix identifier for all resources +PREFIX=e2b- # prod, staging, dev TERRAFORM_ENVIRONMENT=dev diff --git a/.env.gcp.template b/.env.gcp.template index 5c47427fa2..6c5bced521 100644 --- a/.env.gcp.template +++ b/.env.gcp.template @@ -20,7 +20,8 @@ POSTGRES_CONNECTION_STRING= # These variables need to be specified but have default values PROVIDER=gcp -PREFIX="e2b-" # prefix identifier for all resources +# prefix identifier for all resources +PREFIX=e2b- # prod, staging, dev TERRAFORM_ENVIRONMENT=dev diff --git a/Makefile b/Makefile index cbc901f2ea..ff129ddaf4 100644 --- a/Makefile +++ b/Makefile @@ -108,8 +108,8 @@ copy-public-builds: ifeq ($(PROVIDER),aws) mkdir -p ./.kernels mkdir -p ./.firecrackers - gsutil -m cp -r gs://e2b-prod-public-builds/kernels/* ./.kernels/ - gsutil -m cp -r gs://e2b-prod-public-builds/firecrackers/* ./.firecrackers/ + aws s3 cp s3://e2b-prod-public-builds/kernels/ ./.kernels/ --recursive --no-sign-request --endpoint-url https://storage.googleapis.com + aws s3 cp s3://e2b-prod-public-builds/firecrackers/ ./.firecrackers/ --recursive --no-sign-request --endpoint-url https://storage.googleapis.com aws s3 cp ./.kernels/ s3://${AWS_BUCKET_PREFIX}fc-kernels/ --recursive --profile ${AWS_PROFILE} aws s3 cp ./.firecrackers/ s3://${AWS_BUCKET_PREFIX}fc-versions/ --recursive --profile ${AWS_PROFILE} rm -rf ./.kernels diff --git a/iac/provider-aws/Makefile b/iac/provider-aws/Makefile index ab8cbe3ac5..567cabf87d 100644 --- a/iac/provider-aws/Makefile +++ b/iac/provider-aws/Makefile @@ -51,7 +51,12 @@ switch: .PHONY: init init: # Create S3 bucket for Terraform state if it doesn't exist - aws s3api create-bucket --bucket $(TERRAFORM_STATE_BUCKET) --region 
$(TEMPLATE_BUCKET_LOCATION) --profile $(AWS_PROFILE) --create-bucket-configuration LocationConstraint=$(TEMPLATE_BUCKET_LOCATION) >/dev/null 2>&1 || true + # us-east-1 requires omitting LocationConstraint; all other regions require it + @if [ "$(TEMPLATE_BUCKET_LOCATION)" = "us-east-1" ]; then \ + aws s3api create-bucket --bucket $(TERRAFORM_STATE_BUCKET) --region $(TEMPLATE_BUCKET_LOCATION) --profile $(AWS_PROFILE) 2>/dev/null || true; \ + else \ + aws s3api create-bucket --bucket $(TERRAFORM_STATE_BUCKET) --region $(TEMPLATE_BUCKET_LOCATION) --profile $(AWS_PROFILE) --create-bucket-configuration LocationConstraint=$(TEMPLATE_BUCKET_LOCATION) 2>/dev/null || true; \ + fi $(tf_vars) $(TF) init -upgrade -reconfigure -backend-config="bucket=${TERRAFORM_STATE_BUCKET}" $(tf_vars) $(TF) apply -target=module.init -input=false -compact-warnings diff --git a/self-host.md b/self-host.md index f7274a3070..e7f5d11b20 100644 --- a/self-host.md +++ b/self-host.md @@ -81,7 +81,7 @@ Check if you can use config for terraform state management > Get Supabase JWT Secret: go to the [Supabase dashboard](https://supabase.com/dashboard) -> Select your Project -> Project Settings -> Data API -> JWT Settings - e2b-posthog-api-key (optional, for monitoring) 9. Run `make plan-without-jobs` and then `make apply` -10. Run `make plan` and then `make apply`. Note: This will work after the TLS certificates was issued. It can take some time; you can check the status in the Google Cloud Console +10. Run `make plan` and then `make apply`. Note: This will work after the TLS certificates were issued. It can take some time; you can check the status in the Google Cloud Console. Database migrations run automatically via the API's db-migrator task. 11. Setup data in the cluster by running `make prep-cluster` in `packages/shared` to create an initial user, team, and build a base template. - You can also run `make seed-db` in `packages/db` to create more users and teams. 
@@ -140,16 +140,16 @@ Now, you should see the right quota options in `All Quotas` and be able to reque - `{prefix}supabase-jwt-secrets` - Supabase JWT secret (optional / required for the [E2B dashboard](https://github.com/e2b-dev/dashboard)) - `{prefix}grafana` - JSON with `API_KEY`, `OTLP_URL`, `OTEL_COLLECTOR_TOKEN`, `USERNAME` keys (optional, for monitoring) - `{prefix}launch-darkly-api-key` - LaunchDarkly SDK key (optional, for feature flags) -6. Build Packer AMIs for the cluster nodes: +6. Build the Packer AMI for cluster nodes (a single shared AMI used by all node types): ```sh - cd iac/provider-aws/packer - # Build AMIs for control server, API, client, clickhouse, and build nodes + cd iac/provider-aws/nomad-cluster-disk-image + make init # install Packer plugins + make build # build the AMI (~5 min, launches a t3.large) ``` 7. Run `make build-and-upload` to build and push container images and binaries 8. Run `make copy-public-builds` to copy Firecracker kernels and rootfs to your S3 buckets - > This requires `gsutil` to download from the public GCS bucket and `aws` CLI to upload to your S3 buckets 9. Run `make plan-without-jobs` and then `make apply` to provision the cluster infrastructure -10. Run `make plan` and then `make apply` to deploy all Nomad jobs +10. Run `make plan` and then `make apply` to deploy all Nomad jobs (this also runs database migrations automatically via the API's db-migrator task) 11. Setup data in the cluster by running `make prep-cluster` in `packages/shared` to create an initial user, team, and build a base template ### AWS Architecture