Skip to content

Commit 0a4342f

Browse files
committed
do migrations directly in instrumentation file
1 parent 8c1517e commit 0a4342f

File tree

4 files changed

+110
-103
lines changed

4 files changed

+110
-103
lines changed

apps/web/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ COPY . .
1010
RUN corepack enable pnpm
1111
RUN --mount=type=cache,id=pnpm,target=/root/.local/share/pnpm/store pnpm i --frozen-lockfile
1212

13-
ARG DOCKER_BUILD=true
13+
ARG NEXT_PUBLIC_DOCKER_BUILD=true
1414
ENV NEXT_PUBLIC_WEB_URL=http://localhost:3000
1515
ENV NEXT_PUBLIC_CAP_AWS_BUCKET=capso
1616
ENV NEXT_PUBLIC_CAP_AWS_REGION=us-east-1

apps/web/instrumentation.node.ts

Lines changed: 104 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,104 @@
1+
// This file is used to run database migrations in the docker builds or other self hosting environments.
2+
// It is not suitable (a.k.a DEADLY) for serverless environments where the server will be restarted on each request.
3+
//
4+
import { db } from "@cap/database";
5+
import { migrate } from "drizzle-orm/mysql2/migrator";
6+
import path from "path";
7+
import { buildEnv, serverEnv } from "@cap/env";
8+
import {
9+
BucketAlreadyOwnedByYou,
10+
CreateBucketCommand,
11+
PutBucketPolicyCommand,
12+
S3Client,
13+
} from "@aws-sdk/client-s3";
14+
15+
export async function register() {
16+
console.log("Waiting 5 seconds to run migrations");
17+
18+
// Function to trigger migrations with retry logic
19+
const triggerMigrations = async (retryCount = 0, maxRetries = 3) => {
20+
try {
21+
await runMigrations();
22+
} catch (error) {
23+
console.error(
24+
`🚨 Error triggering migrations (attempt ${retryCount + 1}):`,
25+
error
26+
);
27+
if (retryCount < maxRetries - 1) {
28+
console.log(
29+
`🔄 Retrying in 5 seconds... (${retryCount + 1}/${maxRetries})`
30+
);
31+
setTimeout(() => triggerMigrations(retryCount + 1, maxRetries), 5000);
32+
} else {
33+
console.error(`🚨 All ${maxRetries} migration attempts failed.`);
34+
process.exit(1); // Exit with error code if all attempts fail
35+
}
36+
}
37+
};
38+
39+
// Add a timeout to trigger migrations after 5 seconds on server start
40+
setTimeout(() => triggerMigrations(), 5000);
41+
42+
setTimeout(() => createS3Bucket(), 5000);
43+
}
44+
45+
async function createS3Bucket() {
46+
const s3Client = new S3Client({
47+
endpoint: serverEnv().CAP_AWS_ENDPOINT,
48+
region: serverEnv().CAP_AWS_REGION,
49+
credentials: {
50+
accessKeyId: serverEnv().CAP_AWS_ACCESS_KEY ?? "",
51+
secretAccessKey: serverEnv().CAP_AWS_SECRET_KEY ?? "",
52+
},
53+
forcePathStyle: serverEnv().S3_PATH_STYLE,
54+
});
55+
56+
await s3Client
57+
.send(new CreateBucketCommand({ Bucket: serverEnv().CAP_AWS_BUCKET }))
58+
.then(() => {
59+
console.log("Created S3 bucket");
60+
return s3Client.send(
61+
new PutBucketPolicyCommand({
62+
Bucket: serverEnv().CAP_AWS_BUCKET,
63+
Policy: JSON.stringify({
64+
Version: "2012-10-17",
65+
Statement: [
66+
{
67+
Effect: "Allow",
68+
Principal: "*",
69+
Action: ["s3:GetObject"],
70+
Resource: [`arn:aws:s3:::${serverEnv().CAP_AWS_BUCKET}/*`],
71+
},
72+
],
73+
}),
74+
})
75+
);
76+
})
77+
.then(() => {
78+
console.log("Configured S3 buckeet");
79+
})
80+
.catch((e) => {
81+
if (e instanceof BucketAlreadyOwnedByYou) {
82+
console.log("Found existing S3 bucket");
83+
return;
84+
}
85+
});
86+
}
87+
88+
async function runMigrations() {
89+
const isDockerBuild = buildEnv.NEXT_PUBLIC_DOCKER_BUILD === "true";
90+
if (isDockerBuild) {
91+
try {
92+
console.log("🔍 DB migrations triggered");
93+
console.log("💿 Running DB migrations...");
94+
95+
await migrate(db() as any, {
96+
migrationsFolder: path.join(process.cwd(), "/migrations"),
97+
});
98+
console.log("💿 Migrations run successfully!");
99+
} catch (error) {
100+
console.error("🚨 MIGRATION_FAILED", { error });
101+
throw error;
102+
}
103+
}
104+
}

apps/web/instrumentation.ts

Lines changed: 2 additions & 100 deletions
Original file line numberDiff line numberDiff line change
@@ -1,104 +1,6 @@
1-
// This file is used to run database migrations in the docker builds or other self hosting environments.
2-
// It is not suitable (a.k.a DEADLY) for serverless environments where the server will be restarted on each request.
3-
4-
import { serverEnv } from "@cap/env";
5-
import {
6-
BucketAlreadyOwnedByYou,
7-
CreateBucketCommand,
8-
PutBucketPolicyCommand,
9-
S3Client,
10-
} from "@aws-sdk/client-s3";
11-
121
export async function register() {
132
if (process.env.NEXT_RUNTIME === "nodejs") {
14-
console.log("Waiting 5 seconds to run migrations");
15-
16-
// Function to trigger migrations with retry logic
17-
const triggerMigrations = async (retryCount = 0, maxRetries = 3) => {
18-
try {
19-
const response = await fetch(
20-
`${serverEnv().WEB_URL}/api/selfhosted/migrations`,
21-
{
22-
method: "POST",
23-
headers: {
24-
"Content-Type": "application/json",
25-
},
26-
}
27-
);
28-
29-
// This will throw an error if the response status is not ok
30-
response.ok ||
31-
(await Promise.reject(new Error(`HTTP error ${response.status}`)));
32-
33-
const responseData = await response.json();
34-
console.log(
35-
"✅ Migrations triggered successfully:",
36-
responseData.message
37-
);
38-
} catch (error) {
39-
console.error(
40-
`🚨 Error triggering migrations (attempt ${retryCount + 1}):`,
41-
error
42-
);
43-
if (retryCount < maxRetries - 1) {
44-
console.log(
45-
`🔄 Retrying in 5 seconds... (${retryCount + 1}/${maxRetries})`
46-
);
47-
setTimeout(() => triggerMigrations(retryCount + 1, maxRetries), 5000);
48-
} else {
49-
console.error(`🚨 All ${maxRetries} migration attempts failed.`);
50-
process.exit(1); // Exit with error code if all attempts fail
51-
}
52-
}
53-
};
54-
55-
// Add a timeout to trigger migrations after 5 seconds on server start
56-
setTimeout(() => triggerMigrations(), 5000);
57-
58-
setTimeout(() => createS3Bucket(), 5000);
3+
const { register } = await import("./instrumentation.node");
4+
await register();
595
}
60-
return;
61-
}
62-
63-
async function createS3Bucket() {
64-
const s3Client = new S3Client({
65-
endpoint: serverEnv().CAP_AWS_ENDPOINT,
66-
region: serverEnv().CAP_AWS_REGION,
67-
credentials: {
68-
accessKeyId: serverEnv().CAP_AWS_ACCESS_KEY ?? "",
69-
secretAccessKey: serverEnv().CAP_AWS_SECRET_KEY ?? "",
70-
},
71-
forcePathStyle: serverEnv().S3_PATH_STYLE,
72-
});
73-
74-
await s3Client
75-
.send(new CreateBucketCommand({ Bucket: serverEnv().CAP_AWS_BUCKET }))
76-
.then(() => {
77-
console.log("Created S3 bucket");
78-
return s3Client.send(
79-
new PutBucketPolicyCommand({
80-
Bucket: serverEnv().CAP_AWS_BUCKET,
81-
Policy: JSON.stringify({
82-
Version: "2012-10-17",
83-
Statement: [
84-
{
85-
Effect: "Allow",
86-
Principal: "*",
87-
Action: ["s3:GetObject"],
88-
Resource: [`arn:aws:s3:::${serverEnv().CAP_AWS_BUCKET}/*`],
89-
},
90-
],
91-
}),
92-
})
93-
);
94-
})
95-
.then(() => {
96-
console.log("Configured S3 buckeet");
97-
})
98-
.catch((e) => {
99-
if (e instanceof BucketAlreadyOwnedByYou) {
100-
console.log("Found existing S3 bucket");
101-
return;
102-
}
103-
});
1046
}

apps/web/next.config.mjs

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const nextConfig = {
2121
ignoreBuildErrors: true,
2222
},
2323
experimental: {
24-
instrumentationHook: process.env.DOCKER_BUILD === "true",
24+
instrumentationHook: process.env.NEXT_PUBLIC_DOCKER_BUILD === "true",
2525
optimizePackageImports: ["@cap/ui", "@cap/utils", "@cap/web-api-contract"],
2626
serverComponentsExternalPackages: [
2727
"@react-email/components",
@@ -102,7 +102,8 @@ const nextConfig = {
102102
appVersion: version,
103103
},
104104
// If the NEXT_PUBLIC_DOCKER_BUILD environment variable is set to true, we output Next.js as a standalone build ready for docker deployment
105-
output: process.env.DOCKER_BUILD === "true" ? "standalone" : undefined,
105+
output:
106+
process.env.NEXT_PUBLIC_DOCKER_BUILD === "true" ? "standalone" : undefined,
106107
};
107108

108109
export default nextConfig;

0 commit comments

Comments
 (0)