
Commit cacf643

get records and make a plan
1 parent 6c2b360 commit cacf643

7 files changed: +174 -60 lines changed


packages/cli/src/commands/mirror.ts

Lines changed: 43 additions & 5 deletions
@@ -1,9 +1,12 @@
 import type { CommandModule, InferredOptionTypes } from "yargs";
-import { createClient, getAddress, http } from "viem";
+import { createClient, getAddress, http, isHex } from "viem";
 import chalk from "chalk";
 import { getChainId } from "viem/actions";
 import { defaultChains } from "../defaultChains";
 import { mirror } from "../mirror/mirror";
+import { MUDError } from "@latticexyz/common/errors";
+import { kmsKeyToAccount } from "@latticexyz/common/kms";
+import { privateKeyToAccount } from "viem/accounts";
 
 const options = {
   rpcBatch: {
@@ -19,6 +22,10 @@ const options = {
     desc: "Source world address to mirror from.",
     required: true,
   },
+  fromBlock: {
+    type: "number",
+    desc: "Block number of source world deploy.",
+  },
   fromRpc: {
     type: "string",
     desc: "RPC URL of source chain to mirror from.",
@@ -55,11 +62,36 @@ const commandModule: CommandModule<Options, Options> = {
     });
     const fromChainId = await getChainId(fromClient);
     const fromIndexer = opts.fromIndexer ?? defaultChains.find((chain) => chain.id === fromChainId)?.indexerUrl;
+    if (!fromIndexer) {
+      throw new MUDError(`No \`--fromIndexer\` provided or indexer URL configured for chain ${fromChainId}.`);
+    }
+
+    const account = await (async () => {
+      if (opts.kms) {
+        const keyId = process.env.AWS_KMS_KEY_ID;
+        if (!keyId) {
+          throw new MUDError(
"Missing `AWS_KMS_KEY_ID` environment variable. This is required when using with `--kms` option.",
75+
);
76+
}
77+
78+
return await kmsKeyToAccount({ keyId });
79+
} else {
80+
const privateKey = process.env.PRIVATE_KEY;
81+
if (!isHex(privateKey)) {
82+
throw new MUDError(
83+
`Missing or invalid \`PRIVATE_KEY\` environment variable. To use the default Anvil private key, run\n\n echo "PRIVATE_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" > .env\n`,
84+
);
85+
}
86+
return privateKeyToAccount(privateKey);
87+
}
88+
})();
5889

5990
const toClient = createClient({
6091
transport: http(opts.toRpc, {
6192
batch: opts.rpcBatch ? { batchSize: 100, wait: 1000 } : undefined,
6293
}),
94+
account,
6395
});
6496
const toChainId = await getChainId(toClient);
6597

@@ -71,11 +103,17 @@ const commandModule: CommandModule<Options, Options> = {
71103
),
72104
);
73105

74-
// TODO: load up account from KMS or private key
75-
76106
await mirror({
77-
from: { world: fromWorld, client: fromClient, indexer: fromIndexer },
78-
to: { client: toClient },
107+
rootDir: process.cwd(),
108+
from: {
109+
world: fromWorld,
110+
block: opts.fromBlock != null ? BigInt(opts.fromBlock) : undefined,
111+
client: fromClient,
112+
indexer: fromIndexer,
113+
},
114+
to: {
115+
client: toClient,
116+
},
79117
});
80118
},
81119
};
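
As a rough usage sketch, the updated command above amounts to something like the following when calling `mirror` directly. The RPC URLs, world address, and block number are placeholders, and `PRIVATE_KEY` is assumed to be set in the environment, as the command expects:

import { createClient, http } from "viem";
import { privateKeyToAccount } from "viem/accounts";
import { mirror } from "../mirror/mirror";

// Placeholder inputs; substitute real endpoints and the source world address.
const account = privateKeyToAccount(process.env.PRIVATE_KEY as `0x${string}`);

await mirror({
  rootDir: process.cwd(),
  from: {
    world: "0x0000000000000000000000000000000000000000", // placeholder source world address
    block: 1_000_000n, // optional: block number of the source world deploy
    client: createClient({ transport: http("https://source-rpc.example") }),
    indexer: "https://source-indexer.example",
  },
  to: {
    // The target client now carries an account so the mirrored records can be written.
    client: createClient({ transport: http("https://target-rpc.example"), account }),
  },
});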

packages/cli/src/mirror/debug.ts

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+import { debug as parentDebug } from "../debug";
+
+export const debug = parentDebug.extend("mirror");
+export const error = parentDebug.extend("mirror");
+
+// Pipe debug output to stdout instead of stderr
+debug.log = console.debug.bind(console);
+
+// Pipe error output to stderr
+error.log = console.error.bind(console);

packages/cli/src/mirror/mirror.ts

Lines changed: 73 additions & 10 deletions
@@ -1,30 +1,93 @@
-import { Address, Client } from "viem";
+import { createWriteStream } from "fs";
+import { createGzip } from "zlib";
+import { pipeline } from "stream/promises";
+import { Account, Address, Chain, Client, Transport } from "viem";
+import { getWorldDeploy } from "../deploy/getWorldDeploy";
+import { getChainId } from "viem/actions";
+import { getTables } from "../deploy/getTables";
+import { resourceToLabel } from "@latticexyz/common";
+import { getRecordsAsLogs } from "@latticexyz/store-sync";
+import pRetry from "p-retry";
+import { Table } from "@latticexyz/config";
+import path from "path";
 
-// TODO: decide if preserving world address is important (chain config has to change anyway)
+// TODO: attempt to create world the same way as it was originally created, thus preserving world address
+// TODO: set up table to track migrated records with original metadata (block number/timestamp) and for lazy migrations
 
-export async function mirror(opts: {
+export async function mirror({
+  rootDir,
+  from,
+  to,
+}: {
+  rootDir: string;
   from: {
+    block?: bigint;
     world: Address;
     client: Client;
-    indexer?: string;
+    indexer: string;
   };
   to: {
-    client: Client;
+    client: Client<Transport, Chain | undefined, Account>;
   };
 }) {
   // TODO: check for world balance, warn
-  //
-  // TODO: get world salt, factory, and deployer address
-  // TODO: prepare world deploy, make sure resulting world addresses will match
   // TODO: deploy world
   //
-  // TODO: set up a table to track migrated records? would make this idempotent and enable lazy mirroring
-  //
+
   // TODO: fetch data from indexer
   // TODO: check each system for state/balance, warn
   //
   // TODO: set records for each table
   //
   // TODO: deploy each system via original bytecode
   // TODO: update system addresses as necessary (should this be done as part of setting records?)
+  //
+  const fromChainId = await getChainId(from.client);
+  const toChainId = await getChainId(to.client);
+
+  const plan = createPlan(path.join(rootDir, "mud-mirror-plan.json.gz"));
+  plan.add("mirror", { from: { chainId: fromChainId, world: from.world }, to: { chainId: toChainId } });
+
+  const worldDeploy = await getWorldDeploy(from.client, from.world, from.block);
+  const tables = await getTables({
+    client: from.client,
+    worldDeploy,
+    indexerUrl: from.indexer,
+    chainId: fromChainId,
+  });
+
+  for (const table of tables) {
+    const logs = await pRetry(() =>
+      getRecordsAsLogs<Table>({
+        worldAddress: from.world,
+        table: table as never,
+        client: from.client,
+        indexerUrl: from.indexer,
+        chainId: fromChainId,
+      }),
+    );
+    console.log("got", logs.length, "logs for", resourceToLabel(table));
+    for (const log of logs) {
+      plan.add("setRecord", log.args);
+    }
+  }
+
+  await plan.end();
+}
+
+function createPlan(filename: string) {
+  const gzip = createGzip();
+  const fileStream = createWriteStream(filename);
+  const output = pipeline(gzip, fileStream);
+  gzip.write("[\n");
+  return {
+    add(step: string, data: any) {
+      gzip.write(JSON.stringify({ step, data }) + ",\n");
+      return this;
+    },
+    async end() {
+      gzip.end("]\n");
+      await output;
+    },
+  };
 }
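
The plan file written by `createPlan` is a gzipped stream: an opening `[` line, one `{"step":...,"data":...},` line per entry, and a closing `]` line. A minimal sketch of how a later apply step might read it back, parsing line by line because each entry carries a trailing comma (the `readPlan` helper below is hypothetical, not part of this commit):

import { createReadStream } from "fs";
import { createGunzip } from "zlib";

// Hypothetical reader for the plan format produced by createPlan above.
async function readPlan(filename: string): Promise<{ step: string; data: unknown }[]> {
  const chunks: Buffer[] = [];
  // Stream the gzipped file and decompress it into memory.
  for await (const chunk of createReadStream(filename).pipe(createGunzip())) {
    chunks.push(chunk as Buffer);
  }
  return Buffer.concat(chunks)
    .toString("utf8")
    .split("\n")
    .map((line) => line.trim().replace(/,$/, ""))
    .filter((line) => line !== "" && line !== "[" && line !== "]")
    .map((line) => JSON.parse(line));
}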

packages/store-sync/src/exports/internal.ts

Lines changed: 6 additions & 1 deletion
@@ -1,5 +1,10 @@
+export * from "../getRecords";
+
 // SQL
-export * from "../sql";
+export * from "../sql/common";
+export * from "../sql/fetchRecords";
+export * from "../sql/selectFrom";
+export * from "../sql/getSnapshot";
 
 // Stash
 export * from "../stash/common";

packages/store-sync/src/getRecords.ts

Lines changed: 41 additions & 39 deletions
@@ -43,46 +43,48 @@ type GetRecordsResult<table extends Table = Table> = {
 export async function getRecords<table extends Table>(
   options: GetRecordsOptions<table>,
 ): Promise<GetRecordsResult<table>> {
-  async function getLogs(): Promise<readonly StorageAdapterLog[]> {
-    if (options.indexerUrl && options.chainId) {
-      debug("fetching records for", options.table.label, "via indexer from", options.indexerUrl);
-      const logs = await getSnapshot({
-        chainId: options.chainId,
-        address: options.worldAddress,
-        indexerUrl: options.indexerUrl,
-        filters: [{ tableId: options.table.tableId }],
-      });
-      // By default, the indexer includes the `store.Tables` table as part of the snapshot.
-      // Once we change this default, we can remove the filter here.
-      // See https://github.com/latticexyz/mud/issues/3386.
-      return logs?.logs.filter((log) => log.args.tableId === options.table.tableId) ?? [];
-    } else {
-      const client =
-        options.client ??
-        createClient({
-          transport: http(options.rpcUrl),
-        });
-      debug("fetching records for", options.table.label, "via RPC from", client.transport.url);
-      const blockLogs = await fetchBlockLogs({
-        fromBlock: options.fromBlock ?? 0n,
-        toBlock: options.toBlock ?? (await getAction(client, getBlockNumber, "getBlockNumber")({})),
-        maxBlockRange: 100_000n,
-        async getLogs({ fromBlock, toBlock }) {
-          return getStoreLogs(client, {
-            address: options.worldAddress,
-            fromBlock,
-            toBlock,
-            tableId: options.table.tableId,
-          });
-        },
-      });
-      return flattenStoreLogs(blockLogs.flatMap((block) => block.logs));
-    }
-  }
-
-  const logs = await getLogs();
-  const records = logs.map((log) => logToRecord({ log: log as LogToRecordArgs<table>["log"], table: options.table }));
+  const logs = await getRecordsAsLogs(options);
+  const records = logs.map((log) => logToRecord({ log: log as never, table: options.table }));
   const blockNumber = logs.length > 0 ? logs[logs.length - 1].blockNumber ?? 0n : 0n;
   debug("found", records.length, "records for table", options.table.label, "at block", blockNumber);
   return { records, blockNumber };
 }
+
+export async function getRecordsAsLogs<table extends Table>(
+  options: GetRecordsOptions<table>,
+): Promise<readonly Extract<StorageAdapterLog, { eventName: "Store_SetRecord" }>[]> {
+  if (options.indexerUrl && options.chainId) {
+    debug("fetching records for", options.table.label, "via indexer from", options.indexerUrl);
+    const snapshot = await getSnapshot({
+      chainId: options.chainId,
+      address: options.worldAddress,
+      indexerUrl: options.indexerUrl,
+      filters: [{ tableId: options.table.tableId }],
+    });
+    // By default, the indexer includes the `store.Tables` table as part of the snapshot.
+    // Once we change this default, we can remove the filter here.
+    // See https://github.com/latticexyz/mud/issues/3386.
+    return (snapshot?.logs.filter((log) => log.args.tableId === options.table.tableId) ?? []) as never;
+  }
+
+  const client =
+    options.client ??
+    createClient({
+      transport: http(options.rpcUrl),
+    });
+  debug("fetching records for", options.table.label, "via RPC from", client.transport.url);
+  const blockLogs = await fetchBlockLogs({
+    fromBlock: options.fromBlock ?? 0n,
+    toBlock: options.toBlock ?? (await getAction(client, getBlockNumber, "getBlockNumber")({})),
+    maxBlockRange: 100_000n,
+    async getLogs({ fromBlock, toBlock }) {
+      return getStoreLogs(client, {
+        address: options.worldAddress,
+        fromBlock,
+        toBlock,
+        tableId: options.table.tableId,
+      });
+    },
+  });
+  return flattenStoreLogs(blockLogs.flatMap((block) => block.logs));
+}
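
A minimal usage sketch of the new `getRecordsAsLogs` export, fetching one table's records via an indexer. The config import, table name, world address, chain id, and indexer URL are illustrative placeholders; the import path matches the one used by `mirror.ts` above:

import { getRecordsAsLogs } from "@latticexyz/store-sync"; // path as imported in mirror.ts above
import config from "./mud.config"; // hypothetical MUD project config

// Placeholder inputs; swap in a real chain id, world address, and indexer URL.
const logs = await getRecordsAsLogs({
  table: config.tables.app__Tasks, // hypothetical table from the project config
  worldAddress: "0x0000000000000000000000000000000000000000",
  chainId: 31337,
  indexerUrl: "https://indexer.example",
});
console.log("fetched", logs.length, "Store_SetRecord logs");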

packages/store-sync/src/sql/getSnapshot.ts

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ export async function getSnapshot({
   });
 
   if (logsFilters && logsFilters.length === 0) {
-    return undefined;
+    return;
   }
 
   return getSnapshotLogs({

packages/store-sync/src/sql/index.ts

Lines changed: 0 additions & 4 deletions
This file was deleted.
