diff --git a/csv-to-datasheet.js b/csv-to-datasheet.js
new file mode 100644
index 0000000..8683bd4
--- /dev/null
+++ b/csv-to-datasheet.js
@@ -0,0 +1,73 @@
+const fs = require("fs");
+const parser = require("csv-parse/sync");
+const process = require("process");
+const yargs = require("yargs");
+
+const args = yargs(process.argv.slice(2))
+    .command(
+        "$0 <filename>",
+        "Convert a module art CSV file to JSON",
+        () => {
+            yargs
+                .positional("filename", {
+                    describe: `Input is a CSV with the following data in each column:
+                        - ignored
+                        - key (string)
+                        - portrait path
+                        - thumbnail path
+                        - token path
+                        - subject path
+                        - label (string)
+                        - source book (string)
+                        - scale (number)
+                        - ancestry tags (comma-separated string => array)
+                        - category tags (comma-separated string => array)
+                        - equipment tags (comma-separated string => array)
+                        - feature tags (comma-separated string => array)
+                        - family tags (comma-separated string => array)
+                        - special tags (comma-separated string => array)
+                    `
+                });
+        }
+    )
+    .usage("Usage: node $0 <filename>")
+    .check((args) => typeof args.filename === "string" &&
+        fs.existsSync(args.filename) &&
+        fs.statSync(args.filename).isFile())
+    .help(false)
+    .version(false)
+    .parseSync();
+
+const csvData = fs.readFileSync(args.filename, { encoding: "utf-8" });
+const jsonData = parser
+    .parse(csvData)
+    .slice(1)
+    .map((row) => ({
+        "label": row[6],
+        "key": row[1],
+        "source": row[7],
+        "art": {
+            "portrait": row[2],
+            "thumb": row[3],
+            "token": row[4],
+            "subject": row[5],
+            "scale": Number(row[8]) || 1,
+        },
+        "tags": {
+            "ancestry": row[9] ? row[9].toLowerCase().split(",") : undefined,
+            "category": row[10] ? row[10].toLowerCase().split(",") : undefined,
+            "equipment": row[11] ? row[11].toLowerCase().split(",") : undefined,
+            "features": row[12] ? row[12].toLowerCase().split(",") : undefined,
+            "family": row[13] ? row[13].toLowerCase().split(",") : undefined,
+            "special": row[14] ? row[14].toLowerCase().split(",") : undefined,
+        },
+    }))
+    .map((element) => {
+        // Drop tag groups for empty columns so they are omitted from the output
+        for (const group in element.tags) {
+            if (element.tags[group] === undefined) { delete element.tags[group]; }
+        }
+        return element;
+    });
+
+fs.writeFileSync(args.filename.replace(/\.csv$/, ".json"), JSON.stringify(jsonData, null, 2), { encoding: "utf-8" });
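+
+// Sketch of one output entry (values are hypothetical, assuming a row whose
+// tag columns 10-14 are empty — those groups are dropped by the cleanup above):
+// {
+//   "label": "Goblin Warrior",
+//   "key": "goblin-warrior",
+//   "source": "Pathfinder Bestiary",
+//   "art": {
+//     "portrait": "art/goblin-warrior-portrait.webp",
+//     "thumb": "art/goblin-warrior-thumb.webp",
+//     "token": "art/goblin-warrior-token.webp",
+//     "subject": "art/goblin-warrior-subject.webp",
+//     "scale": 1
+//   },
+//   "tags": { "ancestry": ["goblin"] }
+// }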
diff --git a/csv-to-json.js b/csv-to-json.js
deleted file mode 100644
index 8872ee0..0000000
--- a/csv-to-json.js
+++ /dev/null
@@ -1,45 +0,0 @@
-const fs = require("fs");
-const parser = require("csv-parse/sync");
-const process = require("process");
-const yargs = require("yargs");
-
-const args = yargs(process.argv.slice(2))
-    .command(
-        "$0 <filename>",
-        "Convert a module art CSV file to JSON",
-        () => {
-            yargs
-                .positional("filename", {
-                    describe: "A CSV filename. The first column of the CSV is ignored. Columns 2-7 should be compendium ID, actor ID, actor image path, token image path, an optional scale-ratio antecedent (consequent of 1), and an optional boolean indicating whether random images are to be enabled.",
-                });
-        }
-    )
-    .usage("Usage: node $0 <filename>")
-    .check((args) => typeof args.filename === "string" &&
-        fs.existsSync(args.filename) &&
-        fs.statSync(args.filename).isFile())
-    .help(false)
-    .version(false)
-    .parseSync();
-
-const csvData = fs.readFileSync(args.filename, { encoding: "utf-8" });
-const jsonData = parser
-    .parse(csvData)
-    .slice(1)
-    .map((row) => ({
-        pack: row[1],
-        id: row[2],
-        actor: row[3],
-        token:
-            row[5].trim() || row[6]
-                ? { img: row[4], scale: Number(row[5]) || undefined, randomImg: !!row[6] || undefined }
-                : row[4],
-        randomImg: !!row[6],
-    }))
-    .reduce((accum, row) => {
-        accum[row.pack] ??= {};
-        accum[row.pack][row.id] = { actor: row.actor, token: row.token };
-        return accum;
-    }, {});
-
-fs.writeFileSync(args.filename.replace(/\.csv$/, ".json"), JSON.stringify(jsonData, null, 2), { encoding: "utf-8" });
diff --git a/csv-to-map.js b/csv-to-map.js
new file mode 100644
index 0000000..bf646f3
--- /dev/null
+++ b/csv-to-map.js
@@ -0,0 +1,84 @@
+const fs = require("fs");
+const parser = require("csv-parse/sync");
+const process = require("process");
+const yargs = require("yargs");
+
+function createTokenObject(token) {
+    // Exception catcher for older token packs: fixes the mapping for oversized
+    // small creatures when it can find them. Ideally the two scales would be
+    // saved separately rather than "inferring" the new value like this.
+    switch (token.ring.subject.scale) {
+        case 1.2:
+            token.ring.subject.scale = 1.5;
+            break;
+        case 1.6:
+            token.ring.subject.scale = 2;
+            break;
+        default:
+            break;
+    }
+
+    return token;
+}
+
+const args = yargs(process.argv.slice(2))
+    .command(
+        "$0 <filename>",
+        "Convert a module art CSV file to JSON",
+        () => {
+            yargs
+                .positional("filename", {
+                    describe: `A CSV file. The first row is ignored, since it is assumed to contain column headers.
+                        Each subsequent row should correspond to a single creature and contain the following data in each column:
+                        A (0) - Label/name                  NB: this field is ignored
+                        B (1) - Compendium ID               NB: includes the system prefix, e.g. 'pf2e.pathfinder-bestiary'
+                        C (2) - Actor ID                    NB: short form, e.g. 'Z7xWkQKCHGyd02B1'
+                        D (3) - Portrait image path
+                        E (4) - Token image path
+                        F (5) - Subject image path
+                        G (6) - Optional scale value        empty => default (1)
+                        H (7) - Optional boolean indicating whether random images are to be enabled     empty/undefined => false
+                        I (8) - Optional boolean indicating whether to enable the dynamic token ring    empty/undefined => true (recommended whenever there is subject artwork)
+                    `
+                });
+        }
+    )
+    .usage("Usage: node $0 <filename>")
+    .check((args) => typeof args.filename === "string" &&
+        fs.existsSync(args.filename) &&
+        fs.statSync(args.filename).isFile())
+    .help(false)
+    .version(false)
+    .parseSync();
+
+const csvData = fs.readFileSync(args.filename, { encoding: "utf-8" });
+const jsonData = parser
+    .parse(csvData)
+    .slice(1)
+    .map((row) => ({
+        pack: row[1],
+        id: row[2],
+        actor: row[3],
+        token: {
+            randomImg: row[7] === "TRUE",
+            texture: {
+                src: row[4],
+                scaleX: Number(row[6]) || undefined,
+                scaleY: Number(row[6]) || undefined,
+            },
+            ring: {
+                enabled: row[8] !== "FALSE",
+                subject: {
+                    texture: row[5] || undefined,
+                    scale: Number(row[6]) || undefined,
+                },
+            },
+        },
+    }))
+    .reduce((accum, row) => {
+        accum[row.pack] ??= {};
+        accum[row.pack][row.id] = { actor: row.actor, token: createTokenObject(row.token) };
+        return accum;
+    }, {});
+
+fs.writeFileSync(args.filename.replace(/\.csv$/, ".json"), JSON.stringify(jsonData, null, 2), { encoding: "utf-8" });
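+
+// Sketch of the output shape for one row (IDs and paths are hypothetical).
+// Note that createTokenObject() remaps only ring.subject.scale, so a CSV
+// scale of 1.2 yields texture scales of 1.2 but a subject scale of 1.5:
+// {
+//   "pf2e.pathfinder-bestiary": {
+//     "Z7xWkQKCHGyd02B1": {
+//       "actor": "art/goblin-portrait.webp",
+//       "token": {
+//         "randomImg": false,
+//         "texture": { "src": "art/goblin-token.webp", "scaleX": 1.2, "scaleY": 1.2 },
+//         "ring": { "enabled": true, "subject": { "texture": "art/goblin-subject.webp", "scale": 1.5 } }
+//       }
+//     }
+//   }
+// }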
diff --git a/readme.txt b/readme.txt
new file mode 100644
index 0000000..04fc706
--- /dev/null
+++ b/readme.txt
@@ -0,0 +1,8 @@
+This repository provides two scripts, which generate a token-mapping file and a datasheet respectively.
+They are run with the following commands:
+
+node csv-to-map.js <filename>
+node csv-to-datasheet.js <filename>
+
+<filename> = a CSV file
+The expected CSV formats are detailed in the respective scripts.
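+
+Example (hypothetical file name; the output JSON is written beside the input):
+node csv-to-map.js bestiary-map.csv   => creates bestiary-map.json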