diff --git a/apps/obsidian/package.json b/apps/obsidian/package.json index 6b70f9052..46a2f46c7 100644 --- a/apps/obsidian/package.json +++ b/apps/obsidian/package.json @@ -22,6 +22,7 @@ "@types/node": "^20", "@types/react": "catalog:obsidian", "@types/react-dom": "catalog:obsidian", + "@types/mime-types": "3.0.1", "autoprefixer": "^10.4.21", "builtin-modules": "3.3.0", "dotenv": "^16.4.5", @@ -41,10 +42,11 @@ "@repo/utils": "workspace:*", "@supabase/supabase-js": "catalog:", "date-fns": "^4.1.0", + "mime-types": "^3.0.1", "nanoid": "^4.0.2", "react": "catalog:obsidian", "react-dom": "catalog:obsidian", "tailwindcss-animate": "^1.0.7", "tldraw": "3.14.2" } -} \ No newline at end of file +} diff --git a/apps/obsidian/src/utils/publishNode.ts b/apps/obsidian/src/utils/publishNode.ts index 10f79bcc9..83a87c196 100644 --- a/apps/obsidian/src/utils/publishNode.ts +++ b/apps/obsidian/src/utils/publishNode.ts @@ -1,6 +1,8 @@ import type { FrontMatterCache, TFile } from "obsidian"; import type { default as DiscourseGraphPlugin } from "~/index"; import { getLoggedInClient, getSupabaseContext } from "./supabaseContext"; +import { addFile } from "@repo/database/lib/files"; +import mime from "mime-types"; export const publishNode = async ({ plugin, @@ -26,21 +28,88 @@ export const publishNode = async ({ if (!myGroup) throw new Error("Cannot get group"); const existingPublish = (frontmatter.publishedToGroups as undefined | string[]) || []; - if (existingPublish.includes(myGroup)) return; // already published - const publishResponse = await client.from("ResourceAccess").insert({ - /* eslint-disable @typescript-eslint/naming-convention */ - account_uid: myGroup, - source_local_id: nodeId, - space_id: spaceId, - /* eslint-enable @typescript-eslint/naming-convention */ - }); + const idResponse = await client + .from("Content") + .select("last_modified") + .eq("source_local_id", nodeId) + .eq("space_id", spaceId) + .eq("variant", "full") + .maybeSingle(); + if (idResponse.error || !idResponse.data) { + throw idResponse.error || new Error("no data while fetching node"); + } + const lastModifiedDb = new Date( + idResponse.data.last_modified + "Z", + ).getTime(); + const embeds = plugin.app.metadataCache.getFileCache(file)?.embeds ?? []; + const attachments = embeds + .map(({ link }) => { + const attachment = plugin.app.metadataCache.getFirstLinkpathDest( + link, + file.path, + ); + if (attachment === null) { + console.warn("Could not find file for " + link); + } + return attachment; + }) + .filter((a) => !!a); + const lastModified = Math.max( + file.stat.mtime, + ...attachments.map((a) => a.stat.mtime), + ); + + if (existingPublish.includes(myGroup) && lastModified <= lastModifiedDb) + return; // already published + const publishResponse = await client.from("ResourceAccess").upsert( + { + /* eslint-disable @typescript-eslint/naming-convention */ + account_uid: myGroup, + source_local_id: nodeId, + space_id: spaceId, + /* eslint-enable @typescript-eslint/naming-convention */ + }, + { ignoreDuplicates: true }, + ); if (publishResponse.error && publishResponse.error.code !== "23505") // 23505 is duplicate key, which counts as a success. 
throw publishResponse.error;
- await plugin.app.fileManager.processFrontMatter(
- file,
- (fm: Record<string, unknown>) => {
- fm.publishedToGroups = [...existingPublish, myGroup];
- },
- );
+
+ const existingFiles: string[] = [];
+ for (const attachment of attachments) {
+ const mimetype = mime.lookup(attachment.path) || "application/octet-stream";
+ if (mimetype.startsWith("text/")) continue;
+ existingFiles.push(attachment.path);
+ const content = await plugin.app.vault.readBinary(attachment);
+ await addFile({
+ client,
+ spaceId,
+ sourceLocalId: nodeId,
+ fname: attachment.path,
+ mimetype,
+ created: new Date(attachment.stat.ctime),
+ lastModified: new Date(attachment.stat.mtime),
+ content,
+ });
+ }
+ let cleanupCommand = client
+ .from("FileReference")
+ .delete()
+ .eq("space_id", spaceId)
+ .eq("source_local_id", nodeId);
+ if (existingFiles.length) {
+ // supabase-js has no notIn filter; negate the PostgREST "in" operator instead
+ const keptPaths = [...new Set(existingFiles)]
+ .map((f) => '"' + f.replace(/"/g, '\\"') + '"')
+ .join(",");
+ cleanupCommand = cleanupCommand.not("filepath", "in", "(" + keptPaths + ")");
+ }
+ const cleanupResult = await cleanupCommand;
+ // do not fail on cleanup
+ if (cleanupResult.error) console.error(cleanupResult.error);
+
+ if (!existingPublish.includes(myGroup))
+ await plugin.app.fileManager.processFrontMatter(
+ file,
+ (fm: Record<string, unknown>) => {
+ fm.publishedToGroups = [...existingPublish, myGroup];
+ },
+ );
}; diff --git a/packages/database/package.json b/packages/database/package.json index d7887bb1a..7e7e5cf44 100644 --- a/packages/database/package.json +++ b/packages/database/package.json @@ -45,6 +45,7 @@ "@repo/utils": "workspace:*", "@supabase/auth-js": "catalog:", "@supabase/functions-js": "catalog:", + "@supabase/storage-js": "catalog:", "@supabase/supabase-js": "catalog:", "tslib": "2.5.1" }, diff --git a/packages/database/src/dbTypes.ts b/packages/database/src/dbTypes.ts index af239752b..51aac6d6a 100644 --- a/packages/database/src/dbTypes.ts +++ b/packages/database/src/dbTypes.ts @@ -511,6 +511,70 @@ export type Database = { }, ] } + file_gc: { + Row: { + filehash: string + } + Insert: { + filehash: string + } + Update: { + filehash?: string + } + Relationships: [] + } + FileReference: { + Row: { + created: string + filehash: string + filepath: string + last_modified: string + source_local_id: string + space_id: number + variant: Database["public"]["Enums"]["ContentVariant"] | null + } + Insert: { + created: string + filehash: string + filepath: string + last_modified: string + source_local_id: string + space_id: number + variant?: Database["public"]["Enums"]["ContentVariant"] | null + } + Update: { + created?: string + filehash?: string + filepath?: string + last_modified?: string + source_local_id?: string + space_id?: number + variant?: Database["public"]["Enums"]["ContentVariant"] | null + } + Relationships: [ + { + foreignKeyName: "FileReference_content_fkey" + columns: ["space_id", "source_local_id", "variant"] + isOneToOne: false + referencedRelation: "Content" + referencedColumns: ["space_id", "source_local_id", "variant"] + }, + { + foreignKeyName: "FileReference_content_fkey" + columns: ["space_id", "source_local_id", "variant"] + isOneToOne: false + referencedRelation: "my_contents" + referencedColumns: ["space_id", "source_local_id", "variant"] + }, + { + foreignKeyName: "FileReference_content_fkey" + columns: ["space_id", "source_local_id", "variant"] + isOneToOne: false + referencedRelation: "my_contents_with_embedding_openai_text_embedding_3_small_1536" + referencedColumns: ["space_id", "source_local_id", "variant"] + }, + ] + } group_membership: { Row: { admin: boolean | null @@ -1153,6 +1217,33 @@ export type Database
= { }, ] } + my_file_references: { + Row: { + created: string | null + filehash: string | null + filepath: string | null + last_modified: string | null + source_local_id: string | null + space_id: number | null + } + Insert: { + created?: string | null + filehash?: string | null + filepath?: string | null + last_modified?: string | null + source_local_id?: string | null + space_id?: number | null + } + Update: { + created?: string | null + filehash?: string | null + filepath?: string | null + last_modified?: string | null + source_local_id?: string | null + space_id?: number | null + } + Relationships: [] + } my_spaces: { Row: { id: number | null @@ -1434,6 +1525,8 @@ export type Database = { Returns: undefined } extract_references: { Args: { refs: Json }; Returns: number[] } + file_access: { Args: { hashvalue: string }; Returns: boolean } + file_exists: { Args: { hashvalue: string }; Returns: boolean } generic_entity_access: { Args: { target_id: number @@ -1890,3 +1983,4 @@ export const Constants = { }, }, } as const + diff --git a/packages/database/src/lib/files.ts b/packages/database/src/lib/files.ts new file mode 100644 index 000000000..41efc130c --- /dev/null +++ b/packages/database/src/lib/files.ts @@ -0,0 +1,56 @@
+import type { DGSupabaseClient } from "./client";
+
+const ASSETS_BUCKET_NAME = "assets";
+
+export const addFile = async ({
+ client, spaceId, sourceLocalId, fname, mimetype, created, lastModified, content
+}: {
+ client: DGSupabaseClient,
+ spaceId: number,
+ sourceLocalId: string,
+ fname: string,
+ mimetype: string,
+ created: Date,
+ lastModified: Date,
+ content: ArrayBuffer
+}): Promise<void> => {
+ // This assumes the content fits in memory.
+ const uint8Array = new Uint8Array(content);
+ const hashBuffer = await crypto.subtle.digest("SHA-256", uint8Array);
+ const hashArray = Array.from(new Uint8Array(hashBuffer));
+ const hashvalue = hashArray.map((h) => h.toString(16).padStart(2, "0")).join("");
+ const lookForDup = await client.rpc("file_exists", { hashvalue });
+ if (lookForDup.error) throw lookForDup.error;
+ const exists = lookForDup.data;
+ if (!exists) {
+ // We should use upsert here to be robust under concurrent syncs, but it triggers obscure RLS errors.
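+ // Content-addressed storage: the object key is the hex digest computed above,
+ // so identical attachments are stored only once; a 409 from storage means a
+ // concurrent client already uploaded the same blob, which counts as success.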
+ const uploadResult = await client.storage.from(ASSETS_BUCKET_NAME).upload(hashvalue, content, { contentType: mimetype });
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ if (uploadResult.error && String((uploadResult.error as Record<string, any>).statusCode) !== "409")
+ throw uploadResult.error;
+ }
+ // not doing an upsert because it does not update on conflict; insert, then update explicitly on duplicate key
+ const frefResult = await client.from("FileReference").insert({
+ /* eslint-disable @typescript-eslint/naming-convention */
+ space_id: spaceId,
+ source_local_id: sourceLocalId,
+ last_modified: lastModified.toISOString(),
+ /* eslint-enable @typescript-eslint/naming-convention */
+ filepath: fname,
+ filehash: hashvalue,
+ created: created.toISOString()
+ });
+
+ if (frefResult.error) {
+ if (frefResult.error.code === "23505") {
+ const updateResult = await client.from("FileReference").update({
+ // eslint-disable-next-line @typescript-eslint/naming-convention
+ last_modified: lastModified.toISOString(),
+ filehash: hashvalue,
+ created: created.toISOString()
+ }).eq("source_local_id", sourceLocalId).eq("space_id", spaceId).eq("filepath", fname);
+ if (updateResult.error) throw updateResult.error;
+ } else
+ throw frefResult.error;
+ }
+};
diff --git a/packages/database/supabase/config.toml b/packages/database/supabase/config.toml index cfc20427f..59f9aa408 100644 --- a/packages/database/supabase/config.toml +++ b/packages/database/supabase/config.toml @@ -55,6 +55,7 @@ schema_paths = [ './schemas/account.sql', './schemas/content.sql', './schemas/embedding.sql', + './schemas/assets.sql', './schemas/concept.sql', './schemas/contributor.sql', './schemas/sync.sql', diff --git a/packages/database/supabase/migrations/20260118210851_fileref.sql b/packages/database/supabase/migrations/20260118210851_fileref.sql new file mode 100644 index 000000000..c895f4029 --- /dev/null +++ b/packages/database/supabase/migrations/20260118210851_fileref.sql @@ -0,0 +1,150 @@
+CREATE TABLE IF NOT EXISTS public."FileReference" (
+ source_local_id character varying NOT NULL,
+ space_id bigint NOT NULL,
+ filepath character varying NOT NULL,
+ filehash character varying NOT NULL, -- or binary?
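+ -- hex-encoded SHA-256 of the blob; also its object key in the assets bucket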
+ "created" timestamp without time zone NOT NULL, + last_modified timestamp without time zone NOT NULL, + variant public."ContentVariant" GENERATED ALWAYS AS ('full') STORED +); +ALTER TABLE ONLY public."FileReference" +ADD CONSTRAINT "FileReference_pkey" PRIMARY KEY (source_local_id, space_id, filepath); + +ALTER TABLE ONLY public."FileReference" +ADD CONSTRAINT "FileReference_content_fkey" FOREIGN KEY ( + space_id, source_local_id, variant +) REFERENCES public."Content" (space_id, source_local_id, variant) ON DELETE CASCADE; + +CREATE INDEX file_reference_filepath_idx ON public."FileReference" USING btree (filepath); +CREATE INDEX file_reference_filehash_idx ON public."FileReference" USING btree (filehash); +ALTER TABLE public."FileReference" OWNER TO "postgres"; + +CREATE OR REPLACE VIEW public.my_file_references AS +SELECT + source_local_id, + space_id, + filepath, + filehash, + created, + last_modified +FROM public."FileReference" +WHERE ( + space_id = any(public.my_space_ids()) + OR public.can_view_specific_resource(space_id, source_local_id) +); + +GRANT ALL ON TABLE public."FileReference" TO authenticated; +GRANT ALL ON TABLE public."FileReference" TO service_role; +REVOKE ALL ON TABLE public."FileReference" FROM anon; + +ALTER TABLE public."FileReference" ENABLE ROW LEVEL SECURITY; + +DROP POLICY IF EXISTS file_reference_policy ON public."FileReference"; +DROP POLICY IF EXISTS file_reference_select_policy ON public."FileReference"; +CREATE POLICY file_reference_select_policy ON public."FileReference" FOR SELECT USING (public.in_space(space_id) OR public.can_view_specific_resource(space_id, source_local_id)); +DROP POLICY IF EXISTS file_reference_delete_policy ON public."FileReference"; +CREATE POLICY file_reference_delete_policy ON public."FileReference" FOR DELETE USING (public.in_space(space_id)); +DROP POLICY IF EXISTS file_reference_insert_policy ON public."FileReference"; +CREATE POLICY file_reference_insert_policy ON public."FileReference" FOR INSERT WITH CHECK (public.in_space(space_id)); +DROP POLICY IF EXISTS file_reference_update_policy ON public."FileReference"; +CREATE POLICY file_reference_update_policy ON public."FileReference" FOR UPDATE USING (public.in_space(space_id)); + +-- We cannot delete blobs from sql; we'll need to call an edge function with pg_net. +-- We could pass the name to the edge function, but it's safer to accumulate paths in a table +-- so next invocation will find all collected paths. +CREATE TABLE IF NOT EXISTS public.file_gc ( + filehash character varying NOT NULL PRIMARY KEY +); +ALTER TABLE public.file_gc OWNER TO "postgres"; + +GRANT ALL ON TABLE public.file_gc TO service_role; +REVOKE ALL ON TABLE public.file_gc FROM authenticated; +REVOKE ALL ON TABLE public.file_gc FROM anon; + +-- we could also find out if the storage exists, but not sure how that works with ACLs. +-- This is both faster and safer. 
+CREATE OR REPLACE FUNCTION public.file_exists(hashvalue VARCHAR) RETURNS boolean
+SET search_path = ''
+SECURITY DEFINER
+LANGUAGE sql AS $$
+SELECT EXISTS (SELECT true FROM public."FileReference" WHERE filehash = hashvalue LIMIT 1);
+$$;
+
+REVOKE EXECUTE ON FUNCTION public.file_exists(VARCHAR) FROM PUBLIC;
+GRANT EXECUTE ON FUNCTION public.file_exists(VARCHAR) TO service_role;
+
+CREATE OR REPLACE FUNCTION public.file_access(hashvalue VARCHAR) RETURNS boolean
+SET search_path = ''
+SECURITY DEFINER
+LANGUAGE sql AS $$
+SELECT EXISTS (
+ SELECT true FROM public."FileReference"
+ WHERE filehash = hashvalue AND (
+ public.in_space(space_id) OR
+ public.can_view_specific_resource(space_id, source_local_id)
+ )
+ LIMIT 1);
+$$;
+
+CREATE OR REPLACE FUNCTION public.after_delete_update_fref() RETURNS TRIGGER
+SET search_path = ''
+SECURITY DEFINER
+LANGUAGE plpgsql AS $$
+BEGIN
+ PERFORM pg_advisory_xact_lock(hashtext(OLD.filehash));
+ IF NOT public.file_exists(OLD.filehash) THEN
+ INSERT INTO public.file_gc VALUES (OLD.filehash);
+ -- TODO: Invocation with pg_net, following the pattern in
+ -- https://supabase.com/docs/guides/functions/schedule-functions
+ END IF;
+ -- NEW is unassigned in DELETE triggers; guard on TG_OP before touching it
+ IF TG_OP = 'UPDATE' AND NEW.filehash IS NOT NULL THEN
+ DELETE FROM public.file_gc WHERE filehash = NEW.filehash;
+ END IF;
+ RETURN OLD;
+END;
+$$;
+
+CREATE OR REPLACE FUNCTION public.after_insert_fref() RETURNS TRIGGER
+SET search_path = ''
+SECURITY DEFINER
+LANGUAGE plpgsql AS $$
+BEGIN
+ DELETE FROM public.file_gc WHERE filehash = NEW.filehash;
+ RETURN NEW;
+END;
+$$;
+
+CREATE TRIGGER on_delete_file_reference_trigger AFTER DELETE ON public."FileReference" FOR EACH ROW EXECUTE FUNCTION public.after_delete_update_fref();
+CREATE TRIGGER on_update_file_reference_trigger AFTER UPDATE ON public."FileReference" FOR EACH ROW EXECUTE FUNCTION public.after_delete_update_fref();
+CREATE TRIGGER on_insert_file_reference_trigger AFTER INSERT ON public."FileReference" FOR EACH ROW EXECUTE FUNCTION public.after_insert_fref();
+
+INSERT INTO storage.buckets
+(id, name, public)
+VALUES
+('assets', 'assets', false)
+ON CONFLICT (id) DO NOTHING;
+
+DROP POLICY IF EXISTS "storage_insert_assets_authenticated" ON storage.objects;
+CREATE POLICY "storage_insert_assets_authenticated"
+ON storage.objects FOR INSERT TO authenticated WITH CHECK (
+ bucket_id = 'assets'
+);
+
+DROP POLICY IF EXISTS "storage_select_assets_access" ON storage.objects;
+CREATE POLICY "storage_select_assets_access"
+ON storage.objects FOR SELECT TO authenticated USING (
+ bucket_id = 'assets' AND file_access(name)
+);
+
+DROP POLICY IF EXISTS "storage_delete_assets_noref" ON storage.objects;
+CREATE POLICY "storage_delete_assets_noref"
+ON storage.objects FOR DELETE TO authenticated USING (
+ bucket_id = 'assets' AND NOT EXISTS (
+ SELECT true FROM public."FileReference"
+ WHERE filehash = name LIMIT 1
+ )
+);
+
+DROP POLICY IF EXISTS "storage_update_assets_authenticated" ON storage.objects;
+CREATE POLICY "storage_update_assets_authenticated"
+ON storage.objects FOR UPDATE TO authenticated USING (bucket_id = 'assets');
diff --git a/packages/database/supabase/schemas/assets.sql b/packages/database/supabase/schemas/assets.sql new file mode 100644 index 000000000..a72b49ec9 --- /dev/null +++ b/packages/database/supabase/schemas/assets.sql @@ -0,0 +1,150 @@
+CREATE TABLE IF NOT EXISTS public."FileReference" (
+ source_local_id character varying NOT NULL,
+ space_id bigint NOT NULL,
+ filepath character varying NOT NULL,
+ filehash character varying NOT NULL, -- or binary?
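+ -- hex-encoded SHA-256 of the blob; also its object key in the assets bucket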
+ "created" timestamp without time zone NOT NULL, + last_modified timestamp without time zone NOT NULL, + -- not allowed virtual with user types + variant public."ContentVariant" GENERATED ALWAYS AS ('full') STORED +); +ALTER TABLE ONLY public."FileReference" +ADD CONSTRAINT "FileReference_pkey" PRIMARY KEY (source_local_id, space_id, filepath); + +ALTER TABLE ONLY public."FileReference" +ADD CONSTRAINT "FileReference_content_fkey" FOREIGN KEY ( + space_id, source_local_id, variant +) REFERENCES public."Content" (space_id, source_local_id, variant) ON DELETE CASCADE; +-- note the absence of on update ; the generated column forbids cascade, so it will error +-- However, update on those columns should never happen. + +CREATE INDEX file_reference_filepath_idx ON public."FileReference" USING btree (filepath); +CREATE INDEX file_reference_filehash_idx ON public."FileReference" USING btree (filehash); +ALTER TABLE public."FileReference" OWNER TO "postgres"; + +CREATE OR REPLACE VIEW public.my_file_references AS +SELECT + source_local_id, + space_id, + filepath, + filehash, + created, + last_modified +FROM public."FileReference" +WHERE ( + space_id = any(public.my_space_ids()) + OR public.can_view_specific_resource(space_id, source_local_id) +); + +GRANT ALL ON TABLE public."FileReference" TO authenticated; +GRANT ALL ON TABLE public."FileReference" TO service_role; +REVOKE ALL ON TABLE public."FileReference" FROM anon; + +ALTER TABLE public."FileReference" ENABLE ROW LEVEL SECURITY; + +DROP POLICY IF EXISTS file_reference_policy ON public."FileReference"; +DROP POLICY IF EXISTS file_reference_select_policy ON public."FileReference"; +CREATE POLICY file_reference_select_policy ON public."FileReference" FOR SELECT USING (public.in_space(space_id) OR public.can_view_specific_resource(space_id, source_local_id)); +DROP POLICY IF EXISTS file_reference_delete_policy ON public."FileReference"; +CREATE POLICY file_reference_delete_policy ON public."FileReference" FOR DELETE USING (public.in_space(space_id)); +DROP POLICY IF EXISTS file_reference_insert_policy ON public."FileReference"; +CREATE POLICY file_reference_insert_policy ON public."FileReference" FOR INSERT WITH CHECK (public.in_space(space_id)); +DROP POLICY IF EXISTS file_reference_update_policy ON public."FileReference"; +CREATE POLICY file_reference_update_policy ON public."FileReference" FOR UPDATE USING (public.in_space(space_id)); + +-- We cannot delete blobs from sql; we'll need to call an edge function with pg_net. +-- We could pass the name to the edge function, but it's safer to accumulate paths in a table +-- so next invocation will find all collected paths. +CREATE TABLE IF NOT EXISTS public.file_gc ( + filehash character varying NOT NULL PRIMARY KEY +); +ALTER TABLE public.file_gc OWNER TO "postgres"; + +GRANT ALL ON TABLE public.file_gc TO service_role; +REVOKE ALL ON TABLE public.file_gc FROM authenticated; +REVOKE ALL ON TABLE public.file_gc FROM anon; + +-- we could also find out if the storage exists, but not sure how that works with ACLs. +-- This is both faster and safer. 
+CREATE OR REPLACE FUNCTION public.file_exists(hashvalue VARCHAR) RETURNS boolean
+SET search_path = ''
+SECURITY DEFINER
+LANGUAGE sql AS $$
+SELECT EXISTS (SELECT true FROM public."FileReference" WHERE filehash = hashvalue LIMIT 1);
+$$;
+
+CREATE OR REPLACE FUNCTION public.file_access(hashvalue VARCHAR) RETURNS boolean
+SET search_path = ''
+SECURITY DEFINER
+LANGUAGE sql AS $$
+SELECT EXISTS (
+ SELECT true FROM public."FileReference"
+ WHERE filehash = hashvalue AND (
+ public.in_space(space_id) OR
+ public.can_view_specific_resource(space_id, source_local_id)
+ )
+ LIMIT 1);
+$$;
+
+CREATE OR REPLACE FUNCTION public.after_delete_update_fref() RETURNS TRIGGER
+SET search_path = ''
+SECURITY DEFINER
+LANGUAGE plpgsql AS $$
+BEGIN
+ PERFORM pg_advisory_xact_lock(hashtext(OLD.filehash));
+ IF NOT public.file_exists(OLD.filehash) THEN
+ INSERT INTO public.file_gc VALUES (OLD.filehash);
+ -- TODO: Invocation with pg_net, following the pattern in
+ -- https://supabase.com/docs/guides/functions/schedule-functions
+ END IF;
+ -- NEW is unassigned in DELETE triggers; guard on TG_OP before touching it
+ IF TG_OP = 'UPDATE' AND NEW.filehash IS NOT NULL THEN
+ DELETE FROM public.file_gc WHERE filehash = NEW.filehash;
+ END IF;
+ RETURN OLD;
+END;
+$$;
+
+CREATE OR REPLACE FUNCTION public.after_insert_fref() RETURNS TRIGGER
+SET search_path = ''
+SECURITY DEFINER
+LANGUAGE plpgsql AS $$
+BEGIN
+ DELETE FROM public.file_gc WHERE filehash = NEW.filehash;
+ RETURN NEW;
+END;
+$$;
+
+CREATE TRIGGER on_delete_file_reference_trigger AFTER DELETE ON public."FileReference" FOR EACH ROW EXECUTE FUNCTION public.after_delete_update_fref();
+CREATE TRIGGER on_update_file_reference_trigger AFTER UPDATE ON public."FileReference" FOR EACH ROW EXECUTE FUNCTION public.after_delete_update_fref();
+CREATE TRIGGER on_insert_file_reference_trigger AFTER INSERT ON public."FileReference" FOR EACH ROW EXECUTE FUNCTION public.after_insert_fref();
+
+INSERT INTO storage.buckets
+(id, name, public)
+VALUES
+('assets', 'assets', false)
+ON CONFLICT (id) DO NOTHING;
+
+DROP POLICY IF EXISTS "storage_insert_assets_authenticated" ON storage.objects;
+CREATE POLICY "storage_insert_assets_authenticated"
+ON storage.objects FOR INSERT TO authenticated WITH CHECK (
+ bucket_id = 'assets'
+);
+
+DROP POLICY IF EXISTS "storage_select_assets_access" ON storage.objects;
+CREATE POLICY "storage_select_assets_access"
+ON storage.objects FOR SELECT TO authenticated USING (
+ bucket_id = 'assets' AND file_access(name)
+);
+
+DROP POLICY IF EXISTS "storage_delete_assets_noref" ON storage.objects;
+CREATE POLICY "storage_delete_assets_noref"
+ON storage.objects FOR DELETE TO authenticated USING (
+ bucket_id = 'assets' AND NOT EXISTS (
+ SELECT true FROM public."FileReference"
+ WHERE filehash = name LIMIT 1
+ )
+);
+
+DROP POLICY IF EXISTS "storage_update_assets_authenticated" ON storage.objects;
+CREATE POLICY "storage_update_assets_authenticated"
+ON storage.objects FOR UPDATE TO authenticated USING (bucket_id = 'assets');
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f88acf07b..18d081d5c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -12,6 +12,9 @@ catalogs: "@supabase/functions-js": specifier: 2.88.0 version: 2.88.0 + "@supabase/storage-js": + specifier: 2.88.0 + version: 2.88.0 "@supabase/supabase-js": specifier: 2.88.0 version: 2.88.0 @@ -108,6 +111,9 @@ importers: date-fns: specifier: ^4.1.0 version: 4.1.0 + mime-types: + specifier: ^3.0.1 + version: 3.0.2 nanoid: specifier: ^4.0.2 version: 4.0.2 @@ -133,6 +139,9 @@ importers: "@repo/typescript-config": specifier: workspace:* version:
link:../../packages/typescript-config + "@types/mime-types": + specifier: 3.0.1 + version: 3.0.1 "@types/node": specifier: ^20 version: 20.19.13 @@ -477,6 +486,9 @@ importers: "@supabase/functions-js": specifier: "catalog:" version: 2.88.0 + "@supabase/storage-js": + specifier: "catalog:" + version: 2.88.0 "@supabase/supabase-js": specifier: "catalog:" version: 2.88.0 @@ -5923,6 +5935,12 @@ packages: integrity: sha512-RGdgjQUZba5p6QEFAVx2OGb8rQDL/cPRG7GiedRzMcJ1tYnUANBncjbSB1NRGwbvjcPeikRABz2nshyPk1bhWg==, } + "@types/mime-types@3.0.1": + resolution: + { + integrity: sha512-xRMsfuQbnRq1Ef+C+RKaENOxXX87Ygl38W1vDfPHRku02TgQr+Qd8iivLtAMcR0KF5/29xlnFihkTlbqFrGOVQ==, + } + "@types/minimatch@6.0.0": resolution: { @@ -6636,7 +6654,7 @@ packages: { integrity: sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==, } - deprecated: Use your platform's native atob() and btoa() methods instead abbrev@3.0.1: resolution: { @@ -8316,7 +8334,7 @@ packages: integrity: sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==, } engines: { node: ">=12" } - deprecated: Use your platform's native DOMException instead domhandler@5.0.3: resolution: { @@ -11037,7 +11055,7 @@ packages: { integrity: sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==, } - deprecated: This package is deprecated. Use require('node:util').isDeepStrictEqual instead. lodash.isequalwith@4.4.0: resolution: { @@ -11483,12 +11501,12 @@ packages: } engines: { node: ">= 0.6" } - mime-types@3.0.1: + mime-types@3.0.2: resolution: { - integrity: sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA==, + integrity: sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==, } - engines: { node: ">= 0.6" } + engines: { node: ">=18" } mime@3.0.0: resolution: { @@ -11800,7 +11818,7 @@ packages: integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==, } engines: { node: ">=10.5.0" } - deprecated: Use your platform's native DOMException instead node-fetch@2.6.7: resolution: { @@ -19849,6 +19867,8 @@ snapshots: "@types/mdurl@2.0.0": {} + "@types/mime-types@3.0.1": {} + "@types/minimatch@6.0.0": dependencies: minimatch: 10.0.3 @@ -20467,7 +20487,7 @@ snapshots: accepts@2.0.0: dependencies: - mime-types: 3.0.1 + mime-types: 3.0.2 negotiator: 1.0.0 acorn-globals@7.0.1: @@ -22191,7 +22211,7 @@ snapshots: fresh: 2.0.0 http-errors: 2.0.0 merge-descriptors: 2.0.0 - mime-types: 3.0.1 + mime-types: 3.0.2 on-finished: 2.4.1 once: 1.4.0 parseurl: 1.3.3 @@ -23645,7 +23665,7 @@ snapshots: dependencies: mime-db: 1.52.0 - mime-types@3.0.1: + mime-types@3.0.2: dependencies: mime-db: 1.54.0 @@ -25170,7 +25190,7 @@ snapshots: etag: 1.8.1 fresh: 2.0.0 http-errors: 2.0.0 - mime-types: 3.0.1 + mime-types: 3.0.2 ms: 2.1.3 on-finished: 2.4.1 range-parser: 1.2.1 @@ -26033,7 +26053,7 @@ snapshots: dependencies: content-type: 1.0.5 media-typer: 1.1.0 - mime-types: 3.0.1 + mime-types: 3.0.2 typed-array-buffer@1.0.3: dependencies: diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index a55e25f5f..111b9e818 100644 --- a/pnpm-workspace.yaml +++
b/pnpm-workspace.yaml @@ -31,6 +31,7 @@ catalog: react-dom: ^19.1.0 "@supabase/supabase-js": 2.88.0 "@supabase/functions-js": 2.88.0 + "@supabase/storage-js": 2.88.0 "@supabase/auth-js": 2.88.0 catalogs:
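Usage note: a minimal sketch (not part of the diff) of how the new addFile helper can be driven, mirroring the attachment loop in publishNode above. The helper name uploadNodeAttachment, the node:fs usage, and the "@repo/database/lib/client" import path (inferred from the "./client" import inside files.ts) are illustrative assumptions; client is assumed to be an already-authenticated DGSupabaseClient.

import type { DGSupabaseClient } from "@repo/database/lib/client"; // assumed path
import { addFile } from "@repo/database/lib/files";
import mime from "mime-types";
import { readFile, stat } from "node:fs/promises";

// Hypothetical helper: content-addressed upload of one attachment of a published node.
const uploadNodeAttachment = async (
  client: DGSupabaseClient, // assumed already logged in
  spaceId: number,
  nodeId: string,
  path: string,
): Promise<void> => {
  const bytes = await readFile(path);
  const stats = await stat(path);
  await addFile({
    client,
    spaceId,
    sourceLocalId: nodeId,
    fname: path,
    mimetype: mime.lookup(path) || "application/octet-stream",
    created: stats.birthtime,
    lastModified: stats.mtime,
    // copy into a standalone ArrayBuffer: Node Buffers may share a pooled backing store
    content: new Uint8Array(bytes).buffer,
  });
};

Calling this twice with identical bytes under different paths uploads the blob once (the second call is deduplicated by the file_exists RPC) while creating two FileReference rows, which is exactly the keep-alive condition the file_gc triggers count on.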