@@ -9,11 +9,12 @@ Code related to permanently deleting projects.
import getLogger from "@cocalc/backend/logger";
import getPool from "@cocalc/database/pool";
- import { callback2 } from "@cocalc/util/async-utils";
- import { PostgreSQL } from "./types";
- import { minutes_ago } from "@cocalc/util/misc";
import { getServerSettings } from "@cocalc/database/settings";
+ import { callback2 } from "@cocalc/util/async-utils";
import { KUCALC_ON_PREMISES } from "@cocalc/util/db-schema/site-defaults";
+ import { minutes_ago } from "@cocalc/util/misc";
+ import { bulk_delete } from "./bulk-delete";
+ import { PostgreSQL } from "./types";

const log = getLogger("db:delete-projects");
@@ -84,6 +85,8 @@ FROM projects as p
  ON p.project_id = s.project_id
WHERE p.deleted = true
  AND p.state ->> 'state' != 'deleted'
+ ORDER BY
+   p.project_id, s.string_id
`;

/*
@@ -102,6 +105,8 @@ export async function cleanup_old_projects_data(
) {
  const settings = await getServerSettings();
  const on_prem = settings.kucalc === KUCALC_ON_PREMISES;
+   const L0 = log.extend("cleanup_old_projects_data");
+   const L = L0.debug;

  log.debug("cleanup_old_projects_data", { delay_ms, max_run_m, on_prem });
  const start_ts = new Date();
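The loop in the next hunk relies on the ORDER BY added to Q_CLEANUP_SYNCSTRINGS above: rows now arrive grouped by project_id, so per-project cleanup can fire exactly once, at the row where the id changes. A minimal sketch of that grouped-scan pattern, with hypothetical names (rowsSortedByKey, processRow, processGroup are illustrations, not code from this commit):

// Sketch: with rows sorted by a key, per-group work runs once per group,
// triggered whenever the key differs from the previous row's key.
let prevKey: string | undefined;
for (const { key, item } of rowsSortedByKey) {
  await processRow(item); // per-row work happens for every row
  if (prevKey !== key) {
    prevKey = key;
    await processGroup(key); // per-group work fires once per key
  }
}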
@@ -115,31 +120,91 @@ export async function cleanup_old_projects_data(
  for (const row of rows) {
    const { project_id, string_id } = row;
    if (start_ts < minutes_ago(max_run_m)) {
-       log.debug(
-         `cleanup_old_projects_data: too much time elapsed, breaking after ${num} syncstrings`,
-       );
+       L(`too much time elapsed, breaking after ${num} syncstrings`);
      break;
    }

-     log.debug(
-       `cleanup_old_projects_data: deleting syncstring ${project_id}/${string_id}`,
-     );
+     L(`deleting syncstring ${project_id}/${string_id}`);
    num += 1;
    await callback2(db.delete_syncstring, { string_id });

    // wait for the given delay_ms milliseconds
    await new Promise((done) => setTimeout(done, delay_ms));

+   // Q_CLEANUP_SYNCSTRINGS orders by project_id, hence we trigger project-specific actions when the id changes
    if (pid != project_id) {
      pid = project_id;
+     const L2 = L0.extend(project_id).debug;
+
      if (on_prem) {
-       log.debug(
-         `cleanup_old_projects_data: deleting project data in ${project_id}`,
-       );
+       L2(`cleanup_old_projects_data for project_id=${project_id}`);
        // TODO: this only works on-prem, and requires the project files to be mounted

-       log.debug(`deleting all shared files in project ${project_id}`);
+       L2(`deleting all shared files in project ${project_id}`);
        // TODO: do it directly like above, and also get rid of all those shares in the database
+
+       const delPublicPaths = await bulk_delete({
+         table: "public_paths",
+         field: "project_id",
+         value: project_id,
+       });
+       L2(`deleted public_paths ${delPublicPaths.rowsDeleted} entries`);
+
+       const delProjectLog = await bulk_delete({
+         table: "project_log",
+         field: "project_id",
+         value: project_id,
+       });
+       L2(`deleted project_log ${delProjectLog.rowsDeleted} entries`);
+
+       const delFileUse = await bulk_delete({
+         table: "file_use",
+         field: "project_id",
+         value: project_id,
+       });
+       L2(`deleted file_use ${delFileUse.rowsDeleted} entries`);
+
+       const delAccessLog = await bulk_delete({
+         table: "file_access_log",
+         field: "project_id",
+         value: project_id,
+       });
+       L2(`deleted file_access_log ${delAccessLog.rowsDeleted} entries`);
+
+       const delJupyterApiLog = await bulk_delete({
+         table: "jupyter_api_log",
+         field: "project_id",
+         value: project_id,
+       });
+       L2(`deleted jupyter_api_log ${delJupyterApiLog.rowsDeleted} entries`);
+
+       for (const field of [
+         "target_project_id",
+         "source_project_id",
+       ] as const) {
+         const delCopyPaths = await bulk_delete({
+           table: "copy_paths",
+           field,
+           value: project_id,
+         });
+         L2(`deleted copy_paths/${field} ${delCopyPaths.rowsDeleted} entries`);
+       }
+
+       const delListings = await bulk_delete({
+         table: "listings",
+         field: "project_id",
+         id: "project_id", // TODO: listings has a more complex ID; is this a problem?
+         value: project_id,
+       });
+       L2(`deleted ${delListings.rowsDeleted} listings`);
+
+       const delInviteTokens = await bulk_delete({
+         table: "project_invite_tokens",
+         field: "project_id",
+         value: project_id,
+         id: "token",
+       });
+       L2(`deleted ${delInviteTokens.rowsDeleted} entries`);
      }

      // now that we're done with that project, mark it as state.state ->> 'deleted'
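The on-prem branch above leans on the bulk_delete helper imported from ./bulk-delete, whose implementation is not part of this diff. A minimal sketch consistent with the call sites, assuming a node-postgres pool from @cocalc/database/pool and batched deletes keyed on an id column (illustrative only, not the actual helper):

import getPool from "@cocalc/database/pool";

interface BulkDeleteOpts {
  table: string; // table to delete from (trusted, internal identifier)
  field: string; // column to filter on, e.g. "project_id"
  value: string; // value the filter column must match
  id?: string; // key column used for batching; defaults to "id"
  limit?: number; // max rows deleted per batch
}

// Delete matching rows in small batches, to avoid holding long locks
// on large tables; returns the total number of rows deleted.
export async function bulk_delete({
  table,
  field,
  value,
  id = "id",
  limit = 1000,
}: BulkDeleteOpts): Promise<{ rowsDeleted: number }> {
  const pool = getPool();
  let rowsDeleted = 0;
  while (true) {
    const { rowCount } = await pool.query(
      `DELETE FROM ${table} WHERE ${id} IN
         (SELECT ${id} FROM ${table} WHERE ${field} = $1 LIMIT ${limit})`,
      [value],
    );
    rowsDeleted += rowCount ?? 0;
    if ((rowCount ?? 0) < limit) break; // last, partial batch: done
  }
  return { rowsDeleted };
}

Deleting in keyed batches rather than one big DELETE keeps lock times short on large tables, and it would explain the id option at the listings and project_invite_tokens call sites: the helper needs a usable key column per table, which is what the TODO about listings' more complex ID is flagging.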