@@ -157,28 +157,40 @@ impl FsStore {
         // Load all the data we have into a doc
         match Chunks::load(&self.root, id) {
             Ok(Some(chunks)) => {
+                println!("hmm...");
                 let doc = chunks
                     .to_doc()
                     .map_err(|e| Error(ErrorKind::LoadDocToCompact(e)))?;

                 // Write the snapshot
                 let output_chunk_name = SavedChunkName::new_snapshot(doc.get_heads());
                 let chunk = doc.save();
-                write_chunk(&self.root, &paths, &chunk, output_chunk_name)?;
+                println!("Going to write: {:#?}", output_chunk_name);
+                write_chunk(&self.root, &paths, &chunk, output_chunk_name.clone())?;

                 // Remove all the old data
                 for incremental in chunks.incrementals.keys() {
                     let path = paths.chunk_path(&self.root, incremental);
+                    println!("Removing {:?}", path);
                     std::fs::remove_file(&path)
                         .map_err(|e| Error(ErrorKind::DeleteChunk(path, e)))?;
                 }
+                let just_wrote = paths.chunk_path(&self.root, &output_chunk_name);
                 for snapshot in chunks.snapshots.keys() {
                     let path = paths.chunk_path(&self.root, snapshot);
+                    println!("Removing Snap {:?}", path);
+
+                    if path == just_wrote {
+                        tracing::error!("Somehow trying to delete the same path we just wrote to. Not today Satan");
+                        continue;
+                    }
+
                     std::fs::remove_file(&path)
                         .map_err(|e| Error(ErrorKind::DeleteChunk(path, e)))?;
                 }
             }
             Ok(None) => {
+                println!("No existing files, and compaction requested first");
                 let output_chunk_name = SavedChunkName {
                     hash: uuid::Uuid::new_v4().as_bytes().to_vec(),
                     chunk_type: ChunkType::Snapshot,
@@ -187,6 +199,7 @@ impl FsStore {
                 write_chunk(&self.root, &paths, full_doc, output_chunk_name)?;
             }
             Err(e) => {
+                println!("Error loading chunks for {:?} {}", self.root, id);
                 tracing::error!(e=%e, "Error loading chunks");
             }
         }
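
The `output_chunk_name.clone()` above pairs with the `just_wrote` comparison: `write_chunk` consumes its `SavedChunkName`, so a copy is kept to recompute the freshly written snapshot's path and skip it when old snapshots are deleted (this is also why `Clone` is derived further down). A minimal sketch of that guard in isolation, with `delete_old_snapshots` as an illustrative name rather than anything in this commit:

    use std::path::{Path, PathBuf};

    // Remove every old snapshot file except the one just written.
    fn delete_old_snapshots(old: &[PathBuf], just_wrote: &Path) -> std::io::Result<()> {
        for path in old {
            if path == just_wrote {
                // Never delete the file that was just renamed into place.
                continue;
            }
            std::fs::remove_file(path)?;
        }
        Ok(())
    }
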
@@ -219,6 +232,10 @@ fn write_chunk(
     // Move the temporary file into a snapshot in the document data directory
     // with a name based on the hash of the heads of the document
     let output_path = paths.chunk_path(root, &name);
+
+    tracing::warn!("Renaming: {:?}", temp_save);
+    tracing::warn!("To: {:?}", output_path);
+
     std::fs::rename(&temp_save_path, &output_path)
         .map_err(|e| Error(ErrorKind::RenameTempFile(temp_save_path, output_path, e)))?;
@@ -286,13 +303,13 @@ impl DocIdPaths {
     }
 }

-#[derive(Debug, Hash, PartialEq, Eq)]
+#[derive(Debug, Hash, PartialEq, Eq, Clone)]
 enum ChunkType {
     Snapshot,
     Incremental,
 }

-#[derive(Debug, Hash, PartialEq, Eq)]
+#[derive(Debug, Hash, PartialEq, Eq, Clone)]
 struct SavedChunkName {
     hash: Vec<u8>,
     chunk_type: ChunkType,
@@ -355,7 +372,7 @@ impl Chunks {
     fn load(root: &Path, doc_id: &DocumentId) -> Result<Option<Self>, Error> {
         let doc_id_hash = DocIdPaths::from(doc_id);
         let level2_path = doc_id_hash.level2_path(root);
-        tracing::debug!(
+        tracing::warn!(
             root=%root.display(),
             doc_id=?doc_id,
             doc_path=%level2_path.display(),
@@ -439,7 +456,11 @@ impl Chunks {
         for chunk in self.incrementals.values() {
             bytes.extend(chunk);
         }
-        automerge::Automerge::load(&bytes)
+
+        automerge::Automerge::load_with_options(
+            &bytes,
+            automerge::LoadOptions::new().on_partial_load(automerge::OnPartialLoad::Ignore),
+        )
     }
 }

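The `to_doc` change in the last hunk swaps `automerge::Automerge::load` for `load_with_options` with `OnPartialLoad::Ignore`, so a truncated trailing chunk (say, from an interrupted write) no longer fails the whole load. The same call in standalone form, with `load_lenient` as an illustrative name:

    use automerge::{Automerge, AutomergeError, LoadOptions, OnPartialLoad};

    // Load concatenated chunk bytes; if the stream ends in a truncated chunk,
    // keep whatever loaded cleanly instead of failing the whole document.
    fn load_lenient(bytes: &[u8]) -> Result<Automerge, AutomergeError> {
        Automerge::load_with_options(
            bytes,
            LoadOptions::new().on_partial_load(OnPartialLoad::Ignore),
        )
    }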