@@ -314,10 +314,6 @@ private void cacheFilterAndLocs(final TileData currentTileData, final List<Abstr
 
     private void cacheTile(final int totalCycleCount, final TileData tileData, final CycleData currentCycleData) throws IOException {
         final byte[] tileByteArray = new byte[tileData.compressedBlockSize];
-        //we are going to explode the nibbles in to bytes to make PF filtering easier
-        final byte[] uncompressedByteArray = new byte[tileData.uncompressedBlockSize];
-        // ByteBuffer uncompressedByteArray = ByteBuffer.allocate(tileData.uncompressedBlockSize);
-        final byte[] unNibbledByteArray = new byte[tileData.uncompressedBlockSize * 2];
 
         // Read the whole compressed block into a buffer, then sanity check the length
         final InputStream stream = this.streams[totalCycleCount];
@@ -332,38 +328,18 @@ private void cacheTile(final int totalCycleCount, final TileData tileData, final
                     (totalCycleCount + 1), this.streamFiles[totalCycleCount].getAbsolutePath()));
         }
 
-        // Uncompress the data from the buffer we just wrote - use gzip input stream to write to uncompressed buffer
+        // Decompress the data from the buffer we just wrote - use gzip input stream to write to uncompressed buffer
         final ByteArrayInputStream byteInputStream = new ByteArrayInputStream(Arrays.copyOfRange(tileByteArray, 0, readBytes));
-
-        final GZIPInputStream gzipInputStream = new GZIPInputStream(byteInputStream, uncompressedByteArray.length);
-        int read;
-        int totalRead = 0;
-        try {
-            while ((read = gzipInputStream.read(uncompressedByteArray, totalRead, uncompressedByteArray.length - totalRead)) != -1) {
-                if (read == 0) break;
-                totalRead += read;
-            }
-        } catch (final EOFException eofException) {
-            throw new PicardException("Unexpected end of file " + this.streamFiles[totalCycleCount].getAbsolutePath()
-                    + " this file is likely corrupt or truncated. We have read "
-                    + totalRead + " and were expecting to read "
-                    + uncompressedByteArray.length);
-        }
-        if (totalRead != tileData.uncompressedBlockSize) {
-            throw new PicardException(String.format("Error while decompressing from BCL file for cycle %d. Offending file on disk is %s",
-                    (totalCycleCount + 1), this.streamFiles[totalCycleCount].getAbsolutePath()));
-        }
+        byte[] decompressedByteArray = decompressTile(totalCycleCount, tileData, byteInputStream);
 
         // Read uncompressed data from the buffer and expand each nibble into a full byte for ease of use
-        int index = 0;
-        for (final byte singleByte : uncompressedByteArray) {
-            unNibbledByteArray[index] = (byte) (singleByte & 0x0f);
-            index++;
-            unNibbledByteArray[index] = (byte) ((singleByte >> 4) & 0x0f);
-            index++;
-        }
-        gzipInputStream.close();
+        byte[] unNibbledByteArray = promoteNibblesToBytes(decompressedByteArray);
+        cachedTile[totalCycleCount] = filterNonPfReads(tileData, currentCycleData, unNibbledByteArray);
 
+        cachedTilePosition[totalCycleCount] = 0;
+    }
+
+    private byte[] filterNonPfReads(TileData tileData, CycleData currentCycleData, byte[] unNibbledByteArray) {
         // Write buffer contents to cached tile array
         // if nonPF reads are included we need to strip them out
         if (!currentCycleData.pfExcluded) {
@@ -383,11 +359,52 @@ private void cacheTile(final int totalCycleCount, final TileData tileData, final
                 }
                 filterIndex++;
             }
-            cachedTile[totalCycleCount] = filteredByteArray;
+            return filteredByteArray;
         } else {
-            cachedTile[totalCycleCount] = unNibbledByteArray;
+            return unNibbledByteArray;
         }
-        cachedTilePosition[totalCycleCount] = 0;
+    }
+
+    private byte[] promoteNibblesToBytes(byte[] decompressedByteArray) {
+        //we are going to explode the nibbles in to bytes to make PF filtering easier
+        final byte[] unNibbledByteArray = new byte[decompressedByteArray.length * 2];
+        int index = 0;
+        for (final byte singleByte : decompressedByteArray) {
+            unNibbledByteArray[index] = (byte) (singleByte & 0x0f);
+            index++;
+            unNibbledByteArray[index] = (byte) ((singleByte >> 4) & 0x0f);
+            index++;
+        }
+        return unNibbledByteArray;
+    }
+
+    private byte[] decompressTile(int totalCycleCount, TileData tileData, ByteArrayInputStream byteInputStream) throws IOException {
+        final byte[] decompressedByteArray = new byte[tileData.uncompressedBlockSize];
+        //only decompress the data if we are expecting data.
+        if (decompressedByteArray.length == 0) {
+            log.warn("Ignoring tile " + tileData.tileNum + " there are no PF reads.");
+        } else {
+            int read;
+            int totalRead = 0;
+            try (GZIPInputStream gzipInputStream = new GZIPInputStream(byteInputStream, decompressedByteArray.length)) {
+                while ((read = gzipInputStream.read(decompressedByteArray, totalRead, decompressedByteArray.length - totalRead)) != -1) {
+                    if (read == 0) {
+                        break;
+                    }
+                    totalRead += read;
+                }
+            } catch (final EOFException eofException) {
+                throw new PicardException("Unexpected end of file " + this.streamFiles[totalCycleCount].getAbsolutePath()
+                        + " this file is likely corrupt or truncated. We have read "
+                        + totalRead + " and were expecting to read "
+                        + decompressedByteArray.length);
+            }
+            if (totalRead != tileData.uncompressedBlockSize) {
+                throw new PicardException(String.format("Error while decompressing from BCL file for cycle %d. Offending file on disk is %s",
+                        (totalCycleCount + 1), this.streamFiles[totalCycleCount].getAbsolutePath()));
+            }
+        }
+        return decompressedByteArray;
     }
 
     public CycleData[] getCycleData() {