diff --git a/conf/modules.config b/conf/modules.config
index f378b3f9..8cbcb4e9 100644
--- a/conf/modules.config
+++ b/conf/modules.config
@@ -57,10 +57,23 @@ process {
         ]
     }
 
+    withName: COUNT_READS {
+        publishDir = [
+            [
+                path: { "${params.outdir}/count_reads/${meta.id}" },
+                mode: params.publish_dir_mode,
+            ]
+        ]
+    }
-
-
-
+    withName: COUNT_READS_AFTER_TRIMMING {
+        publishDir = [
+            [
+                path: { "${params.outdir}/count_reads_after_trimming/${meta.id}" },
+                mode: params.publish_dir_mode,
+            ]
+        ]
+    }
 
     withName: FASTQC {
         ext.args = '--quiet'
 
diff --git a/modules/local/count_reads.nf b/modules/local/count_reads.nf
new file mode 100644
index 00000000..89352212
--- /dev/null
+++ b/modules/local/count_reads.nf
@@ -0,0 +1,39 @@
+process COUNT_READS {
+    tag "$meta.id"
+    label 'process_medium'
+
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'nfcore/dualrnaseq:dev' :
+        'nfcore/dualrnaseq:dev' }"
+
+    input:
+    tuple val(meta), path(reads)
+
+    output:
+    tuple val(meta), path('*.txt'), emit: read_counts
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+
+    if (reads instanceof Path || reads.size() == 1) {
+        // Single-end: count reads in the one FASTQ file (gzipped line count / 4)
+        """
+        count_1=\$((\$(zcat ${reads[0]} | wc -l) / 4))
+        echo "${reads[0].baseName}\t\$count_1" > ${reads[0].baseName}_counts.txt
+        """
+    } else if (reads.size() == 2) {
+        // Paired-end: count reads in each of the two FASTQ files
+        """
+        count_1=\$((\$(zcat ${reads[0]} | wc -l) / 4))
+        count_2=\$((\$(zcat ${reads[1]} | wc -l) / 4))
+        echo "${reads[0].baseName}\t\$count_1" > ${reads[0].baseName}_counts.txt
+        echo "${reads[1].baseName}\t\$count_2" > ${reads[1].baseName}_counts.txt
+        """
+    } else {
+        """
+        echo "Error: Input 'reads' should contain either one or two files."
+        exit 1
+        """
+    }
+}
diff --git a/modules/nf-core/fastqc/main.nf b/modules/nf-core/fastqc/main.nf
index 9ae58381..76a42dd8 100644
--- a/modules/nf-core/fastqc/main.nf
+++ b/modules/nf-core/fastqc/main.nf
@@ -2,10 +2,10 @@ process FASTQC {
     tag "$meta.id"
     label 'process_medium'
 
-    conda "bioconda::fastqc=0.11.9"
+    conda "bioconda::fastqc=0.12.1"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/fastqc:0.11.9--0' :
-        'quay.io/biocontainers/fastqc:0.11.9--0' }"
+        'https://depot.galaxyproject.org/singularity/fastqc:0.12.1--hdfd78af_0' :
+        'quay.io/biocontainers/fastqc:0.12.1--hdfd78af_0' }"
 
     input:
     tuple val(meta), path(reads)
diff --git a/workflows/dualrnaseq.nf b/workflows/dualrnaseq.nf
index 727ac560..51459580 100644
--- a/workflows/dualrnaseq.nf
+++ b/workflows/dualrnaseq.nf
@@ -72,11 +72,13 @@ include { SALMON_ALIGNMENT_BASED } from '../subworkflows/local/salmon_alignment_
 //
 // MODULE: Installed directly from nf-core/modules
 //
-include { FASTQC                      } from '../modules/nf-core/fastqc/main'
-include { FASTQC as FASTQC_AFTER_TRIMMING } from '../modules/nf-core/fastqc/main'
-include { CUTADAPT                    } from '../modules/nf-core/cutadapt/main'
-include { MULTIQC                     } from '../modules/nf-core/multiqc/main'
-include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoftwareversions/main'
+include { COUNT_READS                 } from '../modules/local/count_reads'
+include { COUNT_READS as COUNT_READS_AFTER_TRIMMING } from '../modules/local/count_reads'
+include { FASTQC                      } from '../modules/nf-core/fastqc/main'
+include { FASTQC as FASTQC_AFTER_TRIMMING } from '../modules/nf-core/fastqc/main'
+include { CUTADAPT                    } from '../modules/nf-core/cutadapt/main'
+include { MULTIQC                     } from '../modules/nf-core/multiqc/main'
+include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoftwareversions/main'
 
 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -99,12 +101,17 @@ workflow DUALRNASEQ {
     )
     ch_versions = ch_versions.mix(INPUT_CHECK.out.versions)
 
+    COUNT_READS (
+        INPUT_CHECK.out.reads
+    )
+
+    ch_count_reads_merged = Channel.fromPath( "${params.outdir}/count_reads/*" ).collectFile(name: 'count_reads_merged.txt').view()
+
     if (!(params.skip_tools && params.skip_tools.split(',').contains('fastqc'))) {
         FASTQC(INPUT_CHECK.out.reads)
         ch_versions = ch_versions.mix(FASTQC.out.versions.first())
     }
-
     ch_reads = INPUT_CHECK.out.reads
     if (!(params.skip_tools && params.skip_tools.split(',').contains('cutadapt'))) {
         CUTADAPT(INPUT_CHECK.out.reads)
         ch_reads = CUTADAPT.out.reads
@@ -114,6 +121,8 @@ workflow DUALRNASEQ {
 
     if (!(params.skip_tools && (params.skip_tools.split(',').contains('fastqc') || params.skip_tools.split(',').contains('cutadapt')))) {
         FASTQC_AFTER_TRIMMING(ch_reads)
         ch_versions = ch_versions.mix(FASTQC_AFTER_TRIMMING.out.versions.first())
+        COUNT_READS_AFTER_TRIMMING (ch_reads)
+        ch_count_reads_merged = Channel.fromPath( "${params.outdir}/count_reads_after_trimming/*" ).collectFile(name: 'count_reads_after_trimming_merged.txt').view()
     }