diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
index f358604..3308ab8 100644
--- a/.github/workflows/python-package-conda.yml
+++ b/.github/workflows/python-package-conda.yml
@@ -7,6 +7,10 @@ jobs:
runs-on: ubuntu-latest
strategy:
max-parallel: 5
+ defaults:
+ run:
+ # Adding -l {0} helps ensure conda can be found properly.
+ shell: bash -l {0}
steps:
- uses: actions/checkout@v4
@@ -14,13 +18,18 @@ jobs:
uses: actions/setup-python@v3
with:
python-version: '3.10'
- - name: Add conda to system path
- run: |
- # $CONDA is an environment variable pointing to the root of the miniconda directory
- echo $CONDA/bin >> $GITHUB_PATH
+ - name: Setup Miniforge
+ uses: conda-incubator/setup-miniconda@v3
+ with:
+ miniforge-version: latest
+ python-version: ${{ matrix.python-version }}
+ environment-file: environment.yml
+ activate-environment: environment.yml
+ run-post: false
- name: Install dependencies
run: |
- conda env update --file environment.yml --name base
+ python -m pip install flake8 pytest coverage pytest-cov
+ pip install .
- name: Lint with flake8
run: |
conda install flake8
@@ -30,5 +39,9 @@ jobs:
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
- conda install pytest
- pytest
+ python -m pytest --cov=turmoric --cov-branch --cov-report=xml
+ - name: Upload coverage reports to Codecov
+ uses: codecov/codecov-action@v4.0.1
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ slug: Nance-Lab/TURMorIC
diff --git a/.gitignore b/.gitignore
index c74d117..5a34bc7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,11 @@
*cache*
*DS_Store*
-*egg-info*
+*egg-info/
+*.egg
+*.coverage
+.hypothesis/
+__pycache__/
+.vscode
+coverage.xml
+*.csv
+*.json
\ No newline at end of file
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 0000000..575f578
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,23 @@
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the OS, Python version, and other tools you might need
+build:
+ os: ubuntu-24.04
+ tools:
+ python: "3.13"
+
+# Build documentation in the "docs/" directory with Sphinx
+sphinx:
+ configuration: docs/conf.py
+
+# Optionally, but recommended,
+# declare the Python requirements required to build your documentation
+# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
+# python:
+# install:
+# - requirements: docs/requirements.txt
+
diff --git a/README.md b/README.md
index 3866708..eda283c 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,7 @@ This directory contains all of the source code for the functions of the TURMorIC
# test Directory
This directory holds all of the test functions for each of the components. This includes functions to test if the image directory exists, cleaning the file and folder paths, adjusting contrast among other preprocessing steps.
_________________________________________________________________
- ##Installation Instructions
+## Installation Instructions
To get started with TURMERIC, follow these steps:
Prerequisites:
diff --git a/docs/CHEME546_final_presentation.pptx b/docs/CHEME546_final_presentation.pptx
deleted file mode 100644
index da778aa..0000000
Binary files a/docs/CHEME546_final_presentation.pptx and /dev/null differ
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..d4bb2cb
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = .
+BUILDDIR = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle
new file mode 100644
index 0000000..d0c3d02
Binary files /dev/null and b/docs/_build/doctrees/environment.pickle differ
diff --git a/docs/_build/doctrees/index.doctree b/docs/_build/doctrees/index.doctree
new file mode 100644
index 0000000..6c710fe
Binary files /dev/null and b/docs/_build/doctrees/index.doctree differ
diff --git a/docs/_build/doctrees/modules.doctree b/docs/_build/doctrees/modules.doctree
new file mode 100644
index 0000000..f700c87
Binary files /dev/null and b/docs/_build/doctrees/modules.doctree differ
diff --git a/docs/_build/doctrees/turmoric.doctree b/docs/_build/doctrees/turmoric.doctree
new file mode 100644
index 0000000..5ad1b81
Binary files /dev/null and b/docs/_build/doctrees/turmoric.doctree differ
diff --git a/docs/_build/html/.buildinfo b/docs/_build/html/.buildinfo
new file mode 100644
index 0000000..2fa7d40
--- /dev/null
+++ b/docs/_build/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file records the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: acbd4b40445a055688478a090e8bc54d
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/_build/html/_modules/index.html b/docs/_build/html/_modules/index.html
new file mode 100644
index 0000000..1cddef8
--- /dev/null
+++ b/docs/_build/html/_modules/index.html
@@ -0,0 +1,106 @@
+
+
+
+
+
+[docs]
+defapply_li_threshold(file,size):
+"""
+ Applies Li thresholding to all .tif images in the input folder (and subfolders)
+ and saves the binary masks in the output folder.
+
+ Parameters:
+ - input_folder: Path to the folder containing .tif images.
+ - output_folder: Path to save the processed binary masks.
+ - min_object_size: Minimum size of objects to retain in the binary mask.
+ """
+
+ # Read the image
+ im=io.imread(file)
+
+ # Assume the second channel is the microglia channel
+ microglia_im=im[:,:,1]ifim.ndim==3elseim
+
+ # Apply Li threshold
+ thresh_li=filters.threshold_li(microglia_im)
+ binary_li=microglia_im>thresh_li
+
+ # Remove small objects and fill holes
+ binary_li=morphology.remove_small_objects(binary_li,min_size=size)
+ binary_li=ndimage.binary_fill_holes(binary_li)
+
+ returnbinary_li
+
+
+
+
+
+[docs]
+defapply_li_threshold_recursively(input_folder,output_folder='./li_output/',threshold_method='li'):
+ ifnotos.path.isdir(input_folder):
+ print(f"Error: Input folder '{input_folder}' does not exist.")
+ return
+
+ threshold_function=threshold_helper_func(threshold_method)
+
+ # Create output folder if it doesn't exist
+ os.makedirs(output_folder,exist_ok=True)
+
+ # Walk through all files and subfolders
+ forroot,_,filesinos.walk(input_folder):
+ forfileinfiles:
+ iffile.endswith(".tif"):
+ # Full input path
+ input_path=os.path.join(root,file)
+
+ # Create corresponding output subfolder
+ relative_path=os.path.relpath(root,input_folder)
+ output_subfolder=os.path.join(output_folder,relative_path)
+ os.makedirs(output_subfolder,exist_ok=True)
+
+ # Full output path
+ output_path=os.path.join(output_subfolder,file.replace(".tif","_"+threshold_method+"_thresh.npy"))
+ try:
+ binary_image=threshold_function(file)
+ np.save(output_path,binary_image)
+
+ exceptExceptionase:
+ print(f"Error processing {input_path}: {e}")
+
+ print(f"Processing completed. Results are saved in '{output_folder}'.")
+importos
+importshutil
+importrandom
+fromcollectionsimportdefaultdict
+
+"""
+Takes in a directory of images to separate them into training and testing data.
+
+The image directory is passed into the function along with groups like
+brain region or sex as well as treatment conditions of the slices. The
+function then splits the images into a 80:20 training and testing data
+without data leakage. The data is grouped by slice and treatment
+conditions in new training and testing directories.
+
+Parameters:
+ base_dir: The directory of all images to be used for training and testing.
+ groups: The variable groups of the images like brain region or subject sex.
+ treatment_conditions: The treatment applied to the slices.
+
+Returns:
+ train_dir: The directory of training files from an 80:20 split from the base directory.
+ test_dir: The directory of testing files from an 80:20 split from the base directory.
+"""
+# Function to organize files into training and testing folders without slice leakage
+
+[docs]
+deforganize_files_without_leakage(base_dir,train_dir,test_dir,groups,treatment_conditions,test_size=0.2):
+ forgroupingroups:
+ forconditionintreatment_conditions:
+ condition_path=os.path.join(base_dir,group,condition)
+ ifnotos.path.exists(condition_path):
+ continue
+
+ print(f'processing {group}{condition} :)')
+
+ # Group files by brain slice
+ slice_files=defaultdict(list)
+ forfileinos.listdir(condition_path):
+ ifos.path.isfile(os.path.join(condition_path,file)):
+ # Extract slice_id based on the naming pattern
+ slice_id="_".join(file.split("_")[2:3])# Extract the third element (Slice204)
+ slice_files[slice_id].append(file)
+
+ # Split slices into training and testing
+ slice_ids=list(slice_files.keys())
+ random.seed(42)# For reproducibility
+ random.shuffle(slice_ids)
+
+ split_index=int(len(slice_ids)*(1-test_size))
+ train_slices=slice_ids[:split_index]
+ test_slices=slice_ids[split_index:]
+
+ # Create subdirectories for training and testing
+ train_subdir=os.path.join(train_dir,group,condition)
+ test_subdir=os.path.join(test_dir,group,condition)
+ os.makedirs(train_subdir,exist_ok=True)
+ os.makedirs(test_subdir,exist_ok=True)
+
+ # Move files to the appropriate folders
+ forslice_idintrain_slices:
+ forfileinslice_files[slice_id]:
+ shutil.copy(os.path.join(condition_path,file),os.path.join(train_subdir,file))
+
+ forslice_idintest_slices:
+ forfileinslice_files[slice_id]:
+ shutil.copy(os.path.join(condition_path,file),os.path.join(test_subdir,file))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/turmoric/vampire_model.html b/docs/_build/html/_modules/turmoric/vampire_model.html
new file mode 100644
index 0000000..2bac379
--- /dev/null
+++ b/docs/_build/html/_modules/turmoric/vampire_model.html
@@ -0,0 +1,291 @@
+
+
+
+
+
+
+
+ turmoric.vampire_model — TURMorIC 0.0.1 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+[docs]
+classVampireModelTrainer:
+"""
+ A class to handle VAMPIRE model training and application for brain image analysis.
+ """
+
+ def__init__(self,base_path:str,treatments:List[str],groups:List[str]):
+"""
+ Initialize the VAMPIRE model trainer.
+
+ Args:
+ base_path: Base directory path for image data
+ treatments: List of treatment conditions
+ groups: List of experimental groups
+ """
+ self.base_path=Path(base_path)
+ self.treatments=treatments
+ self.groups=groups
+ self.model_path:Optional[Path]=None
+
+ # Validate base path exists
+ ifnotself.base_path.exists():
+ raiseFileNotFoundError(f"Base path does not exist: {self.base_path}")
+
+
+[docs]
+ defextract_features(self,image_set_path:Path)->None:
+"""
+ Extract features from images using VAMPIRE.
+
+ Args:
+ image_set_path: Path to the image dataset
+ """
+ logger.info(f"Extracting features from: {image_set_path}")
+
+ try:
+ vampire.extraction.extract_properties(str(image_set_path))
+ logger.info("Feature extraction completed successfully")
+ exceptExceptionase:
+ logger.error(f"Error during feature extraction: {e}")
+ raise
+
+
+
+[docs]
+ deftrain_model(self,image_set_path:Path,model_name:str='li',
+ num_points:int=50,num_clusters:int=5)->Path:
+"""
+ Train a VAMPIRE model on the extracted features.
+
+ Args:
+ image_set_path: Path to the training image dataset
+ model_name: Name identifier for the model
+ num_points: Number of points for model training
+ num_clusters: Number of clusters for model training
+
+ Returns:
+ Path to the trained model file
+ """
+ logger.info(f"Training VAMPIRE model: {model_name}")
+
+ build_info_df=pd.DataFrame({
+ 'img_set_path':[str(image_set_path)],
+ 'output_path':[str(image_set_path)],
+ 'model_name':[model_name],
+ 'num_points':[num_points],
+ 'num_clusters':[num_clusters],
+ 'num_pc':[np.nan]
+ })
+
+ try:
+ vampire.quickstart.fit_models(build_info_df)
+
+ # Find the generated model file
+ model_pattern=f"model_{model_name}_({num_points}_{num_clusters}_*)__.pickle"
+ model_files=list(image_set_path.glob(model_pattern))
+
+ ifnotmodel_files:
+ raiseFileNotFoundError(f"No model file found matching pattern: {model_pattern}")
+
+ self.model_path=model_files[0]
+ logger.info(f"Model trained successfully: {self.model_path}")
+ returnself.model_path
+
+ exceptExceptionase:
+ logger.error(f"Error during model training: {e}")
+ raise
+
+
+
+[docs]
+ defcreate_apply_dataframe(self,test_base_path:Path,model_path:Path)->pd.DataFrame:
+"""
+ Create a DataFrame for applying the trained model to test datasets.
+
+ Args:
+ test_base_path: Base path for test datasets
+ model_path: Path to the trained model
+
+ Returns:
+ DataFrame with application configuration
+ """
+ apply_data=[]
+
+ forgroupinself.groups:
+ fortreatmentinself.treatments:
+ img_set_path=test_base_path/group/treatment
+
+ # Check if path exists before adding to dataframe
+ ifimg_set_path.exists():
+ apply_data.append({
+ 'img_set_path':str(img_set_path),
+ 'model_path':str(model_path),
+ 'output_path':str(img_set_path),
+ 'img_set_name':treatment
+ })
+ logger.debug(f"Added to apply list: {treatment}")
+ else:
+ logger.warning(f"Path does not exist, skipping: {img_set_path}")
+
+ ifnotapply_data:
+ raiseValueError("No valid test datasets found")
+
+ returnpd.DataFrame(apply_data)
+
+
+
+[docs]
+ defapply_model(self,test_base_path:Path)->None:
+"""
+ Apply the trained model to test datasets.
+
+ Args:
+ test_base_path: Base path for test datasets
+ """
+ ifself.model_pathisNone:
+ raiseValueError("Model must be trained before applying")
+
+ logger.info(f"Applying model to test datasets in: {test_base_path}")
+
+ try:
+ apply_info_df=self.create_apply_dataframe(test_base_path,self.model_path)
+ logger.info(f"Applying model to {len(apply_info_df)} datasets")
+
+ vampire.quickstart.transform_datasets(apply_info_df)
+ logger.info("Model application completed successfully")
+
+ exceptExceptionase:
+ logger.error(f"Error during model application: {e}")
+ raise
+
+
+
+[docs]
+ defrun_full_pipeline(self,training_subpath:str="training/vampire_data",
+ testing_subpath:str="testing/vampire_data")->None:
+"""
+ Run the complete training and application pipeline.
+
+ Args:
+ training_subpath: Relative path to training data
+ testing_subpath: Relative path to testing data
+ """
+ train_path=self.base_path/training_subpath
+ test_path=self.base_path/testing_subpath
+
+ logger.info("Starting VAMPIRE model pipeline")
+ logger.info(f"Training path: {train_path}")
+ logger.info(f"Testing path: {test_path}")
+
+ # Step 1: Extract features
+ self.extract_features(train_path)
+
+ # Step 2: Train model
+ self.train_model(train_path)
+
+ # Step 3: Apply model to test data
+ self.apply_model(test_path)
+
+ logger.info("Pipeline completed successfully")
Applies Li thresholding to all .tif images in the input folder (and subfolders)
+and saves the binary masks in the output folder.
+
Parameters:
+- input_folder: Path to the folder containing .tif images.
+- output_folder: Path to save the processed binary masks.
+- min_object_size: Minimum size of objects to retain in the binary mask.
+
+
+
+
\ No newline at end of file
diff --git a/docs/components_specs.md b/docs/components_specs.md
deleted file mode 100644
index 4701fef..0000000
--- a/docs/components_specs.md
+++ /dev/null
@@ -1,161 +0,0 @@
-# GUI
-### **What it does**
-The **Interactive GUI Component** provides a **graphical interface** that allows users to visualize, adjust, and process images **without coding expertise**. This component is essential for **Maya, Jaden, and Ethan**, who prefer an intuitive, step-by-step workflow.
-[More details on GUI implementation here](gui_details.md)
-
-### **Inputs**
-- **Uploaded raw or preprocessed images**.
-- **Segmentation overlays** from the **Segmentation & Quantification Component**.
-- **User inputs** (sliders, dropdowns, buttons) for fine-tuning thresholding and segmentation settings.
-
-### **Outputs**
-- **Real-time visualization** of segmentation overlays.
-- **Exported user-defined settings**, enabling reproducibility.
-- **Feedback alerts**, notifying users of potential image quality or segmentation issues.
-
-### **How it uses other components**
-- Works with **Adaptive Thresholding Component** to interactively adjust segmentation.
-- Allows real-time refinement of **Segmentation & Quantification Component** outputs.
-
-### **Side effects**
-- **User bias may affect segmentation accuracy** when manually adjusting settings.
-- **Real-time image processing can be slow** for large datasets, requiring optimization.
-
-# **Image Preprocessing Component**
-
-### **What it does**
-The **Image Preprocessing Component** prepares raw microscopy images for segmentation and analysis. It ensures consistency in image format, removes background noise, adjusts contrast, and normalizes variations in staining intensity to facilitate accurate segmentation. Since users like **Maya and Jaden** have limited experience with image processing, this component must function with minimal user input while providing automated enhancements.
-
-### ** Important Parameters and Roles**
-- Splits segmented microglia images into four quadrants for each image in a specified folder.
-- Selects training and testing datasets, organizing images into directories for model building and testing.
-- Colors segmented images based on shape modes with custom colors.
-- Handles image segmentation and labeling using shape modes from CSV.
-- Automates labeling for multiple images based on provided file paths.
-
-### **Inputs**
-- A **folder** containing **raw microscopy images** in **.nd2** format or **a single .nd2 image**.
-- **Metadata**, including **resolution, stain type, exposure time, and microscope settings**, to be factored into preprocessing.
-- **User-defined parameters** (optional), such as **contrast adjustment levels, noise reduction settings, and intensity normalization preferences**.
-
-### **Outputs**
-- **Preprocessed images** optimized for segmentation, saved in a designated output directory.
-- **Log file** documenting preprocessing steps and identifying any issues (e.g., overexposure, poor contrast).
-- **CSV file** to store model-building and application information.
-- **Visual comparisons** allowing users to assess preprocessing improvements before segmentation.
-
-### **How it uses other components**
-- Passes enhanced images to the **Segmentation & Quantification Component** for further processing.
-- Interacts with the **Adaptive Thresholding Component** to refine intensity adjustments dynamically.
-- **Integrates with the Interactive GUI**, providing users with previews and manual adjustment options.
-
-### **Side effects**
-- **Potential alteration of cellular structures** if filters are applied too aggressively.
-- **Increased storage usage** due to the creation of multiple image versions.
-- **Over-processing risk**, possibly introducing artificial enhancements that could skew analysis results.
-- **Renames and organizes images** for VAMPIRE analysis according to a specific naming convention.
-- **Saves the recolored images** at specified output paths.
-- **Sends colored image files** to the GUI
-- **Spits out warning not error** if inputting an image that has already been split asking user if they want to continue and overwrite quad images
-
-
-## **Adaptive Thresholding Component**
-
-### **What it does**
-The **Adaptive Thresholding Component** dynamically adjusts segmentation parameters based on staining intensity variations. This is particularly useful for **Jaden**, who experiences inconsistent thresholding due to variations in staining quality.
-
-### **Important Parameters and Roles**
-- Applies multiple thresholding methods (Li, Otsu, Mean, etc.) to images.
-- Uses a custom function for folder filtering and prepares image lists for threshold testing.
-- Saves the thresholded images and moves them to a dedicated folder.
-- Performs analysis to determine the optimal threshold (Li threshold) for microglia identification based on visual inspection.
-- Removes small objects below a size threshold (based on microglia size) using Sci-kit's morphology functions.
-
-### **Inputs**
-- **Image data** from the **Image Preprocessing Component**.
-- **User-defined or auto-calculated threshold values**, affecting segmentation sensitivity.
-- **Reference datasets** (if available) containing **annotated images** to enhance thresholding accuracy.
-
-### **Outputs**
-- **Optimized segmentation masks**, ensuring consistency across images.
-- **Suggested threshold values**, which users can refine manually.
-- **Heatmaps visualizing staining intensity variations**, aiding in sample consistency analysis.
-
-### **How it uses other components**
-- Works with the **Segmentation & Quantification Component** to enhance segmentation accuracy.
-- Provides **real-time feedback** in the **Interactive GUI**, enabling users to fine-tune thresholding.
-- **Applies to multiple images** using the **Batch Processing Component**, ensuring uniform processing.
-
-### **Side effects**
-- **Overcorrection risks**, where weakly stained structures may be misclassified as background.
-- **Automatically generated values may not always align with user expectations**, requiring manual validation.
-
-## Segmentation & Quantification Component**
-
-### **What it does**
-The **Segmentation & Quantification Component** detects and outlines **microglia and glial cells** within images, extracting morphological features such as **cell shape, size, clustering behavior, and complexity**. This component ensures high segmentation accuracy while remaining user-friendly, especially for **Maya, Amina, and Ethan**, who require automated processing.
-
-### **Important Parameters and Roles**
-- Applies image segmentation using skimage.measure.label and calculates multiple region properties (area, centroid, eccentricity, etc.) for each labeled region.
-- Calculates additional features like circularity and aspect ratio for microglia cells.
-
-### **Inputs**
-- **Preprocessed images** from the **Image Preprocessing Component**.
-- **Segmentation parameters** from the **Adaptive Thresholding Component**
-- **Ground truth data** (if available) for validation and segmentation accuracy improvements.
-
-### **Outputs**
-- **Segmented images** in **.tiff** with labeled cellular structures.
-- **.csv files containing quantitative datasets**, including:
- - **Cell count**
- - **Shape descriptors** (circularity, elongation, convexity)
- - **Clustering and spatial distribution statistics**
-- **Segmentation overlays** that allow users to visually assess analysis results.
-
-### **How it uses other components**
-- Works with **Image Preprocessing Component** to ensure quality segmentation.
-- Uses **Adaptive Thresholding Component** recommendations to refine segmentation.
-- Passes extracted features to the **Data Export API** for storage and computational analysis.
-- **Integrates with the Interactive GUI**, enabling real-time parameter adjustments.
-
-### **Side effects**
-- **Incorrect threshold settings** may result in over-segmentation or under-segmentation.
-- **Processing large images may cause computational delays**, necessitating GPU acceleration.
-- **User modifications in the GUI** could introduce segmentation variability, affecting reproducibility.
-
-## **Batch Processing Component**
-
-### **What it does**
-The **Batch Processing Component** enables researchers to process multiple images simultaneously, ensuring consistency across experiments. This is essential for **Amina and Olivia**, who require large-scale data analysis.
-
-### **Inputs**
-- **Folder of images** requiring batch segmentation.
-- **Predefined segmentation and thresholding parameters** for consistency.
-
-### **Outputs**
-- **Batch-processed segmented images** stored in a structured directory.
-- **Statistical summaries** for large-scale comparative studies.
-
-### **How it uses other components**
-- Uses **Segmentation & Quantification Component** to apply segmentation across multiple images.
-- Works with **Data Export API** for structured results storage.
-
-## **Data Export API Component**
-
-### **What it does**
-The **Data Export API Component** saves analysis results in structured formats (**CSV, JSON**) for computational analysis and machine learning applications. This is essential for **Olivia**, who needs structured data for predictive modeling.
-
-### **Inputs**
-- **Extracted morphological features** from the **Segmentation & Quantification Component**.
-- **User-defined export preferences** (format, fields to include).
-
-### **Outputs**
-- **Structured datasets** (CSV, JSON) ready for statistical or machine learning analysis.
-
-### **How it uses other components**
-- Works with **Batch Processing Component** to export large datasets efficiently.
-- **Supports integration with machine learning pipelines** for advanced analysis.
-
-### **Side effects (moar deets)**
-- **Outputs are formatted correctly** to allow for additional processing.
-- **Incorrectly formatted outputs** may require additional processing.
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..d7f1a3d
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,34 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../src/'))
+
+project = 'TURMorIC'
+copyright = '2025, Nels Schimek, Colin Landis'
+author = 'Nels Schimek, Colin Landis'
+release = '0.0.1'
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = []
+
+templates_path = ['_templates']
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = 'sphinx_rtd_theme'
+html_static_path = ['_static']
+
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx_rtd_theme', 'sphinx.ext.viewcode']
diff --git a/docs/exisiting_components.md b/docs/exisiting_components.md
deleted file mode 100644
index dab9e19..0000000
--- a/docs/exisiting_components.md
+++ /dev/null
@@ -1,54 +0,0 @@
-Current components of codebase:
-
-vampire.py:
-- Description: Launches the interface for the VAMPIRE GUI.
-- Important Parameters and Roles:
- - Sets up the graphical user interface for the VAMPIRE tool.
- - Implements functions for user input and interacts with other modules (e.g., loading CSV files, selecting output folders).
- - Controls the workflow through functions such as Model, getcsv, and getdir.
-
----
-mainbody.py:
-- Description: Handles the core VAMPIRE analysis process, including:
- - Registration: Aligns boundaries to remove rotational variance.
- - PCA (Principal Component Analysis): Reduces data dimensions for better analysis.
- - Clustering: Groups data based on similarities.
-- Functions:
- - mainbody: Executes the complete pipeline based on model building or application.
- - collect_seleced_bstack: Collects and prepares the boundary stack for further processing.
----
-collect_selected_bstack.py:
-- Description: Reads the boundaries of cells or nuclei from a CSV file and prepares them for analysis.
-- Functions:
- - collect_seleced_bstack: Gathers boundary data from specified locations (either to build a model or apply an existing one).
----
-bdreg.py:
-- Description: Registers the boundaries of cells or nuclei, ensuring alignment across images.
-- Functions:
- - bdreg: Registers the boundaries by resampling and applying Singular Value Decomposition (SVD).
-________________________________________
-pca_bdreg.py:
-- Description: Applies PCA to registered boundaries to reduce dimensions and capture the most relevant features.
-- Functions:
- - pca_bdreg: Handles the PCA transformation of boundary data.
-________________________________________
-PCA_custom.py:
-- Description: Custom implementation of PCA to process boundary data.
-- Functions:
- - PCA_custom: Returns the transformed data and regenerates the original data from the PCA transformation.
-________________________________________
-clusterSM.py:
-- Description: Applies clustering (K-means) to boundary data after PCA processing to categorize cells or nuclei.
-- Functions:
- - clusterSM: Performs K-means clustering and assigns cluster labels to each cell or nucleus.
-________________________________________
-update_csv.py:
-- Description: Generates and updates the VAMPIRE datasheet with cluster labels after analysis.
-- Functions:
- - update_csv: Writes the results, including cluster assignments, into a CSV file.
-________________________________________
-Helper Functions:
-- getdir: Opens a file dialog to choose a directory.
-- getcsv: Opens a file dialog to select a CSV file.
-- Model: Handles model building or application, depending on the user’s choice.
-- vampire(): Initializes and runs the GUI loop.
\ No newline at end of file
diff --git a/docs/gui_details.md b/docs/gui_details.md
deleted file mode 100644
index a85226a..0000000
--- a/docs/gui_details.md
+++ /dev/null
@@ -1,120 +0,0 @@
-## GUI Breakdown
-### **MainWindow(QWidget):**
-Description:Controls the interface displays and passes commands to the backend
--Connects to CentralNode class to pass commands to backend
--Stores button names, default values, positions, and page orders
-
-Use Case:
--Allows the user to interact with the system easily by providing an intuitive, user-friendly interface with clearly defined buttons and controls.
--Users want an error-free experience, and the GUI automatically handles input validation and error checking to ensure that the system behaves predictably.
--Users desire convenience, and the GUI can remember their settings across different sessions, ensuring they don’t have to re-enter their preferences every time.
-
-Displays controls and images
--Important parameters and roles:
--General_Layout: Creates layout for pages.
--Page_Layout: Creates layout for buttons.
--Page_Stack: Creates a stack of pages.
--Page_Index: Keeps track of the current page.
--Page_Buttons: Keeps track of buttons on the current page.
--Page_Controls: Keeps track of controls on the current page.
--Page_Controls_Layout: Creates layout for controls.
-
-Functions:
--Generate_Page: Generates a page for the GUI which may contain unique buttons or controls.
--Generate_Controls: Generates buttons, dropdowns, and sliders for the GUI.
--Update_Controls: Updates the values of the controls on the GUI and the page.
--Update_Image: Updates the image displayed on the GUI.
--Update_Parameters: Updates the parameters shown on the GUI.
-
-Pages of GUI:
--FirstPage: Ask if you want to upload parameters for a batch process or go through the parameter selection process.
--SecondPageA: Filters and thresholding.
--ThirdPageA: Aligns boundaries to remove rotational variance.
--FourthPageA: Principal Component Analysis (PCA).
--FifthPageA: Clustering and class assignment.
--SecondPageB: Batch process: Selects file location and uploads parameters.
--ThirdPageB: Shows estimated time remaining for processing.
--FinalPage: Displays results and asks if you want to save results and/or parameters.
-_________________________________________________________________
-## **CentralNode(QObject):**
-Description: Controls the backend of the GUI.
--Connects to MainWindow class to receive commands from the GUI.
--Connects to ImageHandler class to receive images from the backend.
--Connects to FunctionHandler class to pass commands to the backend.
--Stores the current state of the GUI and parameters.
--Controls the worker threads for image processing.
-
-Use Case:
--Manages the backend communication between the GUI and the various processing modules (image handling, functions execution) to ensure seamless interaction between user commands and backend processing.
--Users want to easily manage their data and parameters, and the CentralNode ensures that all settings and inputs are tracked across the system.
--Allows the user to work efficiently, as it manages the threading and execution of background tasks without freezing the GUI, allowing continuous interaction with the system.
-
-Important parameters and roles:
--Connects to MainWindow: Receives commands from the MainWindow to manage tasks like image loading, applying filters, or running models.
--Connects to ImageHandler: Facilitates communication between the frontend and the image processing thread.
--Connects to FunctionHandler: Initiates functions based on user input and manages function execution asynchronously.
--Stores current state of parameters: Manages the internal state of the GUI, such as selected filters, image data, and function parameters.
--Manages worker threads: Controls the background workers for image manipulation, model building, and other tasks.
-
-Functions:
--Update_Parameters: Updates parameters on the backend and passes them to the relevant worker thread.
--Update_Image: Emits signals to update the image displayed in the GUI.
--Update_Controls: Updates the backend state of controls based on user interaction.
--File management: Stores and sets file paths for saving parameters and images.
-_______________________________________________________
-## ** ImageHandler(QThread):**
-Description:
--Handles image processing functions and sends processed image data to CentralNode.
--Performs image manipulation tasks in a separate thread, such as applying filters and performing transformations.
--Ensures GUI remains responsive by offloading intensive tasks to the background.
-
-Use Case:
--Allows users to manipulate images without freezing the interface, enabling real-time interaction with the system while complex image processing is happening in the background.
--Users expect quick results, and the ImageHandler ensures that images are processed efficiently in the background without affecting the GUI’s responsiveness.
--Users want the system to be intuitive, and the image processing happens automatically based on the user’s inputs, with no need for them to manage the backend.
-
-Important parameters and roles:
--Connects to CentralNode: Receives image data from the backend and processes it.
--Sends processed image signals: Sends the processed image back to CentralNode to update the GUI display.
-Functions:
--apply_filter_to_image: Applies a filter to an image (e.g., thresholding, Sobel edge detection) based on the user’s selection.
--emit_updated_image: Converts the processed image to a QImage and emits it for display in the GUI.
-____________________________________________________________
-## **FunctionHandler(QThread):**
-Description:
--Handles the execution of background functions (such as PCA or clustering) that do not directly affect the image but modify other data.
--Executes tasks asynchronously to keep the GUI responsive.
-
-Use Case:
--Helps users run computationally heavy functions (like PCA or clustering) without interrupting their workflow, as these tasks are executed in the background.
--Users want to perform complex tasks like dimensionality reduction or data classification without having to wait for the interface to respond, and FunctionHandler enables this by offloading the work to a separate thread.
-
-Important parameters and roles:
--Connects to CentralNode: Receives function-related commands from the GUI and processes them in the background.
--Stores current state of function parameters: Manages the parameters of functions like PCA, clustering, etc.
-
-Functions:
--run: Executes the function in the background, ensuring that the GUI stays responsive.
--select_function: Determines the function to execute based on the current control index (e.g., PCA ).
-__________________________________________________________________________
-## **ModelHandler(QThread):**
-Description:
--Handles the process of building or applying a model in a background thread to avoid blocking the GUI.
--Works with FunctionHandler to execute model-related operations such as clustering or PCA.
-
-Use Case:
--Helps users build or apply models without freezing the UI, allowing them to perform complex modeling tasks (like clustering) while continuing to interact with other parts of the application.
--Users expect models to be built quickly and without errors, and the ModelHandler ensures the task is performed reliably and in the background, so the user doesn’t need to wait.
-
-Important parameters and roles:
--Connects to CentralNode: Sends progress updates and status changes during model building or application.
--Uses output path and clusters: Handles output paths and the number of clusters for certain models.
-
-Functions:
--run: Starts the model-building or application process in a separate thread.
--mainbody: Contains core logic for the model-building or applying process, including progress updates and status messaging.
--update_status: Sends status updates (e.g., "Modeling initiated…", "Modeling completed") to the GUI.
--update_progress: Sends percentage-based progress updates to indicate task completion.
--apply_model: Applies the model to the data if build_model is False and updates the GUI accordingly.
-
-
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..0e9a393
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,19 @@
+.. TURMorIC documentation master file, created by
+ sphinx-quickstart on Fri Aug 29 14:00:51 2025.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+TURMorIC documentation
+======================
+
+Add your content using ``reStructuredText`` syntax. See the
+`reStructuredText `_
+documentation for details.
+
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Contents:
+
+ turmoric
+
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000..32bb245
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/modules.rst b/docs/modules.rst
new file mode 100644
index 0000000..e140e35
--- /dev/null
+++ b/docs/modules.rst
@@ -0,0 +1,7 @@
+turmoric
+========
+
+.. toctree::
+ :maxdepth: 4
+
+ turmoric
diff --git a/src/turmoric/image_processing_functions/.gitkeep b/docs/requirements.txt
similarity index 100%
rename from src/turmoric/image_processing_functions/.gitkeep
rename to docs/requirements.txt
diff --git a/docs/turmoric.rst b/docs/turmoric.rst
new file mode 100644
index 0000000..e2abf2b
--- /dev/null
+++ b/docs/turmoric.rst
@@ -0,0 +1,61 @@
+turmoric package
+================
+
+Submodules
+----------
+
+turmoric.apply\_thresholds module
+---------------------------------
+
+.. automodule:: turmoric.apply_thresholds
+ :members:
+ :show-inheritance:
+ :undoc-members:
+
+turmoric.cell\_analysis module
+------------------------------
+
+.. automodule:: turmoric.cell_analysis
+ :members:
+ :show-inheritance:
+ :undoc-members:
+
+turmoric.image\_process module
+------------------------------
+
+.. automodule:: turmoric.image_process
+ :members:
+ :show-inheritance:
+ :undoc-members:
+
+turmoric.main module
+--------------------
+
+.. automodule:: turmoric.main
+ :members:
+ :show-inheritance:
+ :undoc-members:
+
+turmoric.utils module
+---------------------
+
+.. automodule:: turmoric.utils
+ :members:
+ :show-inheritance:
+ :undoc-members:
+
+turmoric.vampire\_model module
+------------------------------
+
+.. automodule:: turmoric.vampire_model
+ :members:
+ :show-inheritance:
+ :undoc-members:
+
+Module contents
+---------------
+
+.. automodule:: turmoric
+ :members:
+ :show-inheritance:
+ :undoc-members:
diff --git a/docs/use_cases.md b/docs/use_cases.md
deleted file mode 100644
index 60707d6..0000000
--- a/docs/use_cases.md
+++ /dev/null
@@ -1,58 +0,0 @@
-## Colin
-_Based off of Krista's user stories_
-#### 1. For users like Dana, they want to use the image thresholding based on shape, but with a simple UI where they would not need to fine tune the code once it is set up. The software needs to be high throughput or at least be able to rapidly return analysis.
-
-#### 2. For users like Emma, they need an image processing pipeline that is effective for various conditions and in different brain environments. For use at home, it needs to have enough open access that a VPN would allow users to connect remotely.
-
-#### 3. For users like Bella, they need a software tool that rapidly diagnoses patients with minimal error. The tool would need to be able to segment more than just brain cells for tumors. A simple UI or GUI would be necessary for their skill level.
-
-#### 4. For users like Edward, they need a software tool that retains the original image or image regions for delivering feedback while also different brain cell types. An intuitive UI would be necessary for calling functions with the image segmenting/thresholding.
-
-#### 5. For users like Jacob, they need a database of example brain slices to practice with. Without coding experience, a simple UI or GUI with tunable parameters for image segmentation. The UI should also provide some sort of tutorial for them to follow and learn some of the coding and parameters.
-
-## Heather
-_Based off of Muna's user stories_
-#### 1. Maya needs an interface that is easy to use for someone with a limited backround in imaging which would mean that the interface should be intuitive and easy to navigate, promt the user to do the necesarry steps in order and offer explanations for each step.
-
-#### 2. Jaden wants a program that is able to automatically adjust for attentuation and differences in staining penetration for automatic thresholding. While ideally this would entail a ground truth image with perfect staining or well anotated images and machine learning to correct for these differences another approach (since we may not have that kind of data) may be to have parameters adjustable for the user by sliders and then manual correction if needed.
-
-#### 3. Amina wants to be able to save parameters and apply them to a folder of images in batches.
-
-#### 4. Ethan wants an interface that segments images and is easy to use.
-
-#### 5. Oliva wants a program that will output quantitave results to a csv file for later analyses.
-## Krista
-_Based off of Colin's user stories_
-#### 1. Jalen's main priority for this code is learning. For users like Jalen, their main use case would be accessing, downloading, and reading the source code and documentation.
-
-#### 2. Devonta wants to be able to use this package to present quality data. He would be inputting pictures into the program and would need a visual output of the results so that he can put them in his presentation. He would also need to be able to easily access statistical metrics such as error.
-
-#### 3. Saquon wants to use the program to specifically probe the cell morphology of brain cell types. This program should be able to differentiate between different cell types and report quantifiable cell morphology metrics such as symmetry, size, circularity, etc.
-
-#### 4. Cooper is hoping to use this program in his PhD studies and to improve his data science skills. For Cooper, this program would need to allow him to input and test different values for various parameters as he explores a potential hypothesis for his PhD. This package needs understandable documentation so that he can understand how his changes might affect his results. The source code also needs to be accessible and downloadable so that Cooper can download and read the code to better his understanding.
-
-#### 5. Darius wants to use this program to segment and analyze images of cells in other biological contexts. This means that the program should be robust. The imaging techniques and metrics should not be dependent on brain contexts. At the least, the program should allow Darius to modify parameters to tailor it for other specific biological contexts. With basic coding knowledge, Darius should be able to tweak these parameters directly from the command line or using a UI so that he himself does not need to download and modify the source code.
-
-## Muna
-_Based off of Heather’s user stories_
-#### 1. Ally needs an intuitive software interface that does not require coding expertise but allows them to visualize and analyze astrocyte structures. The tool should provide clear, step-by-step guidance and visual outputs to help them understand how astrocytes respond to polycarboxybetaine methacrylate hydrogel nanoparticles. The interface should focus on accessibility, enabling users with minimal programming experience to explore cellular structural changes with ease.
-
-#### 2. Basil requires a software tool that offers extensive customization for image filtering and processing parameters. Since he is proficient in C++ and interested in high-quality visual outputs, the software should allow fine-tuning of segmentation and filtering settings through an adjustable parameter interface. It should also support advanced visualization tools to ensure optimal image quality.
-
-#### 3. Camron needs a software tool that can apply machine learning techniques to analyze cellular interactions with nanoparticles. The tool should provide automated quantification of cell responses and structural changes, with an easy-to-use interface that enables non-expert users to navigate and interpret results efficiently. It should also support both Python and R integration for advanced statistical analyses.
-
-#### 4. Daria seeks a robust analytical tool for characterizing glial subpopulations and their structural responses to specific biochemical treatments. The software should allow in-depth morphological analysis with quantitative output metrics. Given her proficiency in Python, it should include an API for scripting and automation while also offering a graphical interface for initial explorations.
-
-#### 5. Eli requires a user-friendly software tool that provides structural characterization of glial cells in response to lipid nanoparticle treatments. The tool must prioritize ease of use with a well-designed GUI, enabling users unfamiliar with Python to perform image analysis confidently. Additionally, it should provide clear visual feedback to assess whether her nanoparticles induce neuroinflammation.
-
-## Sergi
-
-#### 1. Tania wants to incorporate this package to improve her research on brain cell segmentation, which has so far only been performed through visual inspection. To enhance her experience, the software must provide clear instructions on how to input the images and execute the segmentation process. As Tania plans to continue analyzing the results, the software should also offer flexible plotting options to visualize the outcomes effectively. This will allow her to interpret the segmented images for each stain and gain deeper insights into the brain cells under study.
-
-#### 2. Nic wants to obtain information for his thesis on how different melodies of music impact brain cells. Since he has no background in programming nor sciences, the software must be as explicit and user-friendly as possible, allowing him to easily obtain results without dealing with complex computational aspects. It should generate clear, well-structured outputs that can be directly analyzed in his work, ensuring credibility and ease of integration into his study. Additionally, as Nic is already working on his thesis, the software should provide proper citation information for his research.
-
-#### 3. Maria aims to identify and segment oligodendrocytes for her research. However, since the images she obtains from her experiments are not standardized, the software must include preprocessing features such as flexible input handling and filtering options. Additionally, the documentation should be clear and comprehensive, providing detailed instructions on image preprocessing, segmentation parameters, and customization options to ensure ease of use for researchers with varying levels of computational expertise.
-
-#### 4. Alex seeks to study how microglia and other neural cells respond to environmental toxins in controlled settings. She intends to download and modify the code, so the package must be comprehensive and easily accessible. Additionally, clear licensing guidelines should be provided, as she plans to customize and adapt the software for her research.
-
-#### 5. Kristin wants to better understand traumatic brain injuries by incorporating visually aided analysis into her research. Since she produces several brain tissue images every day, the software must be able to efficiently process these image batches to meet her requirements. Additionally, it should include examples of similar analyses, allowing her to compare results and ensure consistency in her work.
diff --git a/docs/user_stories.md b/docs/user_stories.md
deleted file mode 100644
index ccefa6f..0000000
--- a/docs/user_stories.md
+++ /dev/null
@@ -1,82 +0,0 @@
-## Colin
-#### 1.
- Jalen is an undergraduate in the Disease-Directed Engineering Lab at UW. He was very excited to join a lab with baked goods and microscopes. He wants to use the improved python packages and associated functions to fine tune his research as it progresses. He also hopes to understand the underlying code to build his confidence in programming. For his purposes, he wants a lot of markdown and documentation of the different functions inside the package so that he can better study the actual lines of code. He is only familiar with Matlab and C and even those proficiencies are fleeting at best since he has not coded in a couple quarters.
-
-#### 2.
- Devonta is a post doc with years of experience in neuromedicine and programming. He's hoping to develop tools to showcase the strength of academic research and the need for NIH funding. He wants to use the modified and reshaped code to give presentations to others with varying levels of data science and neuromedicine knowledge. He needs the code to be able to produce good quality data with limited error to be able to present it externally. He is a very experienced programmer and data scientist.
-
-#### 3.
- Saquon is a master's student in data science with limited expereience in bio-image processing. He likes how brain images can tell different stories. He wants to use the new packages and tools to be able to understand the cellular morphology of all brain cell types like astrocytes and oligodendrocytes. For the expanded function set, he needs the image thresholding software to be able to parse the morphology of the oligodendrocytes for his thesis and interest in cells. He has experience in coding, but not specifically with the image processing side.
-
-#### 4.
- Cooper is a first-year PhD student in chemical engineering. He is really eager to prove to his PI that he can handle the pressures of grad school. He wants to use the new platform to hone his data science skills and build novel functions to add to the python package for use later in his graduate studies. He needs the new python package to be easily amendable. He would also appreciate a good amount of markdown to be able to parse through it on his own and perform further code development. While he has little experience in coding and brain biology, he is really enthusiastic.
-
-#### 5.
- Darius is a fourth-year PhD student in bioengineering. He does not understand why chemical engineers think they are better than him since he can understand the cellular mechanisms without much thought. He wants to apply the imaging packages to his reseaarch in other biological systems like gut epithelial cells and tumors. He would appreciate documentation on how code was tweaked for each cell type to be able to expand its range and utility. He has basic coding knowledge, but a very strong understanding of cell biology.
-
-## Heather
-#### 1.
- Ally is an undergraduate engineering student who is slightly obsessed with astrocytes and therefore has a lot of questions about them. They have used python once or twice for simple tasks but are not particularly interested in coding or data science but would like to learn about how the structure of different brain cells may be altered in response to polycarboxybetaine methacrylate hydrogel nanoparticles carrying dead CAS9 to dampen the expression of MGMT while delivering TMZ. They would like to have a software that is easy to use and understand and that can help them visualize the structure of astrocytes and how it changes in response to the nanoparticles.
-
-#### 2.
- Basil is a Master's student whose background is in electrical engineering. He has used python for many projects but is more comfortable with C++. He is very much interested in the use of computer vision to analyze the structure of different cells and is a perfectionist when it comes to the quality of his images and visualizations. He would like to have a software where is easy to customize and tweak the filtering parameters to get the best results.
-
-#### 3.
- Camron is a first year PhD student in material science. He has used python for a few projects but is more comfortable with R. He is very interested in the use of machine learning to analyze how different cells interact with nanoparticles and the materials they are made from. To select nanoparticles for further study, he needs to be able to quantify the number of cells that react to his paricles. He needs the software to be able to analyze the structure of the cells and to be able to quantify the changes in the structure of the cells and have an interface that is easy to use.
-
-#### 4.
- Daria is a postdoc in neuroscience. She has used python for many projects and is very comfortable with it. She is very interested in characterizing the structure of different glial subpopulations and how they change in response to glial cell line-derived neurotrophic factor and nitric oxide. She needs the software to be able to analyze the structure of the cells and to be able to quantify the changes in the structure of the cells.
-
-#### 5.
- Eli is a visiting scholar from Taiwan. She has is very knowledgeable and skilled when it comes to synthesizing nanoparticles but is unfamiliar with python. She is planning on using lipid nanoparticles with CD55 mRNA to glial cells to see if she can reverse neuroinflammation. Before proceeding with her project to she wants to make sure that her nanoparticles are not unintentionally inducing inflamation in the brain so she want to look at the structure of the glial cells when she adds her nanoparticles. She needs the software that characterizes the structure of the cells to be easy to use and to have a good user interface.
-
-## Krista
-#### 1.
- Dana is a 5th year PhD student studying antimicrobial resistance. She is performing studies of how different combinations of antibiotics affect cell population growth. She wants to use this image analysis tool to analyze images taken of cell growth on plates to quickly enumerate different cell populations. She has limited experience in Python and would benefit from an intuitive UI.
-
-#### 2.
- Emma is a fourth year PhD student studying the role of microglia in Alzheimer's disease. She wants an image analysis tool that can easily identify microglia and enumerate from brain slice images. She wants this tool to be user friendly and efficient so that she can use this tool from her own laptop at home. She is advanced in Linux commands and Python.
-
-#### 3.
- Bella is a research scientist for a biotech company that wants to imrpove diagnostics. Bella wants to use an image analysis tool segment and identify different immune cell types from images of tissue slices. She needs this tool to be accurate and fast so that she can quickly and correctly diagnose patients. She has no coding experience, and would thus need a UI that provides helpful feedback about the images.
-
-#### 4.
- Edward is an senior undergraduate researcher who just began research in a neuroscience lab. For his project, he is interested in how different disease states in the brain impact the spatial arrangements of cells in the brain. He wants an image analysis tool that can take in large images of entire brain slices and identify different cell types. He would like this tool to give visual feedback of where the cells are being segmented. He is an intermediate in Python, but is not familiar with Python's image analysis packages.
-
-#### 5.
- Jacob is a high school student who is curious about image analysis and neuroscience. He wants access to an image analysis tool that would offer example brain slice images and allow him to play around with different image segmentation parameters to learn how results might differ from different parameter choices. He wants a user-friendly and educational UI that tells him about what each parameter does and offers flexibility in the image types he wants to analyze. Jacob has no programming experience whatsoever.
-
-## Muna
-
-#### 1.
- Maya is a first-year PhD student in bioengineering who has a background in molecular biology but limited experience with image analysis. She is studying how microglia morphology changes after traumatic brain injury and wants a tool that can segment and quantify the complexity of microglial structures. She needs the tool to provide a simple, step-by-step interface with clear visual feedback so she can validate the results without needing extensive coding expertise. She has limited experience with programming and prefers an intuitive GUI that automates most of the image processing steps.
-
-#### 2.
- Jaden is a third-year neuroscience PhD student who has been working with immunohistochemistry images but struggles with inconsistent thresholding results for different cell types. He wants a tool that can automatically adjust its parameters based on staining intensity to help standardize his image analysis across experiments. He needs an intuitive GUI where he can tweak parameters but also rely on default settings for fast processing. He has some experience with ImageJ but is not proficient in coding and prefers a point-and-click interface for thresholding adjustments.
-
-#### 3.
- Amina is a postdoc specializing in neuroimmunology. She wants to compare microglial activation states across multiple conditions and needs a software tool that can batch-process images and provide quantitative metrics on cell shape and clustering. She prefers a tool that offers customizable data outputs and statistical summaries to compare cell morphology across conditions. She is comfortable with Python and prefers an API that integrates with her existing analysis pipeline.
-
-#### 4.
- Ethan is a medical student doing a research rotation in neuropathology. He wants a tool that provides an easy-to-use GUI with built-in presets for different brain cell types, allowing him to quickly analyze microglial morphology without manually adjusting every image. He needs a tool that simplifies the segmentation process while maintaining high accuracy. He has no coding experience and relies on graphical interfaces like ImageJ or Fiji for his research.
-
-#### 5.
- Olivia is a second-year Master's student in computational neuroscience. She is working on building predictive models for neuroinflammation and needs to extract morphological features from microglial images. She needs a tool that can export segmentation results as structured data (e.g., CSV, JSON) for downstream machine learning analysis. She is highly proficient in Python and prefers a scriptable API over a GUI.
-
-## Sergi
-
-#### 1.
-Tania is a second-year Ph.D. student at the Renaud Lab at the UW.She has been researching brain cells, but the wide variety in their morphologies has made it difficult to segment and she is also struggling to find a clear pattern between them.She believes that this tool will help her better differentiate between various cell types, including microglia and oligodendrocytes. She works with two different stains and expects to obtain a digitally segmented and processed image highlighting microglia and oligodendrocytes for each stain. While she doesn't perform computational methods regularly, she has vast experience in the visual analysis of brain cell imaging.
-
-#### 2.
-Nic is a third-year Ph.D. student in the Department of Music.He aims to use this tool to investigate how brain cells respond to exposure to a specific melody over extended periods.To support his research, he has collaborated with the neuroscience department, which has provided him with images of rat brain tissue exposed to various melodies.He expects that the tool will help him gain deeper insights into the brain’s neural responses and interactions with music.As he doesn't have knowledge of programming, he only wants to obtain concise information for his thesis and doesnt want to dive into the computational aspect of it.
-
-#### 3.
-Maria is a second-year Ph.D. student in the Department of Neuroscience.She intends to use this tool to analyze the segmentation of neural cells, specifically oligodendrocytes. However, her images are generated in various formats and with different pixel qualities.She believes the tool will help her identify and quantify the role of microglia in neuroinflammation and neurodegenerative diseases more efficiently. Maria has a strong foundation in neuroscience, and at the same time wants learn about computational techniques necessary for advanced cell segmentation analysis.
-
-#### 4.
-Alex is a second-year Ph.D. student in the Department of Environmental Science.She is investigating how neural cells, such as microglia, respond to toxins in air chambers and workspaces, by using this tool for advanced segmentation and analysis of cell interactions in brain tissue samples.Alex hopes that the tool will provide insights into the neuroimmune response to pollutants and how microglia contribute to brain health under stress. She counts with a solid background in coding and programming and is looking forward to download the code and customize it for her research.
-
-#### 5.
-Kristin is a undergraduate student at the Nance Lab. She is working on living slices of mice brain of different age stages and hoping to better understand how the brain works and how it reacts to injuries, including traumatic brain injuries (TBI). She hopes to gain a better understanding of such injuries from the various brain tissue images she produces daily, while also expecting to find examples of similar samples analyzed using the software. Despite their limited experience with computational methods, Kristin is eager to apply and incorporate this technique into her researcht.
-
diff --git a/environment.yml b/environment.yml
index f630b5a..fd833c4 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,280 +1,17 @@
-name: nosferatu
+name: turmoric
channels:
- conda-forge
-dependencies:
- - numpy>=2.2.2
+dependencies:
+ - numpy
- pandas>=2.2.3
- - scikit-learn>=1.6.1
+ - scikit-learn>=1.6.1
- scikit-image>=0.25.0
- matplotlib>=3.10.1
- - pytest>=8.3.4
- - _libgcc_mutex>=0.1
- - _openmp_mutex>=4.5
- - _r-mutex>=1.0.1
- - anyio>=4.6.2
- - argon2-cffi>=21.3.0
- - argon2-cffi-bindings>=21.2.0
- - astroid>=3.3.8
- - asttokens>=2.0.5
- - async-lru>=2.0.4
- - attrs>=24.3.0
- - autopep8>=2.0.4
- - babel>=2.16.0
- - beautifulsoup4>=4.12.3
- - binutils_impl_linux-64>=2.40
- - binutils_linux-64>=2.40.0
- - blas>=1.1
- - bleach>=6.2.0
- - bottleneck>=1.4.2
- - brotli>=1.0.9
- - brotli-bin>=1.0.9
- - brotli-python>=1.0.9
- - bwidget>=1.10.1
- - bzip2>=1.0.8
- - c-ares>=1.19.1
- - ca-certificates>=2025.2.25
- - cairo>=1.16.0
- - certifi>=2025.1.31
- - cffi>=1.17.1
- - charset-normalizer>=3.3.2
- - colorama>=0.4.6
- - comm>=0.2.1
- - contourpy>=1.3.1
- - curl>=8.11.1
- - cycler>=0.11.0
- - cyrus-sasl>=2.1.28
- - dbus>=1.13.18
- - debugpy>=1.8.11
- - decorator>=5.1.1
- - defusedxml>=0.7.1
- - dill>=0.3.9
- - docstring-to-markdown>=0.15
- - executing>=0.8.3
- - expat>=2.6.4
- - flake8>=7.1.1
- - font-ttf-dejavu-sans-mono>=2.37
- - font-ttf-inconsolata>=3.000
- - font-ttf-source-code-pro>=2.038
- - font-ttf-ubuntu>=0.83
- - fontconfig>=2.14.1
- - fonts-conda-ecosystem>=1
- - fonts-conda-forge>=1
- - fonttools>=4.55.3
- - freetype>=2.12.1
- - fribidi>=1.0.10
- - gcc_impl_linux-64>=11.2.0
- - gcc_linux-64>=11.2.0
- - gfortran_impl_linux-64>=11.2.0
- - gfortran_linux-64>=11.2.0
- - glib>=2.78.4
- - glib-tools>=2.78.4
- - graphite2>=1.3.14
- - gst-plugins-base>=1.14.1
- - gstreamer>=1.14.1
- - gxx_impl_linux-64>=11.2.0
- - gxx_linux-64>=11.2.0
- - h11>=0.14.0
- - harfbuzz>=4.3.0
- - httpcore>=1.0.2
- - httpx>=0.27.0
- - icu>=73.1
- - idna>=3.7
- - importlib-metadata>=8.5.0
- - ipykernel>=6.29.5
- - ipython>=8.30.0
- - ipywidgets>=8.1.5
- - isort>=5.13.2
- - jedi>=0.19.2
- - jinja2>=3.1.6
- - jpeg>=9e
- - json5>=0.9.25
- - jsonschema>=4.23.0
- - jsonschema-specifications>=2023.7.1
- - jupyter>=1.1.1
- - jupyter-lsp>=2.2.0
- - jupyter_client>=8.6.3
- - jupyter_console>=6.6.3
- - jupyter_core>=5.7.2
- - jupyter_events>=0.12.0
- - jupyter_server>=2.15.0
- - jupyter_server_terminals>=0.4.4
- - jupyterlab>=4.3.4
- - jupyterlab_pygments>=0.1.2
- - jupyterlab_server>=2.27.3
- - jupyterlab_widgets>=3.0.13
- - kernel-headers_linux-64>=3.10.0
- - kiwisolver>=1.4.8
- - krb5>=1.20.1
- - lcms2>=2.16
- - ld_impl_linux-64>=2.40
- - lerc>=4.0.0
- - libabseil>=20240116.2
- - libbrotlicommon>=1.0.9
- - libbrotlidec>=1.0.9
- - libbrotlienc>=1.0.9
- - libclang>=14.0.6
- - libclang13>=14.0.6
- - libcups>=2.4.2
- - libcurl>=8.11.1
- - libdeflate>=1.22
- - libedit>=3.1.20230828
- - libev>=4.33
- - libffi>=3.4.4
- - libgcc>=14.2.0
- - libgcc-devel_linux-64>=11.2.0
- - libgcc-ng>=14.2.0
- - libgfortran>=14.2.0
- - libgfortran-ng>=14.2.0
- - libgfortran5>=14.2.0
- - libglib>=2.78.4
- - libgomp>=14.2.0
- - libiconv>=1.16
- - libllvm14>=14.0.6
- - libnghttp2>=1.57.0
- - libopenblas>=0.3.28
- - libpng>=1.6.39
- - libpq>=17.2
- - libprotobuf>=4.25.3
- - libsodium>=1.0.18
- - libssh2>=1.11.1
- - libstdcxx>=14.2.0
- - libstdcxx-devel_linux-64>=11.2.0
- - libstdcxx-ng>=11.2.0
- - libtiff>=4.5.1
- - libuuid>=1.41.5
- - libwebp-base>=1.5.0
- - libxcb>=1.15
- - libxkbcommon>=1.0.1
- - libxml2>=2.13.5
- - libxslt>=1.1.41
- - lz4-c>=1.9.4
- - make>=4.4.1
- - markupsafe>=3.0.2
- - matplotlib>=3.10.1
- - matplotlib-base>=3.10.1
- - matplotlib-inline>=0.1.6
- - mccabe>=0.7.0
- - minizip>=4.0.3
- - mistune>=3.1.2
- - mysql>=8.4.0
- - nbclient>=0.10.2
- - nbconvert>=7.16.6
- - nbconvert-core>=7.16.6
- - nbconvert-pandoc>=7.16.6
- - nbformat>=5.10.4
- - ncurses>=6.4
- - nest-asyncio>=1.6.0
- - notebook>=7.3.2
- - notebook-shim>=0.2.4
- - nspr>=4.35
- - nss>=3.89.1
- - numexpr>=2.10.1
- - numpy-base>=2.0.1
- - openblas>=0.3.28
- - openjpeg>=2.5.2
- - openldap>=2.6.4
- - openssl>=3.4.0
- - overrides>=7.4.0
- - packaging>=24.2
- - pandoc>=2.12
- - pandocfilters>=1.5.0
- - pango>=1.50.9
- - paramiko>=2.11.0
- - parso>=0.9.0
- - pathos>=0.2.5
- - pexpect>=4.8.0
- - pickleshare>=0.7.5
- - pillow>=9.5.0
- pip>=23.1.2
- - plotly>=5.10.0
- - psutil>=5.9.5
- - pureeval>=0.2.2
- - py4j>=0.10.10.6
- - pyasn1>=0.5.0
- - pycairo>=1.23.0
- - pycosat>=0.6.3
- - pycparser>=2.21
- - pydantic>=2.6.0
- - pydeck>=0.8.0
- - pydot>=1.5.0
- - pygraphviz>=1.8
- - pyjwt>=2.8.0
- - pyopenssl>=23.1.1
- - pyparsing>=3.0.9
- - pyrsistent>=0.19.3
- - pysocks>=1.7.1
- - pytest>=7.2.2
- - python>=3.11
- - python_abi>=3.11
- - pytorch>=2.1.0
- - pytorch-cuda>=2.1.0
- - pyyaml>=6.0
- - pyzmq>=25.1.0
- - qt>=5.15.9
- - qt5-sqldriver>=5.15.9
- - qtbase>=5.15.9
- - qtcharts>=5.15.9
- - qtconnectivity>=5.15.9
- - qtdoc>=5.15.9
- - qtgraphicaleffects>=5.15.9
- - qtlocation>=5.15.9
- - qtmultimedia>=5.15.9
- - qtscript>=5.15.9
- - qtsensors>=5.15.9
- - qtserialport>=5.15.9
- - qttest>=5.15.9
- - qtwebsockets>=5.15.9
- - qtwebview>=5.15.9
- - r-base>=4.2.2
- - r-biocmanager>=1.30.17
- - r-curl>=5.0.0
- - r-digest>=0.6.31
- - r-ggplot2>=3.4.1
- - r-knitr>=1.41
- - r-matrix>=1.5.3
- - r-rcpp>=1.0.9
- - r-reshape2>=1.4.4
- - r-stringr>=1.5.0
- - r-tidyverse>=2.0.0
- - r-viridis>=0.6.0
- - readline>=8.2
- - regex>=2023.4.23
- - requests>=2.31.0
- - requests-toolbelt>=0.10.1
- scipy>=1.11.1
- seaborn>=0.12.2
- setuptools>=65.5.0
- - sip>=4.19.24
- - six>=1.16.0
- - sniffio>=1.3.0
- - snowballstemmer>=2.2.0
- - sortedcontainers>=2.4.0
- - sphinx>=6.2.2
- - sphinxcontrib-websupport>=1.2.4
- - sqlite>=3.41.2
- - sympy>=1.11.1
- - tbb>=2021.9.0
- - tensorboard>=2.13.0
- - tensorflow>=2.13.0
- - terminado>=0.15.0
- - testpath>=0.6.0
- - tornado>=6.2
- - tqdm>=4.66.0
- - traitlets>=5.9.0
- - typing_extensions>=4.6.3
- - urllib3>=1.26.15
- - vega_datasets>=0.10.1
- - werkzeug>=2.3.6
- - wheel>=0.41.1
- - xz>=5.2.11
- - zlib>=1.2.12
- - zstd>=1.5.5
+ - tifffile>=2025.3.13
- pip:
- - imageio>=2.37.0
- - lazy-loader>=0.4
- - networkx>=3.4.2
- - opencv-python>=4.11.0.86
- - scikit-image>=0.25.0
- - tifffile>=2025.3.13
- - pyqt6
- - opencv-python
+ - opencv-python>=4.11
+ - nd2
diff --git a/example_dataset/CellProfiler segmentation pipeline.cppipe b/example_dataset/CellProfiler segmentation pipeline.cppipe
deleted file mode 100644
index 4018699..0000000
--- a/example_dataset/CellProfiler segmentation pipeline.cppipe
+++ /dev/null
@@ -1,149 +0,0 @@
-CellProfiler Pipeline: http://www.cellprofiler.org
-Version:4
-DateRevision:319
-GitHash:
-ModuleCount:9
-HasImagePlaneDetails:False
-
-Images:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:2|show_window:False|notes:\x5B\'To begin creating your project, use the Images module to compile a list of files and/or folders that you want to analyze. You can also specify a set of rules to include only the desired files in your selected folders.\'\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
- :\xff\xfe
- Filter images?:\xff\xfeI\x00m\x00a\x00g\x00e\x00s\x00 \x00o\x00n\x00l\x00y\x00
- Select the rule criteria:\xff\xfea\x00n\x00d\x00 \x00(\x00e\x00x\x00t\x00e\x00n\x00s\x00i\x00o\x00n\x00 \x00d\x00o\x00e\x00s\x00 \x00i\x00s\x00i\x00m\x00a\x00g\x00e\x00)\x00 \x00(\x00d\x00i\x00r\x00e\x00c\x00t\x00o\x00r\x00y\x00 \x00d\x00o\x00e\x00s\x00n\x00o\x00t\x00 \x00c\x00o\x00n\x00t\x00a\x00i\x00n\x00r\x00e\x00g\x00e\x00x\x00p\x00 \x00"\x00\x5B\x00\\\x00\\\x00\\\x00\\\x00/\x00\x5D\x00\\\x00\\\x00.\x00"\x00)\x00
-
-Metadata:[module_num:2|svn_version:\'Unknown\'|variable_revision_number:5|show_window:False|notes:\x5B\'The Metadata module optionally allows you to extract information describing your images (i.e, metadata) which will be stored along with your measurements. This information can be contained in the file name and/or location, (?P\x5B^\\\\\\\\\x5D*)(?P\x5B^\\\\\\\\\x5D*)or in an external file.\'\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
- Extract metadata?:\xff\xfeY\x00e\x00s\x00
- Metadata data type:\xff\xfeT\x00e\x00x\x00t\x00
- Metadata types:\xff\xfe{\x00}\x00
- Extraction method count:\xff\xfe2\x00
- Metadata extraction method:\xff\xfeE\x00x\x00t\x00r\x00a\x00c\x00t\x00 \x00f\x00r\x00o\x00m\x00 \x00f\x00i\x00l\x00e\x00/\x00f\x00o\x00l\x00d\x00e\x00r\x00 \x00n\x00a\x00m\x00e\x00s\x00
- Metadata source:\xff\xfeF\x00i\x00l\x00e\x00 \x00n\x00a\x00m\x00e\x00
- Regular expression to extract from file name:\xff\xfe(\x00?\x00P\x00<\x00I\x00m\x00a\x00g\x00e\x00I\x00D\x00>\x00\x5B\x00^\x00\\\x00\\\x00\x5D\x00*\x00)\x00
- Regular expression to extract from folder name:\xff\xfe(\x00?\x00P\x00<\x00D\x00a\x00t\x00e\x00>\x00\x5B\x000\x00-\x009\x00\x5D\x00{\x004\x00}\x00_\x00\x5B\x000\x00-\x009\x00\x5D\x00{\x002\x00}\x00_\x00\x5B\x000\x00-\x009\x00\x5D\x00{\x002\x00}\x00)\x00$\x00
- Extract metadata from:\xff\xfeA\x00l\x00l\x00 \x00i\x00m\x00a\x00g\x00e\x00s\x00
- Select the filtering criteria:\xff\xfea\x00n\x00d\x00 \x00(\x00f\x00i\x00l\x00e\x00 \x00d\x00o\x00e\x00s\x00 \x00c\x00o\x00n\x00t\x00a\x00i\x00n\x00 \x00"\x00c\x001\x00"\x00)\x00
- Metadata file location:\xff\xfeE\x00l\x00s\x00e\x00w\x00h\x00e\x00r\x00e\x00.\x00.\x00.\x00\x7C\x00
- Match file and image metadata:\xff\xfe\x5B\x00\x5D\x00
- Use case insensitive matching?:\xff\xfeN\x00o\x00
- Metadata file name:\xff\xfe
- Metadata extraction method:\xff\xfeE\x00x\x00t\x00r\x00a\x00c\x00t\x00 \x00f\x00r\x00o\x00m\x00 \x00f\x00i\x00l\x00e\x00/\x00f\x00o\x00l\x00d\x00e\x00r\x00 \x00n\x00a\x00m\x00e\x00s\x00
- Metadata source:\xff\xfeF\x00o\x00l\x00d\x00e\x00r\x00 \x00n\x00a\x00m\x00e\x00
- Regular expression to extract from file name:\xff\xfe^\x00(\x00?\x00P\x00<\x00P\x00l\x00a\x00t\x00e\x00>\x00.\x00*\x00)\x00_\x00(\x00?\x00P\x00<\x00W\x00e\x00l\x00l\x00>\x00\x5B\x00A\x00-\x00P\x00\x5D\x00\x5B\x000\x00-\x009\x00\x5D\x00{\x002\x00}\x00)\x00_\x00s\x00(\x00?\x00P\x00<\x00S\x00i\x00t\x00e\x00>\x00\x5B\x000\x00-\x009\x00\x5D\x00)\x00_\x00w\x00(\x00?\x00P\x00<\x00C\x00h\x00a\x00n\x00n\x00e\x00l\x00N\x00u\x00m\x00b\x00e\x00r\x00>\x00\x5B\x000\x00-\x009\x00\x5D\x00)\x00
- Regular expression to extract from folder name:\xff\xfe(\x00?\x00P\x00<\x00S\x00e\x00t\x00I\x00D\x00>\x00\x5B\x00^\x00\\\x00\\\x00\x5D\x00*\x00$\x00)\x00
- Extract metadata from:\xff\xfeA\x00l\x00l\x00 \x00i\x00m\x00a\x00g\x00e\x00s\x00
- Select the filtering criteria:\xff\xfea\x00n\x00d\x00 \x00(\x00f\x00i\x00l\x00e\x00 \x00d\x00o\x00e\x00s\x00 \x00c\x00o\x00n\x00t\x00a\x00i\x00n\x00 \x00"\x00"\x00)\x00
- Metadata file location:\xff\xfeE\x00l\x00s\x00e\x00w\x00h\x00e\x00r\x00e\x00.\x00.\x00.\x00\x7C\x00
- Match file and image metadata:\xff\xfe\x5B\x00\x5D\x00
- Use case insensitive matching?:\xff\xfeN\x00o\x00
- Metadata file name:\xff\xfe
-
-NamesAndTypes:[module_num:3|svn_version:\'Unknown\'|variable_revision_number:8|show_window:False|notes:\x5B\'The NamesAndTypes module allows you to assign a meaningful name to each image by which other modules will refer to it.\'\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
- Assign a name to:\xff\xfeA\x00l\x00l\x00 \x00i\x00m\x00a\x00g\x00e\x00s\x00
- Select the image type:\xff\xfeG\x00r\x00a\x00y\x00s\x00c\x00a\x00l\x00e\x00 \x00i\x00m\x00a\x00g\x00e\x00
- Name to assign these images:\xff\xfec\x001\x00
- Match metadata:\xff\xfe\x5B\x00\x5D\x00
- Image set matching method:\xff\xfeO\x00r\x00d\x00e\x00r\x00
- Set intensity range from:\xff\xfeI\x00m\x00a\x00g\x00e\x00 \x00m\x00e\x00t\x00a\x00d\x00a\x00t\x00a\x00
- Assignments count:\xff\xfe1\x00
- Single images count:\xff\xfe0\x00
- Maximum intensity:\xff\xfe2\x005\x005\x00.\x000\x00
- Process as 3D?:\xff\xfeN\x00o\x00
- Relative pixel spacing in X:\xff\xfe1\x00.\x000\x00
- Relative pixel spacing in Y:\xff\xfe1\x00.\x000\x00
- Relative pixel spacing in Z:\xff\xfe1\x00.\x000\x00
- Select the rule criteria:\xff\xfea\x00n\x00d\x00 \x00(\x00m\x00e\x00t\x00a\x00d\x00a\x00t\x00a\x00 \x00d\x00o\x00e\x00s\x00 \x00C\x00h\x00a\x00n\x00n\x00e\x00l\x00N\x00u\x00m\x00b\x00e\x00r\x00 \x00"\x00c\x001\x00"\x00)\x00
- Name to assign these images:\xff\xfeA\x00c\x00t\x00i\x00n\x00
- Name to assign these objects:\xff\xfeC\x00e\x00l\x00l\x00
- Select the image type:\xff\xfeG\x00r\x00a\x00y\x00s\x00c\x00a\x00l\x00e\x00 \x00i\x00m\x00a\x00g\x00e\x00
- Set intensity range from:\xff\xfeI\x00m\x00a\x00g\x00e\x00 \x00m\x00e\x00t\x00a\x00d\x00a\x00t\x00a\x00
- Maximum intensity:\xff\xfe2\x005\x005\x00.\x000\x00
-
-Groups:[module_num:4|svn_version:\'Unknown\'|variable_revision_number:2|show_window:False|notes:\x5B\'The Groups module optionally allows you to split your list of images into image subsets (groups) which will be processed independently of each other. Examples of groupings include screening batches, microtiter plates, time-lapse movies, etc.\'\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
- Do you want to group your images?:\xff\xfeY\x00e\x00s\x00
- grouping metadata count:\xff\xfe1\x00
- Metadata category:\xff\xfeS\x00e\x00t\x00I\x00D\x00
-
-IdentifyPrimaryObjects:[module_num:5|svn_version:\'Unknown\'|variable_revision_number:13|show_window:False|notes:\x5B\'Identify the nuclei from the DAPI image. Three-class thresholding performs better than the default two-class thresholding in this case.\'\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
- Select the input image:\xff\xfec\x001\x00
- Name the primary objects to be identified:\xff\xfec\x001\x00
- Typical diameter of objects, in pixel units (Min,Max):\xff\xfe5\x000\x00,\x002\x000\x000\x00
- Discard objects outside the diameter range?:\xff\xfeY\x00e\x00s\x00
- Discard objects touching the border of the image?:\xff\xfeY\x00e\x00s\x00
- Method to distinguish clumped objects:\xff\xfeI\x00n\x00t\x00e\x00n\x00s\x00i\x00t\x00y\x00
- Method to draw dividing lines between clumped objects:\xff\xfeS\x00h\x00a\x00p\x00e\x00
- Size of smoothing filter:\xff\xfe1\x000\x00
- Suppress local maxima that are closer than this minimum allowed distance:\xff\xfe5\x00
- Speed up by using lower-resolution image to find local maxima?:\xff\xfeY\x00e\x00s\x00
- Fill holes in identified objects?:\xff\xfeA\x00f\x00t\x00e\x00r\x00 \x00b\x00o\x00t\x00h\x00 \x00t\x00h\x00r\x00e\x00s\x00h\x00o\x00l\x00d\x00i\x00n\x00g\x00 \x00a\x00n\x00d\x00 \x00d\x00e\x00c\x00l\x00u\x00m\x00p\x00i\x00n\x00g\x00
- Automatically calculate size of smoothing filter for declumping?:\xff\xfeY\x00e\x00s\x00
- Automatically calculate minimum allowed distance between local maxima?:\xff\xfeY\x00e\x00s\x00
- Handling of objects if excessive number of objects identified:\xff\xfeC\x00o\x00n\x00t\x00i\x00n\x00u\x00e\x00
- Maximum number of objects:\xff\xfe5\x000\x000\x00
- Use advanced settings?:\xff\xfeY\x00e\x00s\x00
- Threshold setting version:\xff\xfe9\x00
- Threshold strategy:\xff\xfeG\x00l\x00o\x00b\x00a\x00l\x00
- Thresholding method:\xff\xfeO\x00t\x00s\x00u\x00
- Threshold smoothing scale:\xff\xfe1\x00.\x003\x004\x008\x008\x00
- Threshold correction factor:\xff\xfe1\x00.\x000\x00
- Lower and upper bounds on threshold:\xff\xfe0\x00,\x001\x00
- Manual threshold:\xff\xfe0\x00.\x000\x00
- Select the measurement to threshold with:\xff\xfeN\x00o\x00n\x00e\x00
- Two-class or three-class thresholding?:\xff\xfeT\x00w\x00o\x00 \x00c\x00l\x00a\x00s\x00s\x00e\x00s\x00
- Assign pixels in the middle intensity class to the foreground or the background?:\xff\xfeB\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00
- Size of adaptive window:\xff\xfe1\x000\x00
- Lower outlier fraction:\xff\xfe0\x00.\x000\x005\x00
- Upper outlier fraction:\xff\xfe0\x00.\x000\x005\x00
- Averaging method:\xff\xfeM\x00e\x00a\x00n\x00
- Variance method:\xff\xfeS\x00t\x00a\x00n\x00d\x00a\x00r\x00d\x00 \x00d\x00e\x00v\x00i\x00a\x00t\x00i\x00o\x00n\x00
- # of deviations:\xff\xfe2\x00
- Thresholding method:\xff\xfeO\x00t\x00s\x00u\x00
-
-MeasureObjectSizeShape:[module_num:6|svn_version:\'Unknown\'|variable_revision_number:1|show_window:False|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
- Select objects to measure:\xff\xfec\x001\x00
- Calculate the Zernike features?:\xff\xfeN\x00o\x00
-
-ConvertObjectsToImage:[module_num:7|svn_version:\'Unknown\'|variable_revision_number:1|show_window:False|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
- Select the input objects:\xff\xfec\x001\x00
- Name the output image:\xff\xfeS\x00e\x00g\x00m\x00e\x00n\x00t\x00e\x00d\x00_\x00c\x00e\x00l\x00l\x00
- Select the color format:\xff\xfeG\x00r\x00a\x00y\x00s\x00c\x00a\x00l\x00e\x00
- Select the colormap:\xff\xfeD\x00e\x00f\x00a\x00u\x00l\x00t\x00
-
-SaveImages:[module_num:8|svn_version:\'Unknown\'|variable_revision_number:13|show_window:False|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
- Select the type of image to save:\xff\xfeI\x00m\x00a\x00g\x00e\x00
- Select the image to save:\xff\xfeS\x00e\x00g\x00m\x00e\x00n\x00t\x00e\x00d\x00_\x00c\x00e\x00l\x00l\x00
- Select method for constructing file names:\xff\xfeF\x00r\x00o\x00m\x00 \x00i\x00m\x00a\x00g\x00e\x00 \x00f\x00i\x00l\x00e\x00n\x00a\x00m\x00e\x00
- Select image name for file prefix:\xff\xfec\x001\x00
- Enter single file name:\xff\xfeO\x00r\x00i\x00g\x00B\x00l\x00u\x00e\x00
- Number of digits:\xff\xfe4\x00
- Append a suffix to the image file name?:\xff\xfeN\x00o\x00
- Text to append to the image name:\xff\xfe
- Saved file format:\xff\xfet\x00i\x00f\x00f\x00
- Output file location:\xff\xfeD\x00e\x00f\x00a\x00u\x00l\x00t\x00 \x00O\x00u\x00t\x00p\x00u\x00t\x00 \x00F\x00o\x00l\x00d\x00e\x00r\x00 \x00s\x00u\x00b\x00-\x00f\x00o\x00l\x00d\x00e\x00r\x00\x7C\x00\\\x00g\x00<\x00S\x00e\x00t\x00I\x00D\x00>\x00
- Image bit depth:\xff\xfe1\x006\x00-\x00b\x00i\x00t\x00 \x00i\x00n\x00t\x00e\x00g\x00e\x00r\x00
- Overwrite existing files without warning?:\xff\xfeY\x00e\x00s\x00
- When to save:\xff\xfeE\x00v\x00e\x00r\x00y\x00 \x00c\x00y\x00c\x00l\x00e\x00
- Record the file and path information to the saved image?:\xff\xfeN\x00o\x00
- Create subfolders in the output folder?:\xff\xfeN\x00o\x00
- Base image folder:\xff\xfeE\x00l\x00s\x00e\x00w\x00h\x00e\x00r\x00e\x00.\x00.\x00.\x00\x7C\x00
-
-ExportToSpreadsheet:[module_num:9|svn_version:\'Unknown\'|variable_revision_number:12|show_window:False|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
- Select the column delimiter:\xff\xfeC\x00o\x00m\x00m\x00a\x00 \x00(\x00"\x00,\x00"\x00)\x00
- Add image metadata columns to your object data file?:\xff\xfeN\x00o\x00
- Select the measurements to export:\xff\xfeY\x00e\x00s\x00
- Calculate the per-image mean values for object measurements?:\xff\xfeN\x00o\x00
- Calculate the per-image median values for object measurements?:\xff\xfeN\x00o\x00
- Calculate the per-image standard deviation values for object measurements?:\xff\xfeN\x00o\x00
- Output file location:\xff\xfeD\x00e\x00f\x00a\x00u\x00l\x00t\x00 \x00O\x00u\x00t\x00p\x00u\x00t\x00 \x00F\x00o\x00l\x00d\x00e\x00r\x00 \x00s\x00u\x00b\x00-\x00f\x00o\x00l\x00d\x00e\x00r\x00\x7C\x00\\\x00g\x00<\x00S\x00e\x00t\x00I\x00D\x00>\x00
- Create a GenePattern GCT file?:\xff\xfeN\x00o\x00
- Select source of sample row name:\xff\xfeM\x00e\x00t\x00a\x00d\x00a\x00t\x00a\x00
- Select the image to use as the identifier:\xff\xfeN\x00o\x00n\x00e\x00
- Select the metadata to use as the identifier:\xff\xfeN\x00o\x00n\x00e\x00
- Export all measurement types?:\xff\xfeN\x00o\x00
- Press button to select measurements:\xff\xfec\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00P\x00e\x00r\x00i\x00m\x00e\x00t\x00e\x00r\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00F\x00o\x00r\x00m\x00F\x00a\x00c\x00t\x00o\x00r\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00M\x00i\x00n\x00o\x00r\x00A\x00x\x00i\x00s\x00L\x00e\x00n\x00g\x00t\x00h\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00O\x00r\x00i\x00e\x00n\x00t\x00a\x00t\x00i\x00o\x00n\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00A\x00r\x00e\x00a\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00M\x00i\x00n\x00F\x00e\x00r\x00e\x00t\x00D\x00i\x00a\x00m\x00e\x00t\x00e\x00r\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00S\x00o\x00l\x00i\x00d\x00i\x00t\x00y\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00M\x00a\x00x\x00F\x00e\x00r\x00e\x00t\x00D\x00i\x00a\x00m\x00e\x00t\x00e\x00r\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00E\x00u\x00l\x00e\x00r\x00N\x00u\x00m\x00b\x00e\x00r\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00E\x00c\x00c\x00e\x00n\x00t\x00r\x00i\x00c\x00i\x00t\x00y\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00C\x00o\x00m\x00p\x00a\x00c\x00t\x00n\x00e\x00s\x00s\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00E\x00x\x00t\x00e\x00n\x00t\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00M\x00e\x00d\x00i\x00a\x00n\x00R\x00a\x00d\x00i\x00u\x00s\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00M\x00a\x00x\x00i\x00m\x00u\x00m\x00R\x00a\x00d\x00i\x00u\x00s\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00M\x00e\x00a\x00n\x00R\x00a\x00d\x00i\x00u\x00s\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00M\x00a\x00j\x00o\x00r\x00A\x00x\x00i\x00s\x00L\x00e\x00n\x00g\x00t\x00h\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00C\x00e\x00n\x00t\x00e\x00r\x00_\x00Y\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00C\x00e\x00n\x00t\x00e\x00r\x00_\x00X\x00,\x00c\x001\x00\x7C\x00A\x00r\x00e\x00a\x00S\x00h\x00a\x00p\x00e\x00_\x00C\x00e\x00n\x00t\x00e\x00r\x00_\x00Z\x00,\x00c\x001\x00\x7C\x00N\x00u\x00m\x00b\x00e\x00r\x00_\x00O\x00b\x00j\x00e\x00c\x00t\x00_\x00N\x00u\x00m\x00b\x00e\x00r\x00,\x00c\x001\x00\x7C\x00L\x00o\x00c\x00a\x00t\x00i\x00o\x00n\x00_\x00C\x00e\x00n\x00t\x00e\x00r\x00_\x00Y\x00,\x00c\x001\x00\x7C\x00L\x00o\x00c\x00a\x00t\x00i\x00o\x00n\x00_\x00C\x00e\x00n\x00t\x00e\x00r\x00_\x00X\x00,\x00c\x001\x00\x7C\x00L\x00o\x00c\x00a\x00t\x00i\x00o\x00n\x00_\x00C\x00e\x00n\x00t\x00e\x00r\x00_\x00Z\x00
- Representation of Nan/Inf:\xff\xfeN\x00a\x00N\x00
- Add a prefix to file names?:\xff\xfeY\x00e\x00s\x00
- Filename prefix:\xff\xfeC\x00e\x00l\x00l\x00P\x00r\x00o\x00f\x00i\x00l\x00e\x00r\x00 \x00d\x00a\x00t\x00a\x00s\x00h\x00e\x00e\x00t\x00 \x00
- Overwrite existing files without warning?:\xff\xfeN\x00o\x00
- Data to export:\xff\xfec\x001\x00
- Combine these object measurements with those of the previous object?:\xff\xfeN\x00o\x00
- File name:\xff\xfeD\x00A\x00T\x00A\x00.\x00c\x00s\x00v\x00
- Use the object name for the file name?:\xff\xfeY\x00e\x00s\x00
diff --git a/example_dataset/example_images/MEF_LMNA--/xy001c1.tif b/example_dataset/example_images/MEF_LMNA--/xy001c1.tif
deleted file mode 100644
index b44b424..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy001c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy002c1.tif b/example_dataset/example_images/MEF_LMNA--/xy002c1.tif
deleted file mode 100644
index 9ddfa62..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy002c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy003c1.tif b/example_dataset/example_images/MEF_LMNA--/xy003c1.tif
deleted file mode 100644
index 84439db..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy003c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy004c1.tif b/example_dataset/example_images/MEF_LMNA--/xy004c1.tif
deleted file mode 100644
index 7c37faa..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy004c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy005c1.tif b/example_dataset/example_images/MEF_LMNA--/xy005c1.tif
deleted file mode 100644
index 4548837..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy005c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy006c1.tif b/example_dataset/example_images/MEF_LMNA--/xy006c1.tif
deleted file mode 100644
index 5c6f6e0..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy006c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy007c1.tif b/example_dataset/example_images/MEF_LMNA--/xy007c1.tif
deleted file mode 100644
index 81ae08e..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy007c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy008c1.tif b/example_dataset/example_images/MEF_LMNA--/xy008c1.tif
deleted file mode 100644
index 9acc3b9..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy008c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy009c1.tif b/example_dataset/example_images/MEF_LMNA--/xy009c1.tif
deleted file mode 100644
index 31b5fef..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy009c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy010c1.tif b/example_dataset/example_images/MEF_LMNA--/xy010c1.tif
deleted file mode 100644
index 980bd2a..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy010c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy011c1.tif b/example_dataset/example_images/MEF_LMNA--/xy011c1.tif
deleted file mode 100644
index e631fcd..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy011c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy012c1.tif b/example_dataset/example_images/MEF_LMNA--/xy012c1.tif
deleted file mode 100644
index 4bf5d61..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy012c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy013c1.tif b/example_dataset/example_images/MEF_LMNA--/xy013c1.tif
deleted file mode 100644
index 9454074..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy013c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy014c1.tif b/example_dataset/example_images/MEF_LMNA--/xy014c1.tif
deleted file mode 100644
index 795573e..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy014c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy015c1.tif b/example_dataset/example_images/MEF_LMNA--/xy015c1.tif
deleted file mode 100644
index 48492ea..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy015c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy016c1.tif b/example_dataset/example_images/MEF_LMNA--/xy016c1.tif
deleted file mode 100644
index 870f5dc..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy016c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy017c1.tif b/example_dataset/example_images/MEF_LMNA--/xy017c1.tif
deleted file mode 100644
index 0395be1..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy017c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy018c1.tif b/example_dataset/example_images/MEF_LMNA--/xy018c1.tif
deleted file mode 100644
index eb14f0e..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy018c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy019c1.tif b/example_dataset/example_images/MEF_LMNA--/xy019c1.tif
deleted file mode 100644
index 27ec17b..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy019c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy020c1.tif b/example_dataset/example_images/MEF_LMNA--/xy020c1.tif
deleted file mode 100644
index 911dd8a..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy020c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy021c1.tif b/example_dataset/example_images/MEF_LMNA--/xy021c1.tif
deleted file mode 100644
index 7db2ba7..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy021c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy022c1.tif b/example_dataset/example_images/MEF_LMNA--/xy022c1.tif
deleted file mode 100644
index 84d88d2..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy022c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy023c1.tif b/example_dataset/example_images/MEF_LMNA--/xy023c1.tif
deleted file mode 100644
index 362c554..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy023c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy024c1.tif b/example_dataset/example_images/MEF_LMNA--/xy024c1.tif
deleted file mode 100644
index cce2f11..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy024c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy025c1.tif b/example_dataset/example_images/MEF_LMNA--/xy025c1.tif
deleted file mode 100644
index 21d639b..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy025c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy026c1.tif b/example_dataset/example_images/MEF_LMNA--/xy026c1.tif
deleted file mode 100644
index c900af9..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy026c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy027c1.tif b/example_dataset/example_images/MEF_LMNA--/xy027c1.tif
deleted file mode 100644
index 5a45585..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy027c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy028c1.tif b/example_dataset/example_images/MEF_LMNA--/xy028c1.tif
deleted file mode 100644
index 4bade43..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy028c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy029c1.tif b/example_dataset/example_images/MEF_LMNA--/xy029c1.tif
deleted file mode 100644
index 525909e..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy029c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_LMNA--/xy030c1.tif b/example_dataset/example_images/MEF_LMNA--/xy030c1.tif
deleted file mode 100644
index edf4bda..0000000
Binary files a/example_dataset/example_images/MEF_LMNA--/xy030c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/processed_output/grayscale.jpg b/example_dataset/example_images/MEF_wildtype/processed_output/grayscale.jpg
deleted file mode 100644
index a83c4f5..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/processed_output/grayscale.jpg and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_1.jpg b/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_1.jpg
deleted file mode 100644
index 242b33a..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_1.jpg and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_2.jpg b/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_2.jpg
deleted file mode 100644
index 54422d8..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_2.jpg and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_3.jpg b/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_3.jpg
deleted file mode 100644
index 9d903e6..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_3.jpg and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_4.jpg b/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_4.jpg
deleted file mode 100644
index c0e4467..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/processed_output/quadrant_4.jpg and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy001c1.tif b/example_dataset/example_images/MEF_wildtype/xy001c1.tif
deleted file mode 100644
index d4f1923..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy001c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy002c1.tif b/example_dataset/example_images/MEF_wildtype/xy002c1.tif
deleted file mode 100644
index ba6ac54..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy002c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy003c1.tif b/example_dataset/example_images/MEF_wildtype/xy003c1.tif
deleted file mode 100644
index f658d70..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy003c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy004c1.tif b/example_dataset/example_images/MEF_wildtype/xy004c1.tif
deleted file mode 100644
index c6646fc..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy004c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy005c1.tif b/example_dataset/example_images/MEF_wildtype/xy005c1.tif
deleted file mode 100644
index ff97ceb..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy005c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy006c1.tif b/example_dataset/example_images/MEF_wildtype/xy006c1.tif
deleted file mode 100644
index 47b9649..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy006c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy007c1.tif b/example_dataset/example_images/MEF_wildtype/xy007c1.tif
deleted file mode 100644
index 9d981f4..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy007c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy008c1.tif b/example_dataset/example_images/MEF_wildtype/xy008c1.tif
deleted file mode 100644
index 17e125c..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy008c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy009c1.tif b/example_dataset/example_images/MEF_wildtype/xy009c1.tif
deleted file mode 100644
index 11a43a6..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy009c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy010c1.tif b/example_dataset/example_images/MEF_wildtype/xy010c1.tif
deleted file mode 100644
index 504d334..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy010c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy011c1.tif b/example_dataset/example_images/MEF_wildtype/xy011c1.tif
deleted file mode 100644
index 968ba4c..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy011c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy012c1.tif b/example_dataset/example_images/MEF_wildtype/xy012c1.tif
deleted file mode 100644
index b348acf..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy012c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy013c1.tif b/example_dataset/example_images/MEF_wildtype/xy013c1.tif
deleted file mode 100644
index e7a2842..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy013c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy014c1.tif b/example_dataset/example_images/MEF_wildtype/xy014c1.tif
deleted file mode 100644
index 2b9ebb7..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy014c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy015c1.tif b/example_dataset/example_images/MEF_wildtype/xy015c1.tif
deleted file mode 100644
index ef77f19..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy015c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy016c1.tif b/example_dataset/example_images/MEF_wildtype/xy016c1.tif
deleted file mode 100644
index 080eb03..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy016c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy017c1.tif b/example_dataset/example_images/MEF_wildtype/xy017c1.tif
deleted file mode 100644
index 91f4bc4..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy017c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy018c1.tif b/example_dataset/example_images/MEF_wildtype/xy018c1.tif
deleted file mode 100644
index 1571f90..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy018c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy019c1.tif b/example_dataset/example_images/MEF_wildtype/xy019c1.tif
deleted file mode 100644
index 6511b63..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy019c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy020c1.tif b/example_dataset/example_images/MEF_wildtype/xy020c1.tif
deleted file mode 100644
index 224f395..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy020c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy021c1.tif b/example_dataset/example_images/MEF_wildtype/xy021c1.tif
deleted file mode 100644
index 936bb93..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy021c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy022c1.tif b/example_dataset/example_images/MEF_wildtype/xy022c1.tif
deleted file mode 100644
index 25850a3..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy022c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy023c1.tif b/example_dataset/example_images/MEF_wildtype/xy023c1.tif
deleted file mode 100644
index d562a35..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy023c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy024c1.tif b/example_dataset/example_images/MEF_wildtype/xy024c1.tif
deleted file mode 100644
index e963ff8..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy024c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy025c1.tif b/example_dataset/example_images/MEF_wildtype/xy025c1.tif
deleted file mode 100644
index cac6672..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy025c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy026c1.tif b/example_dataset/example_images/MEF_wildtype/xy026c1.tif
deleted file mode 100644
index 8c1b942..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy026c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy027c1.tif b/example_dataset/example_images/MEF_wildtype/xy027c1.tif
deleted file mode 100644
index a5a2b2b..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy027c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy028c1.tif b/example_dataset/example_images/MEF_wildtype/xy028c1.tif
deleted file mode 100644
index db143af..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy028c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy029c1.tif b/example_dataset/example_images/MEF_wildtype/xy029c1.tif
deleted file mode 100644
index 2a9fcac..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy029c1.tif and /dev/null differ
diff --git a/example_dataset/example_images/MEF_wildtype/xy030c1.tif b/example_dataset/example_images/MEF_wildtype/xy030c1.tif
deleted file mode 100644
index e441718..0000000
Binary files a/example_dataset/example_images/MEF_wildtype/xy030c1.tif and /dev/null differ
diff --git a/example_dataset/example_output/Example model/Example model figures/Registered objects.png b/example_dataset/example_output/Example model/Example model figures/Registered objects.png
deleted file mode 100644
index c89fc75..0000000
Binary files a/example_dataset/example_output/Example model/Example model figures/Registered objects.png and /dev/null differ
diff --git a/example_dataset/example_output/Example model/Example model figures/Shape mode dendrogram.png b/example_dataset/example_output/Example model/Example model figures/Shape mode dendrogram.png
deleted file mode 100644
index 6d0528a..0000000
Binary files a/example_dataset/example_output/Example model/Example model figures/Shape mode dendrogram.png and /dev/null differ
diff --git a/example_dataset/example_output/Example model/Example model.pickle b/example_dataset/example_output/Example model/Example model.pickle
deleted file mode 100644
index 669fcf8..0000000
Binary files a/example_dataset/example_output/Example model/Example model.pickle and /dev/null differ
diff --git a/example_dataset/example_output/Result based on Example model/Shape mode distribution_1_MEF_LMNA--.png b/example_dataset/example_output/Result based on Example model/Shape mode distribution_1_MEF_LMNA--.png
deleted file mode 100644
index e62dc9d..0000000
Binary files a/example_dataset/example_output/Result based on Example model/Shape mode distribution_1_MEF_LMNA--.png and /dev/null differ
diff --git a/example_dataset/example_output/Result based on Example model/Shape mode distribution_2_MEF_wildtype.png b/example_dataset/example_output/Result based on Example model/Shape mode distribution_2_MEF_wildtype.png
deleted file mode 100644
index 4232ab3..0000000
Binary files a/example_dataset/example_output/Result based on Example model/Shape mode distribution_2_MEF_wildtype.png and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/VAMPIRE datasheet c1.csv b/example_dataset/example_segmented_images/MEF_LMNA--/VAMPIRE datasheet c1.csv
deleted file mode 100644
index 4b5dce7..0000000
--- a/example_dataset/example_segmented_images/MEF_LMNA--/VAMPIRE datasheet c1.csv
+++ /dev/null
@@ -1,113 +0,0 @@
-Filename,ImageID,ObjectID,X,Y,Area,Perimeter,Major Axis,Minor Axis,Circularity,Aspect Ratio,Shape mode,Distance from cluster center
-xy001c1.tiff,1,1,235,374,15045,670.6894296,143.1187539,137.4301844,0.420299751,1.041392432,7.0,3.29
-xy001c1.tiff,1,2,796,330,6635,333.119841,103.8723863,82.3381772,0.7513629740000001,1.261533712,8.0,1.86
-xy001c1.tiff,1,3,471,539,8053,339.8477631,109.99209499999999,93.92659062,0.876190887,1.171043197,4.0,1.28
-xy001c1.tiff,1,4,1069,810,7696,566.9432175,140.7891111,85.71093797,0.30088171399999997,1.642603785,2.0,4.86
-xy002c1.tiff,2,1,745,39,3261,218.4091629,71.22646594,58.870260800000004,0.859051013,1.209888745,4.0,1.15
-xy002c1.tiff,2,2,623,94,8503,547.1858582,156.7011033,82.68648597,0.356871868,1.8951235069999999,10.0,3.62
-xy002c1.tiff,2,3,501,74,9079,400.9188309,123.1123818,99.75146459,0.7097983240000001,1.23419122,7.0,1.99
-xy002c1.tiff,2,4,663,215,13346,588.9848481,181.6604838,103.1550231,0.48345130799999997,1.761043509,1.0,2.81
-xy002c1.tiff,2,5,987,280,4703,256.0071427,78.51888829,76.52230831,0.9017386359999999,1.026091476,4.0,2.05
-xy002c1.tiff,2,6,1186,414,9239,815.2224382,192.135275,97.52761845,0.174695847,1.970060154,2.0,5.67
-xy002c1.tiff,2,7,408,755,25454,724.1452737000001,202.57890480000003,163.47045380000003,0.609978465,1.239238652,1.0,3.38
-xy003c1.tiff,3,1,626,256,13723,904.8559518,186.5244052,100.89639079999999,0.210620206,1.848672719,10.0,9.82
-xy003c1.tiff,3,2,500,332,9056,445.5939751,110.7970697,106.8534883,0.573149157,1.036906435,8.0,3.03
-xy003c1.tiff,3,3,152,369,8418,825.2163415,184.6333331,100.1269243,0.155339945,1.843992855,1.0,5.99
-xy004c1.tiff,4,1,1096,465,5744,520.7005769,105.85320259999999,83.36550069,0.266224597,1.2697482979999999,10.0,6.86
-xy004c1.tiff,4,2,624,527,4869,265.1787156,79.24849662,78.47807845,0.8701063,1.009816986,4.0,1.69
-xy004c1.tiff,4,3,888,584,3786,277.17871560000003,92.18310784,55.59229767,0.619256705,1.658199278,1.0,2.04
-xy004c1.tiff,4,4,598,648,19298,760.8021279,203.3301669,135.15920669999997,0.41896595,1.504375262,5.0,4.14
-xy004c1.tiff,4,5,1008,798,18410,1076.993037,215.5978464,142.7774776,0.199451755,1.510027002,6.0,5.89
-xy004c1.tiff,4,6,1006,938,2385,264.45079350000003,91.72398756,40.17293838,0.42855690700000004,2.283228244,1.0,3.0
-xy005c1.tiff,5,1,252,265,7409,336.492424,107.278892,88.860272,0.8222783909999999,1.207276205,4.0,1.24
-xy005c1.tiff,5,2,55,418,6110,499.04372259999997,107.7704183,83.70878407,0.308300252,1.2874445559999999,7.0,5.97
-xy005c1.tiff,5,3,1141,428,4477,507.56453789999995,115.3049092,72.39085105,0.21838078100000002,1.5928105209999999,1.0,6.96
-xy005c1.tiff,5,4,381,447,7093,649.4528855000001,196.974131,59.50408355,0.211321908,3.31026241,9.0,4.75
-xy006c1.tiff,6,1,103,384,4777,470.20919879999997,115.9221249,71.15121237,0.271508145,1.629236116,1.0,5.9
-xy006c1.tiff,6,2,1159,589,2234,177.0954544,59.85039473,47.6831492,0.895113701,1.255168665,4.0,1.25
-xy007c1.tiff,7,1,844,242,5900,424.3391411,110.6732405,72.3301147,0.41175197399999997,1.530112885,10.0,2.82
-xy007c1.tiff,7,2,868,447,14545,565.4995667000001,152.0666441,126.62739450000001,0.571556327,1.200898469,5.0,2.61
-xy007c1.tiff,7,3,118,544,2370,182.16652219999997,61.71886048,49.07798279,0.8974723459999999,1.257567181,4.0,1.48
-xy007c1.tiff,7,4,372,630,8035,491.45183949999995,119.60269029999999,92.63005538,0.418055378,1.2911866440000002,8.0,3.23
-xy007c1.tiff,7,5,915,661,9245,655.7960312999999,152.8778977,86.87351293,0.270133976,1.759775708,6.0,5.08
-xy007c1.tiff,7,6,1202,639,5694,460.5756852,135.099575,62.19511239999999,0.337306968,2.1721895780000002,9.0,3.74
-xy008c1.tiff,8,1,304,496,4756,258.1076477,78.90430264,76.81769454,0.897118813,1.027163118,4.0,1.81
-xy008c1.tiff,8,2,161,593,5670,461.6112191000001,137.7501709,60.40768833,0.334379937,2.280341703,9.0,3.39
-xy008c1.tiff,8,3,342,738,7538,562.9432175,120.2663946,92.16578049,0.298907502,1.304892054,2.0,4.52
-xy009c1.tiff,9,1,1131,768,2791,197.8233765,61.783455599999996,57.58773752,0.896219688,1.072857838,4.0,1.79
-xy010c1.tiff,10,1,1208,403,4290,371.9360749,120.4519746,48.41099097,0.389700129,2.488112145,10.0,3.45
-xy010c1.tiff,10,2,846,643,4140,274.04877319999997,88.89233768,62.20912557,0.692714822,1.428927619,3.0,1.78
-xy010c1.tiff,10,3,92,723,2187,175.33809509999998,54.60248496,51.07402749,0.893935283,1.069085162,4.0,1.56
-xy011c1.tiff,11,1,1215,191,2228,285.04877319999997,78.28612361,39.22842083,0.34457732700000004,1.9956481030000002,3.0,3.86
-xy011c1.tiff,11,2,256,266,12867,963.9747468,176.69359530000003,104.6371241,0.174002645,1.688631992,3.0,5.25
-xy011c1.tiff,11,3,551,406,3006,418.6528496,94.94306377,53.881818200000005,0.215521567,1.7620612469999999,9.0,8.02
-xy011c1.tiff,11,4,947,452,4525,444.2031022,158.4046754,47.08416581,0.288180953,3.364287604,10.0,4.36
-xy011c1.tiff,11,5,885,577,12286,696.293506,153.0590683,111.3239532,0.318445918,1.37489789,3.0,4.29
-xy011c1.tiff,11,6,1048,623,2130,185.0954544,69.3698669,39.93972956,0.781264266,1.736863711,1.0,1.97
-xy011c1.tiff,11,7,641,741,5633,317.4213562,104.28429,72.22549587,0.7025501479999999,1.443870875,3.0,1.6
-xy011c1.tiff,11,8,1222,871,2114,370.9310242,75.13434239,44.65799194,0.193076105,1.68243889,6.0,6.82
-xy012c1.tiff,12,1,824,302,6161,456.10259709999997,108.4485054,78.47555787,0.37216539,1.381939911,8.0,3.94
-xy012c1.tiff,12,2,758,471,9150,618.9615075,143.2973479,87.44046075,0.300126038,1.638799095,7.0,5.74
-xy012c1.tiff,12,3,393,511,10495,593.3990617000001,135.32341540000002,112.0394313,0.37454033700000006,1.207819549,3.0,4.52
-xy013c1.tiff,13,1,689,363,6176,963.0041841,148.57196499999998,82.72383731,0.08368754,1.795999435,7.0,11.37
-xy013c1.tiff,13,2,313,405,5097,299.3208512,83.21382258,78.39869867,0.714908654,1.061418416,4.0,3.41
-xy013c1.tiff,13,3,371,768,16520,952.039718,183.62315230000002,143.6368365,0.229039176,1.278384826,3.0,4.86
-xy013c1.tiff,13,4,476,960,2512,331.33304449999997,101.56029620000001,35.88157618,0.28754114399999997,2.8304301819999997,3.0,4.38
-xy015c1.tiff,15,1,388,545,5808,352.6345597,116.5123409,68.38111383,0.586930565,1.703867258,1.0,2.25
-xy015c1.tiff,15,2,391,638,5742,339.4629868,129.7290818,59.72036031,0.626164145,2.172275605,1.0,2.25
-xy015c1.tiff,15,3,376,855,20895,722.6955262,189.7550725,154.1113464,0.502737761,1.2312855409999999,4.0,2.81
-xy016c1.tiff,16,1,695,537,4397,363.1614716,94.97990283,68.53773985,0.418954438,1.38580442,5.0,4.22
-xy016c1.tiff,16,2,795,572,6323,387.3685784,109.9050504,79.06123152,0.5295223139999999,1.390125707,5.0,3.39
-xy017c1.tiff,17,1,1030,76,4651,318.8772004,88.73930764,68.5211603,0.574790083,1.295064287,7.0,3.48
-xy017c1.tiff,17,2,81,227,6628,365.119841,117.01213670000001,76.07520831,0.624771912,1.5381112890000002,3.0,1.54
-xy017c1.tiff,17,3,512,306,7376,339.747258,101.805683,93.61853520000001,0.803006113,1.08745221,4.0,2.55
-xy017c1.tiff,17,4,1019,598,8780,696.3462837999999,190.4952147,84.16049807,0.227537958,2.263475372,5.0,4.43
-xy017c1.tiff,17,5,855,623,8768,472.87824639999997,142.41210569999998,87.95520481,0.492733095,1.6191435859999999,3.0,2.77
-xy017c1.tiff,17,6,338,750,9156,613.3401872000001,147.27877990000002,87.37572891,0.305853043,1.685579986,9.0,6.24
-xy018c1.tiff,18,1,808,126,9324,444.45183949999995,146.7721375,82.79060695,0.593147512,1.772811469,3.0,1.88
-xy018c1.tiff,18,2,517,174,7040,431.3036072,122.06725220000001,83.43795737,0.475572072,1.462970284,2.0,3.32
-xy018c1.tiff,18,3,870,264,12366,602.0975465,155.52110249999998,119.61958600000001,0.42865253,1.300130754,6.0,3.86
-xy018c1.tiff,18,4,1114,273,7074,392.4335495,133.891722,72.97859624,0.577221717,1.8346711080000002,3.0,2.07
-xy018c1.tiff,18,5,928,513,10485,462.29751060000007,136.6176303,105.26662859999999,0.616502993,1.297824697,2.0,2.89
-xy018c1.tiff,18,6,701,897,7274,373.2619767,137.1190916,71.78208194,0.656078276,1.9102133559999999,1.0,2.4
-xy019c1.tiff,19,1,487,636,2901,203.8233765,64.05501781,57.85592326,0.8775050190000001,1.107147103,4.0,1.82
-xy019c1.tiff,19,2,338,762,9244,440.2031022,132.5964117,93.43065302,0.599464685,1.4191960280000002,4.0,2.35
-xy019c1.tiff,19,3,361,896,4449,259.5218613,85.76108590000001,67.11021887,0.830088458,1.277913965,4.0,1.77
-xy022c1.tiff,22,1,167,252,2243,456.7949853,102.51722,48.14942732,0.135081689,2.129147235,9.0,5.62
-xy022c1.tiff,22,2,788,313,9559,585.9259736,120.24695619999999,107.4832861,0.349894266,1.118750278,9.0,4.83
-xy022c1.tiff,22,3,418,292,14805,517.1025971,176.66475559999998,112.75453229999998,0.69576862,1.566808465,1.0,1.54
-xy022c1.tiff,22,4,963,318,7191,352.492424,132.5936369,70.01671258,0.7272765390000001,1.893742679,1.0,2.02
-xy022c1.tiff,22,5,1182,340,3480,235.7228714,83.90931606,53.30578509,0.7870198070000001,1.5741127519999998,10.0,1.37
-xy022c1.tiff,22,6,1110,359,3350,355.8772004,100.91643640000001,47.54601283,0.332394891,2.122500509,6.0,4.96
-xy023c1.tiff,23,1,700,179,25907,650.9676042,186.48145459999998,177.52793909999997,0.768260026,1.050434402,4.0,2.04
-xy023c1.tiff,23,2,142,295,3824,247.62236639999998,86.91566365,56.73694964,0.783696683,1.531905825,1.0,1.44
-xy023c1.tiff,23,3,71,312,4034,299.0782105,99.6877691,53.55798533,0.566730011,1.861305433,1.0,2.54
-xy023c1.tiff,23,4,456,474,13524,454.156421,146.6319618,119.41880400000001,0.8239561670000001,1.227880006,4.0,1.6
-xy023c1.tiff,23,5,357,578,17525,705.5828278,199.14257759999998,135.81797759999998,0.44235596200000005,1.4662460819999998,8.0,3.37
-xy023c1.tiff,23,6,847,612,8438,579.4995667000001,136.3818601,91.99389228,0.315749851,1.482509944,2.0,3.62
-xy023c1.tiff,23,7,996,725,5614,454.2680733,109.72821709999998,79.85280783,0.341867521,1.374130981,8.0,6.55
-xy024c1.tiff,24,1,954,328,8270,584.9726548,182.2196866,64.58705076,0.303699617,2.82130372,8.0,4.77
-xy024c1.tiff,24,2,503,448,10870,644.6772363,182.80283799999998,90.44989416,0.32866596800000003,2.021039822,7.0,4.32
-xy024c1.tiff,24,3,470,566,7982,548.4345956000001,142.1783186,82.29213458,0.33348162,1.727726706,1.0,3.81
-xy024c1.tiff,24,4,392,621,3085,272.97770539999993,82.58427727,57.15479711,0.520248306,1.444922936,5.0,3.15
-xy024c1.tiff,24,5,718,622,7059,346.67619019999995,98.30149315,92.82630023,0.738082682,1.058983207,4.0,1.94
-xy026c1.tiff,26,1,652,175,6740,324.97770539999993,104.26133229999999,84.0856687,0.8019783140000001,1.239941763,4.0,1.35
-xy026c1.tiff,26,2,1012,318,8922,421.9604615,131.709878,91.75433299,0.629692565,1.435462214,10.0,2.35
-xy026c1.tiff,26,3,394,697,10151,562.2447327000001,171.04835759999997,87.36295701,0.403522635,1.9579048540000001,8.0,4.17
-xy027c1.tiff,27,1,262,219,1981,588.6650429,102.24016479999999,47.07767502,0.071838598,2.171733517,10.0,10.73
-xy027c1.tiff,27,2,210,298,6743,447.6883835,143.5222757,63.70894771,0.422777019,2.252780509,2.0,4.09
-xy027c1.tiff,27,3,256,373,4228,489.901587,91.21133874,77.9492493,0.22137427399999998,1.170137488,6.0,8.29
-xy027c1.tiff,27,4,384,543,3535,228.2081528,76.84549247,59.74984191,0.852976313,1.286120432,4.0,2.03
-xy027c1.tiff,27,5,878,956,5564,279.17871560000003,86.05884658,82.38765763,0.897082481,1.044559938,4.0,1.84
-xy028c1.tiff,28,1,520,87,4016,430.77164469999997,121.69229740000002,44.88384806,0.271962705,2.7112714860000002,9.0,3.65
-xy028c1.tiff,28,2,678,124,7147,475.7888886,157.3073388,64.92757311,0.396739147,2.422812547,1.0,3.48
-xy028c1.tiff,28,3,858,391,2802,327.6406563,96.11893202,53.47897571,0.328006478,1.797321859,10.0,4.1
-xy028c1.tiff,28,4,792,492,2484,354.25588,71.23751269,54.61991034,0.24872951399999998,1.304240748,1.0,7.07
-xy028c1.tiff,28,5,635,817,7895,337.3624817,106.59129579999998,94.59784268,0.8717027829999999,1.126783579,4.0,1.46
-xy029c1.tiff,29,1,598,824,5525,302.67619019999995,93.23104636,77.24236744,0.757854142,1.206993642,4.0,1.59
-xy029c1.tiff,29,2,537,876,5272,303.86500710000007,88.89810617,77.66896765,0.7175032720000001,1.144576899,4.0,2.35
-xy030c1.tiff,30,1,935,291,4768,438.0670632,140.3812614,52.82855722,0.312223011,2.657298795,8.0,5.25
-xy030c1.tiff,30,2,474,458,17065,858.5067094000001,164.53284069999998,140.4738074,0.290956973,1.1712706,4.0,4.17
-xy030c1.tiff,30,3,212,811,11833,485.2447327,131.0121398,117.11967279999999,0.631514125,1.118617708,4.0,1.77
-xy030c1.tiff,30,4,817,846,8641,566.3279938,118.3241564,98.37888536,0.33856168700000006,1.202739348,8.0,4.32
-xy030c1.tiff,30,5,911,883,3297,355.5756852,129.9428355,41.50337159,0.327691141,3.130898298,10.0,4.67
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/c1_boundary_coordinate_stack.pickle b/example_dataset/example_segmented_images/MEF_LMNA--/c1_boundary_coordinate_stack.pickle
deleted file mode 100644
index 53ce9f0..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/c1_boundary_coordinate_stack.pickle and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy001c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy001c1.tiff
deleted file mode 100644
index 416d068..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy001c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy002c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy002c1.tiff
deleted file mode 100644
index acbb93c..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy002c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy003c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy003c1.tiff
deleted file mode 100644
index 9cf8812..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy003c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy004c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy004c1.tiff
deleted file mode 100644
index 239365c..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy004c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy005c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy005c1.tiff
deleted file mode 100644
index e82fbea..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy005c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy006c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy006c1.tiff
deleted file mode 100644
index 8d25214..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy006c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy007c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy007c1.tiff
deleted file mode 100644
index 3643789..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy007c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy008c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy008c1.tiff
deleted file mode 100644
index 652ff1c..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy008c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy009c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy009c1.tiff
deleted file mode 100644
index da209c0..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy009c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy010c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy010c1.tiff
deleted file mode 100644
index b0fea3e..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy010c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy011c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy011c1.tiff
deleted file mode 100644
index 1b24fe3..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy011c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy012c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy012c1.tiff
deleted file mode 100644
index 3271356..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy012c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy013c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy013c1.tiff
deleted file mode 100644
index 4ec53d7..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy013c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy014c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy014c1.tiff
deleted file mode 100644
index 2acfd31..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy014c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy015c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy015c1.tiff
deleted file mode 100644
index b13549a..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy015c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy016c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy016c1.tiff
deleted file mode 100644
index 05d1b3a..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy016c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy017c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy017c1.tiff
deleted file mode 100644
index 297b023..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy017c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy018c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy018c1.tiff
deleted file mode 100644
index ae0021a..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy018c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy019c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy019c1.tiff
deleted file mode 100644
index 4ec0292..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy019c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy020c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy020c1.tiff
deleted file mode 100644
index cb01480..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy020c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy021c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy021c1.tiff
deleted file mode 100644
index 76cdb1a..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy021c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy022c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy022c1.tiff
deleted file mode 100644
index b197af7..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy022c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy023c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy023c1.tiff
deleted file mode 100644
index df744c9..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy023c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy024c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy024c1.tiff
deleted file mode 100644
index d51e749..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy024c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy025c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy025c1.tiff
deleted file mode 100644
index 3f34581..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy025c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy026c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy026c1.tiff
deleted file mode 100644
index f1344b6..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy026c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy027c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy027c1.tiff
deleted file mode 100644
index 4ab36fa..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy027c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy028c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy028c1.tiff
deleted file mode 100644
index aad15ae..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy028c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy029c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy029c1.tiff
deleted file mode 100644
index 70d27a3..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy029c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_LMNA--/xy030c1.tiff b/example_dataset/example_segmented_images/MEF_LMNA--/xy030c1.tiff
deleted file mode 100644
index 1e40cc6..0000000
Binary files a/example_dataset/example_segmented_images/MEF_LMNA--/xy030c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/VAMPIRE datasheet c1.csv b/example_dataset/example_segmented_images/MEF_wildtype/VAMPIRE datasheet c1.csv
deleted file mode 100644
index b03f4a3..0000000
--- a/example_dataset/example_segmented_images/MEF_wildtype/VAMPIRE datasheet c1.csv
+++ /dev/null
@@ -1,206 +0,0 @@
-Filename,ImageID,ObjectID,X,Y,Area,Perimeter,Major Axis,Minor Axis,Circularity,Aspect Ratio,Shape mode,Distance from cluster center
-xy001c1.tiff,1,1,407,103,21468,969.2762620207483,236.54346388581303,125.91050823560076,0.2871483314260823,1.878663403083073,2.0,2.79
-xy001c1.tiff,1,2,168,485,2805,282.93607486307087,103.91644603357223,41.148249155669056,0.4403176364126969,2.525415981623984,10.0,2.93
-xy001c1.tiff,1,3,1154,498,6235,531.6356056934325,131.4590390288459,67.83262821099729,0.27721594246545395,1.937991236605708,12.0,3.85
-xy001c1.tiff,1,4,502,537,3774,335.6467529817257,95.3761048218189,54.97685381541423,0.4209658436201736,1.734841086797107,10.0,2.72
-xy001c1.tiff,1,5,313,572,11321,719.9026329981913,177.6405211075476,96.54304420964344,0.2745030223283394,1.840013670190478,9.0,3.59
-xy001c1.tiff,1,6,138,595,4038,242.10764773832474,73.43116490276039,70.34993162749811,0.8656835443854952,1.043798667660082,4.0,2.11
-xy001c1.tiff,1,7,828,661,11246,644.7188668141176,131.52864122546956,113.5095077103008,0.3399908148352468,1.1587455877366435,4.0,2.73
-xy001c1.tiff,1,8,510,822,9078,461.3452377915606,131.62640297642324,90.95066664650496,0.5359792275193461,1.4472285671968887,5.0,2.15
-xy001c1.tiff,1,9,144,920,3020,372.19090885900994,95.74684084168365,49.362505541045415,0.2739588189634936,1.9396673607272468,12.0,4.49
-xy002c1.tiff,2,1,653,119,5220,452.20310216782974,102.84145902637094,80.23100596006927,0.3207844386452269,1.2818168960458358,5.0,4.26
-xy002c1.tiff,2,2,1213,166,2627,400.19090885900994,85.51807495360379,46.44147342347972,0.20612729293951154,1.8414160587413204,8.0,6.85
-xy002c1.tiff,2,3,1161,261,2128,358.8772003600252,80.38544006697008,46.99621671195778,0.2076298313014013,1.7104661968782842,12.0,7.42
-xy002c1.tiff,2,4,121,453,7746,471.5168106668145,137.41434284663174,79.70140184441789,0.4378173770143837,1.7241145032163063,12.0,2.29
-xy002c1.tiff,2,5,1226,467,3586,446.30360723121805,77.32366182223117,71.52812408394539,0.22623477146211804,1.081024601337009,7.0,6.4
-xy002c1.tiff,2,6,316,492,17791,1164.5382386916235,173.2263929050527,155.46462059781774,0.16485522505121528,1.1142496102260089,8.0,6.95
-xy003c1.tiff,3,1,199,121,6940,448.92492757477766,120.65985684837177,82.1035383608527,0.4327348696803784,1.4696060517886629,2.0,3.73
-xy003c1.tiff,3,2,118,214,3503,302.4213562373095,81.64023150339024,60.646600667650105,0.4813102249712585,1.3461633563072644,8.0,2.99
-xy003c1.tiff,3,3,496,312,8647,578.3218971944477,178.99766340165337,75.04440623775898,0.3248897800443598,2.3852232614719493,12.0,3.14
-xy003c1.tiff,3,4,186,425,4976,416.0965004500315,92.87277811194731,76.68957867429144,0.3611620498561228,1.2110221456084354,10.0,5.15
-xy003c1.tiff,3,5,991,438,2895,339.2558800164656,87.37083187953516,47.28654819828135,0.3160848385666704,1.8476889349836427,5.0,3.53
-xy003c1.tiff,3,6,603,495,2364,294.1787155501902,92.34241131070095,42.18655289074117,0.34326920279112993,2.188906297934754,2.0,3.6
-xy003c1.tiff,3,7,669,709,12734,598.7899346259833,157.7928573007213,107.42267686171677,0.4462988077417014,1.4688970886831012,7.0,3.06
-xy003c1.tiff,3,8,1188,710,2376,365.21424945612296,95.34098442173763,40.65986388021168,0.22385194512522635,2.344842685716372,6.0,5.4
-xy004c1.tiff,4,1,984,128,18591,805.0924958366749,217.61251050916283,122.5165039948863,0.3604301043486826,1.77618935746196,8.0,2.43
-xy004c1.tiff,4,2,656,182,6492,613.4467888754755,158.4433842454166,62.03812646125064,0.21678767129017032,2.5539679110777342,11.0,3.81
-xy004c1.tiff,4,3,797,364,3426,441.75335469952387,102.12960095522018,59.653130517648776,0.22061625604988186,1.7120576920100523,9.0,4.83
-xy004c1.tiff,4,4,1013,529,3728,355.6761902332486,83.07313593123277,65.50775791180179,0.3703191583217276,1.2681419511118148,11.0,3.33
-xy004c1.tiff,4,5,148,671,3430,362.94826817189073,115.99533426225719,47.387061674920034,0.32720092827132585,2.4478271106572667,2.0,3.84
-xy005c1.tiff,5,1,785,552,5717,515.7005768508882,138.07048827785653,60.12366936586328,0.2701362220681035,2.296441480270822,3.0,3.25
-xy005c1.tiff,5,2,288,634,8600,568.0853531617382,135.68709681163003,98.68394629795887,0.3348737697260148,1.3749662625159582,10.0,4.45
-xy005c1.tiff,5,3,580,784,4789,416.36248173426384,173.5564782021643,43.77279795800774,0.3471454873353028,3.964939101417759,2.0,3.91
-xy005c1.tiff,5,4,145,846,7465,495.2741699796952,124.42057213670068,83.2975409378797,0.3824267990663223,1.493688417878855,8.0,4.07
-xy005c1.tiff,5,5,174,936,2588,277.4213562373095,89.08623865414509,48.094513246152324,0.4225658097645408,1.852316046909409,10.0,4.01
-xy005c1.tiff,5,6,942,935,11300,577.7422074112308,162.04580592745242,102.17940550130164,0.4254221662152025,1.5858949769030326,1.0,3.94
-xy005c1.tiff,5,7,72,952,4302,458.58787847867995,133.21502974599574,68.04860343544158,0.25706036967892537,1.9576453155629887,10.0,4.57
-xy006c1.tiff,6,1,402,349,5133,404.23253941935263,94.90454561563446,73.99671954861999,0.39474677308555606,1.2825507156878333,8.0,3.95
-xy006c1.tiff,6,2,1099,559,3120,226.45079348883235,81.91864886225308,49.31872136352126,0.7645691394014327,1.661005123357567,4.0,1.57
-xy006c1.tiff,6,3,758,610,12417,680.9676041556469,178.95442943987152,99.6370033623648,0.33649114654169743,1.7960639461329562,3.0,3.18
-xy006c1.tiff,6,4,321,647,6059,426.4163056034262,117.79523786562125,75.07896548577055,0.4187388057895791,1.5689512648911836,5.0,3.24
-xy007c1.tiff,7,1,740,426,7274,483.86605307173187,115.91165366927805,86.86710376931845,0.3904207151089005,1.3343561444973393,1.0,2.43
-xy007c1.tiff,7,2,58,514,3186,224.2081528017131,82.14616961760662,50.12710065459389,0.7964395791492641,1.6387576489540765,4.0,1.96
-xy007c1.tiff,7,3,813,588,2722,469.31580054003786,111.1686740976634,45.70406482486996,0.15529863304171873,2.432358577374736,12.0,4.69
-xy007c1.tiff,7,4,868,713,6627,764.1513703397203,142.75533601105846,89.2276009441466,0.14261593124099728,1.5999010900272703,6.0,6.18
-xy007c1.tiff,7,5,748,805,3699,329.3330444827409,85.82871173921122,63.7935503295524,0.4285717865074053,1.3454136240392158,5.0,3.55
-xy007c1.tiff,7,6,1078,932,8165,1148.8580438450185,132.82240763735612,117.06578012435743,0.07773799127368068,1.1345963568197353,2.0,9.44
-xy008c1.tiff,8,1,350,199,10536,428.71782079359116,145.46525296682356,93.33356563369931,0.7203483288802478,1.5585524026556792,4.0,1.39
-xy008c1.tiff,8,2,760,310,4235,550.0081886954629,111.79023337613155,79.44461026833557,0.17592395006148007,1.407146853619698,10.0,4.9
-xy008c1.tiff,8,3,1013,459,10919,747.0559159102153,150.3527364466329,97.54737803973406,0.24585922680541755,1.5413303716414557,4.0,4.59
-xy008c1.tiff,8,4,915,527,6125,507.10869375885125,119.87732825062206,83.34300103654972,0.29930491246734875,1.4383610712320083,1.0,3.74
-xy008c1.tiff,8,5,376,738,5660,578.6833329081849,165.0524315515625,55.366867856485264,0.21239502847271646,2.9810686054950732,9.0,4.73
-xy008c1.tiff,8,6,496,789,8081,547.422402257836,109.7203548435023,104.9414342179706,0.3388674374605383,1.045538929986468,5.0,4.12
-xy008c1.tiff,8,7,497,871,3288,411.71172413918123,110.0182313548024,48.87095978120009,0.2437559380026663,2.2511985000369217,7.0,5.49
-xy009c1.tiff,9,1,896,139,5361,402.8893936688449,125.0178556837225,68.76710014853327,0.4150343118713621,1.8179893497572328,10.0,3.3
-xy009c1.tiff,9,2,969,134,4032,407.4102089490162,142.13923312666432,40.486728657057036,0.305257661316901,3.5107611269523677,7.0,3.93
-xy009c1.tiff,9,3,283,183,7216,618.0914498161483,155.36193712237494,67.58139822031399,0.2373563837514416,2.2988861019995204,7.0,4.56
-xy009c1.tiff,9,4,1133,357,12762,618.1269837220808,137.25034122900314,121.71281708768862,0.4197330807071316,1.1276572551115995,5.0,2.08
-xy009c1.tiff,9,5,991,455,6736,463.38077169749334,112.00493789642141,82.58464759585823,0.39421752368578494,1.3562440617841742,7.0,3.82
-xy009c1.tiff,9,6,361,873,16510,825.3767670841366,222.12988914967517,114.76739172981024,0.3045456896320867,1.9354791095420365,2.0,3.9
-xy010c1.tiff,10,1,468,64,5609,521.3868683519033,111.39000111398308,87.28411322312729,0.2592836467955597,1.2761772675542125,6.0,5.99
-xy010c1.tiff,10,2,1229,91,5173,331.7411613907041,94.66851876162102,74.55355848380593,0.5906818425658971,1.269805502069822,4.0,2.88
-xy010c1.tiff,10,3,362,289,6312,514.2741699796952,153.6382883162915,63.280427222735575,0.2999075716623911,2.427895876485042,2.0,4.37
-xy010c1.tiff,10,4,100,308,14496,664.8610024378486,150.84449798445885,126.59437208520228,0.4120940312089145,1.191557693283043,4.0,2.54
-xy010c1.tiff,10,5,723,382,14031,552.6356056934324,137.12752850442592,132.34561353922098,0.5773255307261416,1.0361320246083396,4.0,2.47
-xy010c1.tiff,10,6,756,762,4185,435.7239174480009,115.62440750674887,69.19463256837993,0.27700175919028885,1.6710025505589012,9.0,4.65
-xy010c1.tiff,10,7,1097,766,5053,457.6883835420683,105.59969645558078,99.46372423254635,0.3031233434312776,1.061690553720757,1.0,5.12
-xy010c1.tiff,10,8,941,803,3720,348.8416664540925,89.30413318522079,59.82386639649189,0.384145808542957,1.4927843779494945,3.0,3.35
-xy011c1.tiff,11,1,885,388,17311,802.8792924010781,166.4121687300694,134.68967054089995,0.3374671522213851,1.2355228731481422,5.0,2.22
-xy011c1.tiff,11,2,205,459,6921,536.8010819142763,110.03259715083328,86.79707319142983,0.3018227336911389,1.2676993947498414,6.0,3.15
-xy011c1.tiff,11,3,306,658,10540,611.9259735953043,135.82219581897832,104.82621134452499,0.3537143668069139,1.2956892563118685,3.0,2.89
-xy011c1.tiff,11,4,293,893,2487,393.71782079359116,82.09875086443705,49.04745990513728,0.20161159432813894,1.673863458438506,10.0,6.94
-xy012c1.tiff,12,1,418,348,8793,471.52290732122435,113.25399130446348,107.30680221941782,0.4969827840167067,1.05542229348038,3.0,3.71
-xy012c1.tiff,12,2,446,486,10221,666.2985565973347,142.02944599964266,96.82543397037512,0.2893113736934831,1.4668609287421135,6.0,3.61
-xy012c1.tiff,12,3,612,539,8575,484.144227664784,122.30504630575741,94.27689064604391,0.45972114136981607,1.2972961397819462,7.0,3.56
-xy012c1.tiff,12,4,1029,584,18098,742.4234482783627,163.61849305519854,143.92537310326978,0.4126074716040438,1.1368286878631089,8.0,2.31
-xy012c1.tiff,12,5,847,677,4628,510.0437226013957,100.74034744422346,79.19376743176895,0.2235570657035196,1.2720741885530125,1.0,4.45
-xy012c1.tiff,12,6,686,874,13582,560.1858582251266,143.79115695341332,122.6036722095574,0.5438877887601428,1.1728128070066426,5.0,2.0
-xy013c1.tiff,13,1,952,147,8374,538.9076836320746,122.09017850058193,93.02182690037549,0.3623381554485547,1.312489579798708,7.0,2.8
-xy013c1.tiff,13,2,160,252,3416,337.3624817342638,90.28285574876736,59.99754056931256,0.3771674108004357,1.5047759440150297,10.0,3.63
-xy013c1.tiff,13,3,1089,267,5846,528.3929650063131,128.64257452434072,76.17021738172994,0.2631204610270913,1.688882859289262,11.0,4.61
-xy013c1.tiff,13,4,241,809,3566,339.9421715174808,81.6402924470161,69.26580018579413,0.38777616763853895,1.1786522674686413,6.0,3.49
-xy013c1.tiff,13,5,779,850,4963,434.167568261897,95.13783781583368,75.78539181139297,0.33085627891251723,1.255358526780506,10.0,5.28
-xy014c1.tiff,14,1,1040,176,2245,318.08430714121175,89.81227250387514,39.950807551319755,0.27883142765081576,2.2480715161640887,3.0,4.18
-xy014c1.tiff,14,2,793,175,21362,1259.5260453828039,255.33825939384732,127.72828142243029,0.1692144589238681,1.9990737881251064,2.0,5.33
-xy014c1.tiff,14,3,1199,297,2508,303.634559672906,79.2595326572523,51.71276153435828,0.3418495348891253,1.532688069744482,10.0,4.05
-xy014c1.tiff,14,4,606,372,10159,789.9503602129437,142.8611811036667,106.95438464010431,0.20457908008505835,1.335720658712467,8.0,5.29
-xy014c1.tiff,14,5,745,507,10112,548.0914498161483,146.73353618356322,92.38769549360237,0.4230004901677097,1.5882367819611234,6.0,2.93
-xy014c1.tiff,14,6,840,720,6499,614.9259735953043,134.88940794471205,75.16270937728719,0.21597860222369297,1.7946320597308485,3.0,5.6
-xy014c1.tiff,14,7,211,832,3987,514.0315292925759,113.82056351851195,61.34018896045453,0.189616699621122,1.855562649014516,2.0,4.61
-xy015c1.tiff,15,1,1001,195,7715,523.3513344459706,123.18812890109993,88.49626170031607,0.3539640231758127,1.3920150584243258,8.0,2.81
-xy015c1.tiff,15,2,254,215,5007,423.03762594698577,106.29195024920715,67.15227815943896,0.3515842915066629,1.5828495050732196,11.0,3.18
-xy015c1.tiff,15,3,411,256,12558,594.0264786586928,148.58750224004788,111.77682937745642,0.44721744351633147,1.329322929158121,11.0,2.36
-xy015c1.tiff,15,4,199,282,4991,394.5107140124046,120.3718856933096,56.176000675957916,0.4029766011776325,2.1427635332685067,6.0,3.34
-xy015c1.tiff,15,5,142,789,5947,634.0498192558055,174.27574145111112,65.72298871322928,0.1858921439752793,2.651670973326132,12.0,5.92
-xy015c1.tiff,15,6,566,946,3921,449.07925650732835,97.13849604597593,58.12939025183775,0.2443209545054797,1.671073713746807,1.0,5.6
-xy015c1.tiff,15,7,662,944,7895,600.8549057834388,141.33029440662364,90.30041828275348,0.2748038263877272,1.5651122895586456,9.0,4.33
-xy016c1.tiff,16,1,211,66,2641,276.73506473629425,86.8425593128462,50.43114835317297,0.4333610573501301,1.7220024161393568,3.0,3.98
-xy016c1.tiff,16,2,977,66,10786,612.1330803764909,160.68148831917577,96.87038632692969,0.3617250456815293,1.6587266182348934,12.0,4.46
-xy016c1.tiff,16,3,110,82,4272,451.93102422918764,132.58200529776215,67.23503725969519,0.2628432305107425,1.9719183732384118,10.0,4.37
-xy016c1.tiff,16,4,202,199,3747,344.0487732352791,92.30760883900491,66.5039104074814,0.3977896964600453,1.3880027245528812,10.0,3.95
-xy016c1.tiff,16,5,198,297,5532,396.912734265958,94.89050115542648,79.30630202552041,0.4412675282556694,1.1965064406217196,11.0,2.74
-xy016c1.tiff,16,6,338,468,17679,865.9270196158307,196.7628574024754,134.08353619530206,0.2962818020431603,1.4674647088355166,1.0,4.8
-xy016c1.tiff,16,7,1177,833,7957,558.1797615707167,185.0677296146285,72.05046486904686,0.32093054263146115,2.568584809979959,12.0,2.93
-xy016c1.tiff,16,8,453,829,4368,387.0193359837562,91.9208515885543,76.27543263880574,0.3664604974317642,1.2051174068567505,2.0,3.53
-xy017c1.tiff,17,1,80,463,8283,478.7300141024109,109.54358108490227,103.00778496885222,0.4541676664856363,1.0634495355669122,5.0,3.9
-xy017c1.tiff,17,2,353,839,3877,396.33304448274095,87.55067408505171,68.40727627487766,0.3101595074133661,1.2798444676155658,3.0,4.32
-xy018c1.tiff,18,1,900,78,3085,292.90054095713816,88.26823095035911,50.80638596177926,0.4518816281118182,1.7373452033522268,11.0,2.65
-xy018c1.tiff,18,2,267,158,7606,709.517856687341,130.85097984342713,99.68044492992641,0.18986264428285807,1.3127046125788568,3.0,5.99
-xy018c1.tiff,18,3,127,163,12721,704.2457787486991,145.9659990819314,116.58873741188528,0.3223165543150001,1.251973409457742,5.0,2.66
-xy018c1.tiff,18,4,481,232,5502,444.07315985291854,113.26037998688332,72.58588461203558,0.3506076601134296,1.560363706969322,8.0,3.93
-xy019c1.tiff,19,1,1166,210,10170,552.4640328181787,122.42703198766036,108.9838746891225,0.4187191255501213,1.1233499665605078,5.0,2.68
-xy019c1.tiff,19,2,604,209,6762,447.73611075682084,126.71834995897495,73.63387784774093,0.4238779111939319,1.7209245752478408,11.0,3.28
-xy019c1.tiff,19,3,465,375,8107,520.1919548795365,117.9057973139992,97.17723466116709,0.3764807495892495,1.2133067762743757,6.0,4.4
-xy019c1.tiff,19,4,581,466,4370,434.98989873223326,122.77710942188429,65.78908369438912,0.29022375516648113,1.8662231258946,6.0,4.47
-xy019c1.tiff,19,5,982,708,6541,387.11984104714446,108.04923634062591,79.63280720192712,0.5484829750625728,1.3568432425927477,7.0,1.74
-xy019c1.tiff,19,6,253,894,3348,623.7188668141177,128.5382122013527,94.99299592280636,0.1081477655757498,1.353133575298604,11.0,8.39
-xy020c1.tiff,20,1,323,112,7879,629.4406922210658,139.97516466557116,83.77980550662198,0.2499029212165619,1.67075065189197,8.0,3.96
-xy020c1.tiff,20,2,245,258,6837,353.30360723121805,110.33607185716993,82.36585453239219,0.6883024266875607,1.3395851045748317,4.0,2.3
-xy020c1.tiff,20,3,964,289,8675,675.7604973744602,141.54706237288937,88.67731293238909,0.23872271542404244,1.5962037830443754,7.0,3.89
-xy020c1.tiff,20,4,739,824,8769,536.5939751330899,130.0840099854509,89.48774156755897,0.3827087130881934,1.4536517259991828,4.0,3.79
-xy021c1.tiff,21,1,130,411,7102,488.70057685088807,101.20518821799595,98.29117523796191,0.37368428013771815,1.0296467406456304,1.0,4.62
-xy021c1.tiff,21,2,748,705,4780,525.144227664784,134.07597928679263,54.779017160388676,0.21781138088938565,2.447579132247456,3.0,6.58
-xy022c1.tiff,22,1,1198,163,16437,713.0325753131026,220.34030951164848,105.58618815882872,0.406268991989704,2.0868289058811373,2.0,2.08
-xy022c1.tiff,22,2,800,536,8393,610.7127701597077,173.7465371562352,79.57332404042543,0.2827828613140921,2.183477179713734,10.0,3.46
-xy022c1.tiff,22,3,952,622,5531,520.5290039756342,114.08204289948073,73.86175836949162,0.2565214319161088,1.5445346200504475,11.0,4.02
-xy022c1.tiff,22,4,675,821,6380,554.3218971944477,122.73162192426778,88.80899935115053,0.26091946136050786,1.38197280479411,1.0,3.9
-xy023c1.tiff,23,1,538,144,4277,260.10764773832483,86.74210452236639,63.83658839487297,0.7944066501600255,1.3588148537294495,4.0,1.61
-xy023c1.tiff,23,2,163,123,12547,612.6417023478423,211.22858279461997,85.9848841671405,0.4200844931638809,2.456578093238181,2.0,2.45
-xy023c1.tiff,23,3,1008,233,4191,341.469083452062,95.8433399435957,62.111044160523406,0.4516736416480069,1.5430965819201583,3.0,3.14
-xy023c1.tiff,23,4,418,222,9722,496.03762594698577,118.73106686455273,109.27649788228656,0.4965194209017723,1.0865196923903135,4.0,3.08
-xy023c1.tiff,23,5,959,297,10820,515.0203820042827,135.04396842546677,105.7663975986055,0.5126114368723058,1.2768135390029327,6.0,1.75
-xy023c1.tiff,23,6,210,359,17652,612.8843430349616,173.9328556197628,131.4193430929457,0.5905364773690008,1.3234950923225182,5.0,1.54
-xy023c1.tiff,23,7,1042,443,6045,607.848809129029,154.6793218850645,71.94759950797999,0.20559617417424605,2.149888570888434,12.0,4.24
-xy023c1.tiff,23,8,414,505,3955,375.8772003600252,98.69953261533159,62.58353501280639,0.3517745247385038,1.5770846532579992,1.0,4.05
-xy023c1.tiff,23,9,767,652,5223,662.0569619307419,177.11809264795124,71.48836954223498,0.14974046459741994,2.477579133250629,1.0,5.35
-xy023c1.tiff,23,10,170,931,9598,546.8010819142763,149.36828122074743,92.58057418215552,0.4033962709457047,1.6133868529143067,4.0,4.43
-xy023c1.tiff,23,11,1145,953,5597,415.0904037956216,122.91624376805895,67.83153657951351,0.4082063763421754,1.8120810756509111,12.0,3.42
-xy024c1.tiff,24,1,370,134,5401,346.5634918610405,111.27785482340951,65.24724774814166,0.565091038055316,1.7054796740691471,7.0,2.25
-xy024c1.tiff,24,2,1066,195,12252,511.17366491630685,132.75448543318308,120.72463641487532,0.5892232963985035,1.0996470097202584,4.0,2.15
-xy024c1.tiff,24,3,686,210,5463,386.05486988968886,101.82958586099994,76.77397357628672,0.4606202153895308,1.326355548860795,9.0,3.18
-xy024c1.tiff,24,4,968,444,4588,332.46298679765215,97.78605945358272,61.62965605971848,0.5216109128168666,1.586672159241471,11.0,2.18
-xy024c1.tiff,24,5,851,575,3473,387.70562748477136,128.37172718993241,45.13698635484052,0.2903423835327061,2.8440473668478705,12.0,2.96
-xy024c1.tiff,24,6,493,675,6524,508.3513344459705,138.0168647800258,69.07039069806993,0.3172457994967248,1.99820593723502,6.0,4.24
-xy024c1.tiff,24,7,442,857,9420,544.4640328181787,152.01649928829193,92.37842371141261,0.3993212073585351,1.6455844685463228,12.0,2.53
-xy024c1.tiff,24,8,105,908,5379,363.60512242138304,122.4811720771076,65.41720586873986,0.5112713095098699,1.8723082169371008,9.0,2.38
-xy024c1.tiff,24,9,1063,939,6785,479.4812767608817,112.3197837162118,82.48883503891868,0.37086547877158105,1.3616361979559868,12.0,4.74
-xy025c1.tiff,25,1,821,184,4550,485.1797615707168,129.92771660597384,65.78741762501289,0.2428935079836868,1.9749630141520296,10.0,3.85
-xy025c1.tiff,25,2,371,369,7307,412.09040379562174,107.82301656857204,90.2968479146773,0.5407094609376885,1.194095021682877,4.0,3.4
-xy025c1.tiff,25,3,1098,476,4538,363.1970055134199,103.41888879424368,68.13595765463168,0.4323045805431616,1.5178312942845027,12.0,3.75
-xy025c1.tiff,25,4,175,507,3255,325.80613254815967,94.2093979521311,52.93325752912505,0.3853384513483523,1.779777069270581,9.0,3.93
-xy025c1.tiff,25,5,1142,607,12889,956.2112908632927,187.33697680496067,118.17503663385928,0.17714192208888074,1.585249999840366,7.0,6.03
-xy025c1.tiff,25,6,940,617,4985,405.26807332528534,95.72902599740948,75.30119288853112,0.38140839089556017,1.271281666667855,9.0,4.29
-xy025c1.tiff,25,7,447,614,18107,684.9087296526011,179.18683017873911,132.70539977332615,0.4850549739034501,1.350260279421997,4.0,2.09
-xy025c1.tiff,25,8,515,706,8928,505.2091988222396,132.85150887709617,94.01073513584456,0.4395634227861206,1.4131525371557525,1.0,3.74
-xy025c1.tiff,25,9,280,743,4085,449.58787847867995,98.02877703915674,63.04185300084565,0.2539643386565322,1.554979309346154,11.0,7.24
-xy025c1.tiff,25,10,785,791,12607,652.6538956566621,134.50817709121486,124.12363602794028,0.3719253219159943,1.083662881587976,4.0,3.4
-xy025c1.tiff,25,11,284,844,3883,426.3746750430834,98.28250786284585,64.16651374254224,0.2684073793262601,1.5316790975615182,1.0,7.16
-xy025c1.tiff,25,12,1164,947,8615,499.2508293825822,123.44469796224425,96.1038584268552,0.4343377285281227,1.2844926310237397,6.0,2.8
-xy026c1.tiff,26,1,701,79,9890,672.8610024378486,154.52612926155118,104.10819038493618,0.27450827397379857,1.4842840768838317,9.0,4.37
-xy026c1.tiff,26,2,564,114,17884,955.6965722375313,181.07652267330985,138.77830103361958,0.2460562883221695,1.3047898794311032,3.0,4.08
-xy026c1.tiff,26,3,1085,166,21269,975.8509011700819,263.040194569108,115.83639740761524,0.2806661260454623,2.2707905326466533,2.0,3.77
-xy026c1.tiff,26,4,253,368,5446,374.26197667087547,110.32111809478718,67.86471402663965,0.4885804520918246,1.625603521315682,11.0,2.44
-xy026c1.tiff,26,5,513,389,10434,558.4995667241114,130.79699792496774,111.1748658686917,0.4203538363436432,1.1764979152702708,5.0,3.27
-xy026c1.tiff,26,6,778,504,4253,360.048773235279,90.12075179935516,71.84521961287336,0.4122707996916311,1.254373670022259,4.0,3.68
-xy026c1.tiff,26,7,101,559,11918,1040.7442994522837,182.54476260331177,117.9769723119128,0.13826910877382553,1.547291467361035,3.0,8.12
-xy026c1.tiff,26,8,944,632,3888,439.7716446627536,143.66597958076613,46.75178854590103,0.2526281098915649,3.072951517987692,3.0,5.05
-xy026c1.tiff,26,9,224,673,6507,477.6589462905455,125.20202138198616,79.04237402322858,0.3583891376547684,1.583986095169566,12.0,4.1
-xy026c1.tiff,26,10,675,700,7482,472.3868683519032,119.94549909204682,83.93793696254984,0.4213392667634465,1.4289784027639645,8.0,3.31
-xy026c1.tiff,26,11,344,883,7784,535.564537881567,119.38994642424898,92.70824114943585,0.34102723656743844,1.2878029498133294,7.0,4.13
-xy026c1.tiff,26,12,124,901,8505,456.17366491630685,123.35296565264687,94.31538960137324,0.5135985188582523,1.3078773906782528,6.0,2.34
-xy027c1.tiff,27,1,1084,231,9101,552.9076836320746,118.1263430032273,103.2387329013124,0.3741051853423527,1.1442056647106105,5.0,3.22
-xy027c1.tiff,27,2,270,257,3137,382.91883092036784,110.02912461837994,61.47237579205655,0.2688505529898916,1.789895431902241,10.0,4.38
-xy027c1.tiff,27,3,164,291,15391,813.9980874276962,173.99067198604615,128.20753980521894,0.2918972028135236,1.3571017137555548,3.0,4.41
-xy027c1.tiff,27,4,819,379,9352,739.636651713959,146.68063397199526,102.64741298271444,0.2148213860451627,1.42897545792699,1.0,6.67
-xy027c1.tiff,27,5,1046,348,8428,632.9787514439399,126.6492778383536,93.87900244607991,0.264335887013265,1.3490692757530691,3.0,6.13
-xy027c1.tiff,27,6,1089,438,2137,197.33809511662426,61.46588266299741,47.90735640053039,0.6895924894379627,1.2830155383467772,10.0,2.14
-xy027c1.tiff,27,7,895,483,3092,339.54015126392744,99.2143182772968,42.910746546041594,0.3370288462602972,2.3121088832804078,10.0,5.24
-xy027c1.tiff,27,8,859,634,6492,485.85995641732217,109.11615611859413,82.6247933232687,0.3455939741517205,1.3206224394617028,6.0,4.9
-xy027c1.tiff,27,9,772,619,2163,233.86500705120545,81.13931645896737,38.95264486075322,0.4969765490680974,2.083024573787527,4.0,3.5
-xy027c1.tiff,27,10,691,750,10249,598.677236253775,168.36851819393038,86.89025699261705,0.3593402510988158,1.9377145841361296,12.0,3.72
-xy027c1.tiff,27,11,1051,967,6074,489.71782079359116,97.15602708595219,91.0017491872493,0.3182678974618058,1.0676281275213684,4.0,6.05
-xy028c1.tiff,28,1,593,131,5322,386.6406563273158,107.4974887768322,72.10720445386596,0.447372923424951,1.4908009482687532,5.0,3.52
-xy028c1.tiff,28,2,999,237,2806,421.2447327281724,101.80534709821295,62.96583747322616,0.19871404494975412,1.6168346389659602,2.0,5.12
-xy028c1.tiff,28,3,1157,274,9662,540.3574311003804,133.45402862423896,97.07807180467071,0.41582887231063337,1.3747082749311268,5.0,2.66
-xy028c1.tiff,28,4,1056,323,3450,456.6528496361356,79.54529583420221,65.28564053601427,0.20790101751970044,1.2184194744987102,10.0,6.98
-xy028c1.tiff,28,5,51,392,2224,196.5096679918781,62.389204814230006,48.80954324763774,0.7237303796088265,1.2782173456878123,9.0,2.17
-xy028c1.tiff,28,6,649,405,9404,622.6894295625948,125.94667720826395,108.29759557067514,0.30477511064201945,1.1629683608817614,12.0,5.83
-xy028c1.tiff,28,7,951,408,8622,667.2457787486987,115.08643558349961,104.42437073648186,0.2433583278551133,1.1021032233358992,7.0,3.11
-xy028c1.tiff,28,8,729,490,9430,606.5290039756343,131.50119235555917,108.26806881103666,0.3221205335575564,1.2145888792481554,8.0,4.67
-xy028c1.tiff,28,9,770,936,8450,530.0437226013958,115.37898342372073,102.74977175558134,0.3779576865715034,1.122912308731755,5.0,3.91
-xy029c1.tiff,29,1,119,230,10954,568.6417023478423,139.88338204066156,105.26831580594595,0.4257015074494672,1.3288270166545249,4.0,2.63
-xy029c1.tiff,29,2,1236,282,2516,295.27922061357856,92.98958148469723,39.81063572497979,0.3626224844279221,2.3357974518942317,8.0,4.26
-xy029c1.tiff,29,3,538,401,9072,573.8843430349616,130.4329519349031,97.3739506174516,0.3461498541675777,1.339505597830048,3.0,3.82
-xy029c1.tiff,29,4,353,556,4928,329.01933598375615,95.11311982726221,69.48754600352285,0.5720549133396133,1.368779375550839,11.0,2.51
-xy029c1.tiff,29,5,724,594,3070,307.39191898578673,79.49352151448406,59.04945520374305,0.4082849271607592,1.3462193891578036,9.0,3.72
-xy029c1.tiff,29,6,596,689,22661,1053.5483399593904,235.82843778856443,150.67008706782406,0.2565547149712682,1.5651974614072293,2.0,3.78
-xy029c1.tiff,29,7,672,777,7589,817.6843789287116,122.0384908455795,95.87201530130085,0.14263398459442345,1.2729313185087874,12.0,7.28
-xy029c1.tiff,29,8,963,960,2586,476.65894629054543,94.4955931210272,63.70580716768639,0.14302859989956898,1.4833120765944614,10.0,7.09
-xy030c1.tiff,30,1,186,142,3313,395.747258045114,135.97571209264186,48.269148981031634,0.2658247847469761,2.817031477933789,12.0,3.32
-xy030c1.tiff,30,2,818,195,3079,471.52290732122435,92.27761238345371,62.193804139187314,0.17402592880557716,1.4837106953120278,11.0,6.78
-xy030c1.tiff,30,3,703,217,2494,397.7827919510468,97.41162483825629,64.49666929260961,0.19806800618532847,1.5103357414677887,1.0,5.87
-xy030c1.tiff,30,4,443,227,6804,709.6955262170047,151.19399813499726,91.16926291014792,0.16975791554879044,1.6583878525376126,1.0,6.38
-xy030c1.tiff,30,5,194,237,2976,338.8355697996826,99.25237691791052,46.07615625656115,0.3257352816968145,2.154094112478776,8.0,4.61
-xy030c1.tiff,30,6,329,722,4322,330.2914139223983,88.22158437386726,67.12318564878849,0.4978517731641989,1.3143235607957109,5.0,2.28
-xy030c1.tiff,30,7,433,736,8070,583.5411972844539,147.65940432451958,85.53384361265822,0.2978107391622691,1.726327241801482,11.0,3.91
-xy030c1.tiff,30,8,1190,950,4519,550.8010819142763,92.08159035255008,81.74210283644122,0.18718137622896347,1.1264891305377511,7.0,7.53
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/c1_boundary_coordinate_stack.pickle b/example_dataset/example_segmented_images/MEF_wildtype/c1_boundary_coordinate_stack.pickle
deleted file mode 100644
index 86d525c..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/c1_boundary_coordinate_stack.pickle and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy001c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy001c1.tiff
deleted file mode 100644
index 5c9121e..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy001c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy002c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy002c1.tiff
deleted file mode 100644
index 48bfacf..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy002c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy003c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy003c1.tiff
deleted file mode 100644
index ede060f..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy003c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy004c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy004c1.tiff
deleted file mode 100644
index 32b9a0e..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy004c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy005c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy005c1.tiff
deleted file mode 100644
index c2b21fc..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy005c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy006c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy006c1.tiff
deleted file mode 100644
index f1f25d9..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy006c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy007c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy007c1.tiff
deleted file mode 100644
index 66ab92b..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy007c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy008c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy008c1.tiff
deleted file mode 100644
index b01992d..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy008c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy009c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy009c1.tiff
deleted file mode 100644
index e31a2d6..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy009c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy010c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy010c1.tiff
deleted file mode 100644
index 05b713a..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy010c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy011c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy011c1.tiff
deleted file mode 100644
index 6ee58ca..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy011c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy012c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy012c1.tiff
deleted file mode 100644
index 31d90b0..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy012c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy013c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy013c1.tiff
deleted file mode 100644
index 9e797a6..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy013c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy014c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy014c1.tiff
deleted file mode 100644
index d99e66d..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy014c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy015c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy015c1.tiff
deleted file mode 100644
index 428c1ba..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy015c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy016c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy016c1.tiff
deleted file mode 100644
index 368b967..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy016c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy017c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy017c1.tiff
deleted file mode 100644
index 05400eb..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy017c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy018c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy018c1.tiff
deleted file mode 100644
index d321da4..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy018c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy019c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy019c1.tiff
deleted file mode 100644
index 939af85..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy019c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy020c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy020c1.tiff
deleted file mode 100644
index 4cd069f..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy020c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy021c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy021c1.tiff
deleted file mode 100644
index d844972..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy021c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy022c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy022c1.tiff
deleted file mode 100644
index 1839f88..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy022c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy023c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy023c1.tiff
deleted file mode 100644
index fab5131..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy023c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy024c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy024c1.tiff
deleted file mode 100644
index eeff4d3..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy024c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy025c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy025c1.tiff
deleted file mode 100644
index 76ce911..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy025c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy026c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy026c1.tiff
deleted file mode 100644
index b47f886..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy026c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy027c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy027c1.tiff
deleted file mode 100644
index d609dee..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy027c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy028c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy028c1.tiff
deleted file mode 100644
index b456c7d..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy028c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy029c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy029c1.tiff
deleted file mode 100644
index a08799e..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy029c1.tiff and /dev/null differ
diff --git a/example_dataset/example_segmented_images/MEF_wildtype/xy030c1.tiff b/example_dataset/example_segmented_images/MEF_wildtype/xy030c1.tiff
deleted file mode 100644
index 0f75873..0000000
Binary files a/example_dataset/example_segmented_images/MEF_wildtype/xy030c1.tiff and /dev/null differ
diff --git a/example_dataset/images_to_apply_model.csv b/example_dataset/images_to_apply_model.csv
deleted file mode 100644
index 21bdbd1..0000000
--- a/example_dataset/images_to_apply_model.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-set ID,condition,set location,tag,note
-1,test1,/Users/hhelmbre/Desktop/packages/vampire_open/example_dataset/example_segmented_images/MEF_LMNA-- ,c1,
-2,test2,/Users/hhelmbre/Desktop/packages/vampire_open/example_dataset/example_segmented_images/MEF_wildtype,c1,
\ No newline at end of file
diff --git a/notebooks/morphology_plots.ipynb b/notebooks/morphology_plots.ipynb
new file mode 100644
index 0000000..44b7c63
--- /dev/null
+++ b/notebooks/morphology_plots.ipynb
@@ -0,0 +1,232 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7fa25975",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np \n",
+ "import matplotlib.pyplot as plt \n",
+ "from statannotations.Annotator import Annotator\n",
+ "import seaborn as sns\n",
+ "import statsmodels\n",
+ "from scipy.stats import ttest_ind\n",
+ "from itertools import combinations\n",
+ "from statsmodels.stats.multitest import multipletests\n",
+ "\n",
+ "from scipy.stats import kruskal\n",
+ "import scikit_posthocs as sp\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "df1dad0a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "regionprops_df = pd.read_csv(\"../../turmoric/jul_3_rot_ogd_regionprops.csv\")\n",
+ "regionprops_df = regionprops_df[regionprops_df[\"treatment\"] != \"30mR_control\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c503bcb1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "treatments = regionprops_df['treatment'].unique()\n",
+ "treatments"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b6fbdefa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "groups = [group['aspect_ratio'].values for name, group in regionprops_df.groupby('treatment')]\n",
+ "kw_stat, kw_p = kruskal(*groups)\n",
+ "print(f\"Kruskal-Wallis p-value: {kw_p:.4e}\")\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2908fc8d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Step 2: Pairwise posthoc Dunn’s test\n",
+ "dunn_df = sp.posthoc_dunn(regionprops_df, val_col='aspect_ratio', group_col='treatment', p_adjust=None)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ef342404",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Flatten the p-values and correct\n",
+ "pairs = []\n",
+ "raw_pvals = []\n",
+ "\n",
+ "treatments = dunn_df.columns\n",
+ "for i in range(len(treatments)):\n",
+ " for j in range(i+1, len(treatments)):\n",
+ " g1, g2 = treatments[i], treatments[j]\n",
+ " pairs.append((g1, g2))\n",
+ " raw_pvals.append(dunn_df.loc[g1, g2])\n",
+ "\n",
+ "# Apply correction (Bonferroni, Holm, etc.)\n",
+ "_, corrected_pvals, _, _ = multipletests(raw_pvals, method='bonferroni') # or 'bonferroni', 'fdr_bh', etc.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2776547c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Build final result table\n",
+ "results = pd.DataFrame(pairs, columns=['Group1', 'Group2'])\n",
+ "results['p_corrected'] = corrected_pvals\n",
+ "results['significance'] = results['p_corrected'].apply(\n",
+ " lambda p: 'ns' if p > 0.05 else ('*' if p <= 0.05 else '**' if p <= 0.01 else '***')\n",
+ ")\n",
+ "\n",
+ "print(results)\n",
+ "\n",
+ "significant_results = results[results['p_corrected'] < 0.05].copy()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6b5fd9aa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.figure(figsize=(16, 12))\n",
+ "\n",
+ "ax = sns.boxplot(data=regionprops_df, x='treatment', y='aspect_ratio', palette='Set2', )\n",
+ "\n",
+ "# Optional: Add error bars\n",
+ "# ...\n",
+ "\n",
+ "# Manual annotation\n",
+ "# Starting y value (above the max of your data)\n",
+ "y_base = regionprops_df['aspect_ratio'].max() * 1.01\n",
+ "\n",
+ "# How much space between each annotation line\n",
+ "y_step = regionprops_df['aspect_ratio'].max() * 0.1\n",
+ "\n",
+ "for i, row in significant_results.iterrows():\n",
+ " g1, g2 = row['Group1'], row['Group2']\n",
+ " x1 = list(regionprops_df['treatment'].unique()).index(g1)\n",
+ " x2 = list(regionprops_df['treatment'].unique()).index(g2)\n",
+ " \n",
+ " # Vertical offset increases with each comparison\n",
+ " y = y_base + i * y_step\n",
+ " h = y_step * 0.5 # height of the tick\n",
+ "\n",
+ " # Draw annotation line and text\n",
+ " ax.plot([x1, x1, x2, x2], [y, y+h, y+h, y], lw=1.5, c='black')\n",
+ " ax.text((x1 + x2) * 0.5, y + h, row['significance'], ha='center', va='bottom', color='black')\n",
+ "\n",
+ "\n",
+ "plt.yscale('log')\n",
+ "plt.tight_layout()\n",
+ "plt.show()\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f16705b0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create the plot\n",
+ "plt.figure(figsize=(8, 6))\n",
+ "sns.set(style=\"whitegrid\")\n",
+ "\n",
+ "# Scatter (beeswarm) plot\n",
+ "#ax = sns.stripplot(data=regionprops_df, x=\"treatment\", y='perimeter', jitter=True, size=1, color=\".3\", alpha=0.5)\n",
+ "ax = sns.swarmplot(data=regionprops_df, x=\"treatment\", y='perimeter', size=0.5, palette='Set2', alpha=0.5)\n",
+ "\n",
+ "# Add mean ± SD bars manually\n",
+ "\n",
+ "\n",
+ "groups = regionprops_df['treatment'].unique()\n",
+ "group_means = regionprops_df.groupby(\"treatment\")[\"perimeter\"].mean()\n",
+ "group_stds = regionprops_df.groupby(\"treatment\")[\"perimeter\"].std()\n",
+ "\n",
+ "# for x, group in enumerate(groups):\n",
+ "# mean = group_means[group]\n",
+ "# std = group_stds[group]\n",
+ " \n",
+ "# lower = mean - std\n",
+ "# upper = mean + std\n",
+ "\n",
+ "# # Enforce positive range for log scale\n",
+ "# lower = max(lower, 1e-1)\n",
+ " \n",
+ "# ax.plot([x, x], [lower, upper], color='black', linewidth=2)\n",
+ "# ax.plot(x, mean, 'o', color='black')\n",
+ "\n",
+ "\n",
+ "# Generate all pairwise combinations\n",
+ "pairs = list(combinations(groups, 2))\n",
+ "\n",
+ "annotator = Annotator(ax, pairs, data=regionprops_df, x='treatment', y='perimeter')\n",
+ "annotator.configure(test='t-test_ind', text_format='star', loc='outside', comparisons_correction=\"bonferroni\")\n",
+ "annotator.apply_and_annotate()\n",
+ "\n",
+ "\n",
+ "plt.yscale(\"log\")\n",
+ "plt.ylabel(\"Perimeter (Pixels)\")\n",
+ "plt.title(\"Perimeter by Group\")\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f1706cf1",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "vampire",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/threshold_validation.ipynb b/notebooks/threshold_validation.ipynb
new file mode 100644
index 0000000..2bc84d1
--- /dev/null
+++ b/notebooks/threshold_validation.ipynb
@@ -0,0 +1,229 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fb043d93",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import numpy as np\n",
+ "import tifffile as tiff\n",
+ "import matplotlib.pyplot as plt\n",
+ "from pathlib import Path\n",
+ "from turmoric.apply_thresholds import apply_li_threshold\n",
+ "from skimage import filters\n",
+ "from skimage.segmentation import clear_border\n",
+ "from skimage.morphology import remove_small_objects\n",
+ "from skimage.measure import block_reduce, label, regionprops\n",
+ "from scipy import ndimage"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "056a105a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def create_microglia_mask(image, threshold_method=filters.threshold_li):\n",
+ "\n",
+ " thresh_li = threshold_method(image)\n",
+ " binary_li = image > thresh_li\n",
+ "\n",
+ " objects = label(binary_li)\n",
+ " objects = clear_border(objects)\n",
+ " large_objects = remove_small_objects(objects, min_size=50000)\n",
+ " small_objects = label((objects ^ large_objects) > thresh_li)\n",
+ "\n",
+ " binary_li = ndimage.binary_fill_holes(remove_small_objects(small_objects > thresh_li, min_size=500))\n",
+ "\n",
+ " #scaled_img = ((image - image.min()) * (1/(image.max() - image.min()) * 255)).astype('uint8')\n",
+ " #hist = np.histogram(scaled_img.flatten(), range=[0,50], bins=50)\n",
+ " return binary_li"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f0fe97f8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_object_sizes(image, threshold_method=filters.threshold_li):\n",
+ "\n",
+ " thresh_li = threshold_method(image)\n",
+ " binary_li = image > thresh_li\n",
+ " binary_li = ndimage.binary_fill_holes(remove_small_objects(binary_li, min_size=500))\n",
+ "\n",
+ " objects = label(binary_li)\n",
+ " objects = clear_border(objects)\n",
+ " \n",
+ " props = regionprops(objects)\n",
+ "\n",
+ " # Get sizes for each label\n",
+ " label_sizes = [prop.area for prop in props]\n",
+ " return label_sizes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b5f5a2a8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "files = Path(\"/Users/nelsschimek/Documents/nancelab/Data/tommy_data/Sham_3/Left_HC\")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "839d3c2d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "npy_files = list(files.glob(\"*.tif\"))\n",
+ "\n",
+ "# If you want full paths as strings:\n",
+ "npy_file_paths = [str(f) for f in npy_files]\n",
+ "(npy_file_paths)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6316ee86",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "microglia_images = [tiff.imread(f) for f in sorted(npy_file_paths)]\n",
+ "print(len(microglia_images))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4aa9d57c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mean_masks = [create_microglia_mask(f, threshold_method=filters.threshold_mean) for f in microglia_images]\n",
+ "li_masks = [create_microglia_mask(f, threshold_method=filters.threshold_li) for f in microglia_images]\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "98a5f120",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sizes = get_object_sizes(microglia_images[1])\n",
+ "min(sizes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c73ffc29",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.hist((sizes))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "12d327eb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "columns = 3\n",
+ "rows = len(li_masks)\n",
+ "\n",
+ "fig, axes = plt.subplots(len(li_masks), columns, figsize=(10*columns, rows*10))\n",
+ "for ax, li, mean, raw in zip(axes, li_masks, mean_masks, microglia_images):\n",
+ " ax[0].imshow(raw > filters.threshold_li(raw), cmap=\"gray\")\n",
+ " ax[1].imshow(mean, cmap=\"gray\")\n",
+ " ax[2].imshow(li, cmap=\"gray\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7170cbc3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "npy_files = Path(\"/Users/nelsschimek/Documents/nancelab/Data/tommy_data/Sham_3/li_thresh/Left_HC\")\n",
+ "\n",
+ "\n",
+ "test_masks = npy_files = list(npy_files.glob(\"*.npy\"))\n",
+ "\n",
+ "# If you want full paths as strings:\n",
+ "npy_file_paths = [str(f) for f in npy_files]\n",
+ "(npy_file_paths)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3def6adc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loaded_masks = [np.load(f) for f in npy_file_paths]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c6e97180",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "loaded_masks[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "923851ad",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "columns = 1\n",
+ "rows = len(li_masks)\n",
+ "\n",
+ "fig, axes = plt.subplots(len(loaded_masks), columns, figsize=(10*columns, rows*10))\n",
+ "for ax, li in zip(axes, loaded_masks):\n",
+ " ax.imshow(li)\n",
+ " "
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "vampire",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/train_vampire_model.ipynb b/notebooks/train_vampire_model.ipynb
new file mode 100644
index 0000000..b48d8ad
--- /dev/null
+++ b/notebooks/train_vampire_model.ipynb
@@ -0,0 +1,165 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ad0dccf1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import vampire"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0dc50e22",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "treatment = [\"24R_control\", \"24R_OGD\", \"2R_control\", \"2R_OGD\", \"30mR_control\", \"OGD_only\", \"ORST\"]\n",
+ "groups = ['vampire_data']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d3bbf577",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "image_set_path = \"/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/training/vampire_data\"\n",
+ "\n",
+ "vampire.extraction.extract_properties(image_set_path)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9e940940",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "build_info_df = pd.DataFrame({\n",
+ " 'img_set_path': [image_set_path],\n",
+ " 'output_path': [image_set_path],\n",
+ " 'model_name': ['li'],\n",
+ " 'num_points': [50],\n",
+ " 'num_clusters': [5],\n",
+ " 'num_pc': [np.nan]\n",
+ "})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6bab620e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "vampire.quickstart.fit_models(build_info_df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "29b8cd1d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model_path = os.path.join('/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/training/vampire_data', 'model_li_(50_5_39)__.pickle')\n",
+ "vampire_model = vampire.util.read_pickle(model_path)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "74cb4cce",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "main_path = \"/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/testing/vampire_data\"\n",
+ "\n",
+ "apply_info_df = pd.DataFrame({\n",
+ " 'img_set_path': [\n",
+ " f\"{main_path}/24R_control\",\n",
+ " f\"{main_path}/24R_OGD\",\n",
+ " f\"{main_path}/2R_control\",\n",
+ " f\"{main_path}/2R_OGD\",\n",
+ " f\"{main_path}/30mR_control\",\n",
+ " f\"{main_path}/OGD_ONLY\",\n",
+ " f\"{main_path}/ORST\",\n",
+ " ],\n",
+ " 'model_path': ['/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/training/vampire_data/model_li_(50_5_39)__.pickle',\n",
+ " '/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/training/vampire_data/model_li_(50_5_39)__.pickle',\n",
+ " '/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/training/vampire_data/model_li_(50_5_39)__.pickle',\n",
+ " '/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/training/vampire_data/model_li_(50_5_39)__.pickle',\n",
+ " '/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/training/vampire_data/model_li_(50_5_39)__.pickle',\n",
+ " '/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/training/vampire_data/model_li_(50_5_39)__.pickle',\n",
+ " '/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/training/vampire_data/model_li_(50_5_39)__.pickle'],\n",
+ " 'output_path': [\n",
+ " f\"{main_path}/24R_control\",\n",
+ " f\"{main_path}/24R_OGD\",\n",
+ " f\"{main_path}/2R_control\",\n",
+ " f\"{main_path}/2R_OGD\",\n",
+ " f\"{main_path}/30mR_control\",\n",
+ " f\"{main_path}/OGD_ONLY\",\n",
+ " f\"{main_path}/ORST\",\n",
+ " ],\n",
+ " 'img_set_name': [\n",
+ " \"24R_control\",\n",
+ " \"24R_OGD\",\n",
+ " \"2R_control\",\n",
+ " \"2R_OGD\",\n",
+ " \"30mR_control\",\n",
+ " \"OGD_ONLY\",\n",
+ " \"ORST\",\n",
+ " ],\n",
+ "})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "409178b4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "vampire.quickstart.transform_datasets(apply_info_df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ba56d69f",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "vampire",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/pyproject.toml b/pyproject.toml
index 6496699..8979656 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,10 +1,9 @@
[build-system]
-requires = ["setuptools >= 64.0"]
+requires = ["setuptools >= 64.0", "wheel", "setuptools_scm>=8.1"]
build-backend = "setuptools.build_meta"
[project]
-name = "nosferatu"
-version = "0.1.0"
+name = "turmoric"
authors = [
{ name="Nels Schimek", email="nlsschim@uw.edu"},
{ name="Krista Phommatha", email="krisphom@uw.edu" },
@@ -13,3 +12,20 @@ authors = [
{ name="Heather Wood", email="hwood2@uw.edu"},
{ name="Sergi Mayta", email="smayta@uw.edu"},
]
+
+readme = "README.md"
+dynamic = ["version"]
+dependencies = [
+
+ "numpy>=1.23",
+ "scipy>=1.8",
+ "setuptools>=64",
+ "setuptools_scm>=8.1",
+]
+requires-python = ">=3.9"
+
+[tool.setuptools]
+packages = ["turmoric"]
+
+[tool.setuptools.package-dir]
+"" = "src"
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..6a11800
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,134 @@
+alabaster==1.0.0
+annotated-types==0.7.0
+appnope==0.1.4
+asttokens==3.0.0
+attrs==25.3.0
+babel==2.17.0
+beautifulsoup4==4.13.4
+bleach==6.2.0
+Bottleneck==1.4.2
+Brotli==1.0.9
+certifi==2025.8.3
+charset-normalizer==3.4.3
+click==8.2.1
+cloudpickle==3.1.1
+comm==0.2.2
+contourpy==1.3.1
+cycler==0.11.0
+dask==2025.5.1
+debugpy==1.8.14
+decorator==5.2.1
+defusedxml==0.7.1
+docutils==0.21.2
+executing==2.2.0
+fastjsonschema==2.21.1
+fonttools==4.55.3
+fsspec==2025.5.0
+idna==3.10
+imageio==2.37.0
+imagesize==1.4.1
+importlib_metadata==8.7.0
+iniconfig==1.1.1
+ipykernel==6.29.5
+ipython==9.3.0
+ipython_pygments_lexers==1.1.1
+jedi==0.19.2
+Jinja2==3.1.6
+joblib==1.4.2
+jsonschema==4.24.1
+jsonschema-specifications==2025.4.1
+jupyter_client==8.6.3
+jupyter_core==5.8.1
+jupyterlab_pygments==0.3.0
+kiwisolver==1.4.8
+lazy_loader==0.4
+locket==1.0.0
+MarkupSafe==3.0.2
+matplotlib==3.10.0
+matplotlib-inline==0.1.7
+mistune==3.1.3
+nbclient==0.10.2
+nbconvert==7.16.6
+nbformat==5.10.4
+nd2==0.10.3
+nd2reader==3.3.1
+nest-asyncio==1.6.0
+networkx==3.4.2
+nosferatu==0.0.0
+numexpr==2.10.1
+numpy==2.2.5
+ome-types==0.6.1
+opencv-python==4.11.0.86
+packaging==24.2
+pandas==2.2.3
+pandocfilters==1.5.1
+parso==0.8.4
+partd==1.4.2
+patsy==1.0.1
+pexpect==4.9.0
+pillow==11.1.0
+PIMS==0.7
+pip==25.1.1
+platformdirs==4.3.8
+pluggy==1.5.0
+prompt_toolkit==3.0.51
+psutil==7.0.0
+ptyprocess==0.7.0
+pure_eval==0.2.3
+pydantic==2.11.4
+pydantic_core==2.33.2
+pydantic-extra-types==2.10.4
+Pygments==2.19.1
+pyparsing==3.2.0
+pytest==8.3.4
+python-dateutil==2.9.0.post0
+pytz==2024.1
+PyYAML==6.0.2
+pyzmq==27.0.0
+referencing==0.36.2
+requests==2.32.5
+resource-backed-dask-array==0.1.0
+roman-numerals-py==3.1.0
+rpds-py==0.26.0
+scikit-image==0.25.0
+scikit-learn==1.6.1
+scikit-posthocs==0.11.4
+scipy==1.15.3
+seaborn==0.13.2
+setuptools==72.1.0
+setuptools-scm==8.3.1
+six==1.17.0
+slicerator==1.1.0
+snowballstemmer==3.0.1
+soupsieve==2.7
+Sphinx==8.2.3
+sphinx-rtd-theme==3.0.2
+sphinxcontrib-applehelp==2.0.0
+sphinxcontrib-devhelp==2.0.0
+sphinxcontrib-htmlhelp==2.1.0
+sphinxcontrib-jquery==4.1
+sphinxcontrib-jsmath==1.0.1
+sphinxcontrib-qthelp==2.0.0
+sphinxcontrib-serializinghtml==2.0.0
+stack-data==0.6.3
+statannotations==0.7.2
+statsmodels==0.14.4
+threadpoolctl==3.5.0
+tifffile==2025.2.18
+tinycss2==1.4.0
+toolz==1.0.0
+tornado==6.4.2
+traitlets==5.14.3
+turmoric==0.0.0
+typing_extensions==4.13.2
+typing-inspection==0.4.0
+tzdata==2025.2
+unicodedata2==15.1.0
+urllib3==2.5.0
+vampire-analysis==0.2.0.dev1
+wcwidth==0.2.13
+webencodings==0.5.1
+wheel==0.45.1
+xmltodict==0.14.2
+xsdata==24.3.1
+zipp==3.21.0
diff --git a/scripts/apply_regionprops.py b/scripts/apply_regionprops.py
new file mode 100644
index 0000000..9ebcfb5
--- /dev/null
+++ b/scripts/apply_regionprops.py
@@ -0,0 +1,27 @@
+from pathlib import Path
+from turmoric.cell_analysis import apply_regionprops_recursively
+import click
+import numpy as np
+
+# props_list = ('area', 'bbox_area', 'centroid', 'convex_area',
+# 'eccentricity', 'equivalent_diameter', 'euler_number',
+# 'extent', 'filled_area', 'major_axis_length',
+# 'minor_axis_length', 'orientation', 'perimeter', 'solidity')
+
+props_list = ('area', 'perimeter')
+
+
+@click.command()
+@click.argument('input_folder', type=click.Path(exists=True,
+ readable=True, path_type=Path))
+@click.argument('output_csv', type=click.Path(exists=False, path_type=Path))
+def recursively_apply_regionprops(input_folder, output_csv):
+ regionprops_df = apply_regionprops_recursively(input_folder, props_list)
+ regionprops_df['circularity'] = 4*np.pi*regionprops_df.area/regionprops_df.perimeter**2
+ #regionprops_df['aspect_ratio'] = regionprops_df.major_axis_length/regionprops_df.minor_axis_length
+ regionprops_df.to_csv(output_csv, index=False)
+
+
+# Example usage
+if __name__ == "__main__":
+ recursively_apply_regionprops()
diff --git a/scripts/apply_single_threshold.py b/scripts/apply_single_threshold.py
new file mode 100644
index 0000000..3f75419
--- /dev/null
+++ b/scripts/apply_single_threshold.py
@@ -0,0 +1,95 @@
+import os
+import numpy as np
+from skimage import io, filters, morphology
+from scipy import ndimage
+import click
+from pathlib import Path
+from skimage.measure import block_reduce, label, regionprops
+from skimage.morphology import remove_small_objects
+from skimage.segmentation import clear_border
+import tifffile as tiff
+
+def create_microglia_mask(image, threshold_method=filters.threshold_li):
+
+ thresh_li = threshold_method(image)
+ binary_li = image > thresh_li
+
+ objects = label(binary_li)
+ objects = clear_border(objects)
+ large_objects = remove_small_objects(objects, min_size=50000)
+ small_objects = label((objects ^ large_objects) > thresh_li)
+
+ binary_li = ndimage.binary_fill_holes(remove_small_objects(small_objects > thresh_li, min_size=500))
+
+ #scaled_img = ((image - image.min()) * (1/(image.max() - image.min()) * 255)).astype('uint8')
+ #hist = np.histogram(scaled_img.flatten(), range=[0,50], bins=50)
+ return binary_li
+
+
+
+@click.command()
+@click.argument('input_folder', type=click.Path(exists=True, readable=True,
+ path_type=Path))
+@click.argument('output_folder', type=click.Path(exists=False, path_type=Path))
+@click.option("-s", "--size", type=click.INT, default=71)
+def apply_li_threshold(input_folder, output_folder, size=71):
+ """
+ Applies Li thresholding to all .tif images in the input folder
+ (and subfolders)
+ and saves the binary masks in the output folder.
+
+ Parameters:
+ - input_folder: Path to the folder containing .tif images.
+ - output_folder: Path to save the processed binary masks.
+ - size: Minimum size of objects to retain in the binary mask.
+ """
+ if not os.path.isdir(input_folder):
+ print(f"Error: Input folder '{input_folder}' does not exist.")
+ return
+
+ # Create output folder if it doesn't exist
+ os.makedirs(output_folder, exist_ok=True)
+
+ # Walk through all files and subfolders
+ for root, _, files in os.walk(input_folder):
+ for file in files:
+ if file.endswith(".tif"):
+ # Full input path
+ input_path = os.path.join(root, file)
+
+ # Create corresponding output subfolder
+ relative_path = os.path.relpath(root, input_folder)
+ output_subfolder = os.path.join(output_folder, relative_path)
+ os.makedirs(output_subfolder, exist_ok=True)
+
+ # Full output path
+ output_path = os.path.join(output_subfolder,
+ file.replace(".tif",
+ "_li_thresh.npy"))
+
+ try:
+ # Read the image
+ img = io.imread(input_path)
+
+ # Assume the second channel is the microglia channel
+ microglia_im = img[:, :, 1] if img.ndim == 3 else img
+
+ # Apply Li threshold
+
+
+ # img = tiff.imread(input_path)
+
+ binary_li = create_microglia_mask(microglia_im)
+
+ # Save the binary mask as .npy
+ np.save(output_path, binary_li)
+
+
+ except Exception as e:
+ print(f"Error processing {input_path}: {e}")
+
+ print(f"Processing completed. Results are saved in '{output_folder}'.")
+
+
+if __name__ == "__main__":
+ apply_li_threshold()
diff --git a/scripts/combine_regionprops_csvs.py b/scripts/combine_regionprops_csvs.py
new file mode 100644
index 0000000..106c99d
--- /dev/null
+++ b/scripts/combine_regionprops_csvs.py
@@ -0,0 +1,51 @@
+from pathlib import Path
+import click
+import numpy as np
+import pandas as pd
+
+
+
+
+
+@click.command()
+
+@click.argument('output_csv', type=click.Path(exists=False, path_type=Path))
+def recursively_apply_regionprops(output_csv):
+
+ csv_list = [
+ "/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/2R_control/cd11b/li_thresh/2R_control_regionprops.csv",
+ "/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/2R_OGD/cd11b/li_thresh/2R_OGD_regionprops.csv",
+ "/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/24R_control/cd11b/li_thresh/24R_control_regionprops.csv",
+ "/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/24R_OGD/cd11b/li_thresh/24R_OGD_regionprops.csv",
+ "/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/ORST/cd11b/li_thresh/ORST_regionprops.csv",
+ "/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/OGD_only/cd11b/li_thresh/OGD_only_regionprops.csv",
+ "/Users/nelsschimek/Documents/nancelab/Data/mito_images/brendan_full_analysis/tifs/30mR_control/cd11b/li_thresh/30mR_control_regionprops.csv"
+
+ ]
+
+ regionprops_dfs = []
+
+ for csv in csv_list:
+ df = pd.read_csv(csv)
+ file_name = csv.split("/")[-1]
+ treatment_1 = file_name.split("_")[0]
+ treatment_2 = file_name.split("_")[1]
+
+ if treatment_1 == "ORST":
+ treatment = "ORST"
+ else:
+ treatment = f'{treatment_1}_{treatment_2}'
+
+ df['treatment'] = treatment
+ regionprops_dfs.append(df)
+
+ regionprops_df = pd.concat(regionprops_dfs)
+
+ regionprops_df['circularity'] = 4*np.pi*regionprops_df["area"]/regionprops_df.perimeter**2
+ regionprops_df['aspect_ratio'] = regionprops_df.major_axis_length/regionprops_df.minor_axis_length
+ regionprops_df.to_csv(output_csv, index=False)
+
+
+# Example usage
+if __name__ == "__main__":
+ recursively_apply_regionprops()
\ No newline at end of file
diff --git a/scripts/concat_csvs.py b/scripts/concat_csvs.py
new file mode 100644
index 0000000..5f7c3a3
--- /dev/null
+++ b/scripts/concat_csvs.py
@@ -0,0 +1,166 @@
+import os
+import pandas as pd
+from pathlib import Path
+import glob
+
+def load_and_concatenate_csvs(root_directory, output_file=None):
+ """
+ Recursively search for CSV files, add treatment column based on subdirectory,
+ and concatenate all CSVs into a single DataFrame.
+
+ Args:
+ root_directory (str): Root directory to search for CSV files
+ output_file (str, optional): Path to save the concatenated CSV
+
+ Returns:
+ pandas.DataFrame: Concatenated DataFrame with treatment column
+ """
+
+ # Convert to Path object for easier handling
+ root_path = Path(root_directory)
+
+ if not root_path.exists():
+ raise ValueError(f"Directory {root_directory} does not exist")
+
+ # Find all CSV files recursively
+ csv_files = list(root_path.rglob("*.csv"))
+
+ if not csv_files:
+ print(f"No CSV files found in {root_directory}")
+ return pd.DataFrame()
+
+ print(f"Found {len(csv_files)} CSV files")
+
+ dataframes = []
+
+ for csv_file in csv_files:
+ try:
+ # Load the CSV
+ df = pd.read_csv(csv_file)
+
+ # Get the immediate parent directory name as treatment
+ treatment = csv_file.parent.name
+
+ # If the CSV is directly in the root directory, use root dir name
+ if csv_file.parent == root_path:
+ treatment = root_path.name
+
+ # Add treatment column
+ df['treatment'] = treatment
+
+ # Add source file info (optional, can be useful for debugging)
+ df['source_file'] = str(csv_file.relative_to(root_path))
+
+ dataframes.append(df)
+ print(f"Loaded: {csv_file.name} with treatment '{treatment}' ({len(df)} rows)")
+
+ except Exception as e:
+ print(f"Error loading {csv_file}: {e}")
+ continue
+
+ if not dataframes:
+ print("No CSV files could be loaded successfully")
+ return pd.DataFrame()
+
+ # Concatenate all dataframes
+ try:
+ combined_df = pd.concat(dataframes, ignore_index=True)
+ print(f"\nSuccessfully concatenated {len(dataframes)} files")
+ print(f"Final dataset shape: {combined_df.shape}")
+ print(f"Treatment values: {combined_df['treatment'].unique()}")
+
+ # Save to file if specified
+ if output_file:
+ combined_df.to_csv(output_file, index=False)
+ print(f"Saved concatenated data to: {output_file}")
+
+ return combined_df
+
+ except Exception as e:
+ print(f"Error concatenating dataframes: {e}")
+ return pd.DataFrame()
+
+def load_csvs_with_custom_treatment(root_directory, treatment_mapping=None, output_file=None):
+ """
+ Alternative version that allows custom treatment mapping based on directory paths.
+
+ Args:
+ root_directory (str): Root directory to search for CSV files
+ treatment_mapping (dict, optional): Custom mapping of directory names to treatment names
+ output_file (str, optional): Path to save the concatenated CSV
+
+ Returns:
+ pandas.DataFrame: Concatenated DataFrame with treatment column
+ """
+
+ root_path = Path(root_directory)
+ csv_files = list(root_path.rglob("*.csv"))
+
+ if not csv_files:
+ print(f"No CSV files found in {root_directory}")
+ return pd.DataFrame()
+
+ dataframes = []
+
+ for csv_file in csv_files:
+ try:
+ df = pd.read_csv(csv_file)
+
+ # Get treatment name
+ dir_name = csv_file.parent.name
+
+ # Apply custom mapping if provided
+ if treatment_mapping and dir_name in treatment_mapping:
+ treatment = treatment_mapping[dir_name]
+ else:
+ treatment = dir_name
+
+ df['treatment'] = treatment
+ df['source_file'] = str(csv_file.relative_to(root_path))
+
+ dataframes.append(df)
+ print(f"Loaded: {csv_file.name} with treatment '{treatment}'")
+
+ except Exception as e:
+ print(f"Error loading {csv_file}: {e}")
+ continue
+
+ if dataframes:
+ combined_df = pd.concat(dataframes, ignore_index=True)
+
+ if output_file:
+ combined_df.to_csv(output_file, index=False)
+
+ return combined_df
+
+ return pd.DataFrame()
+
+# Example usage
+if __name__ == "__main__":
+ # Basic usage
+ root_dir = "/Users/nelsschimek/Documents/nancelab/Data/colin_images/li_thresh" # Replace with your directory path
+
+ # Load and concatenate all CSVs
+ df = load_and_concatenate_csvs(root_dir, output_file="concatenated_data.csv")
+
+ # Display basic info about the result
+ if not df.empty:
+ print("\nDataFrame Info:")
+ print(df.info())
+ print("\nFirst few rows:")
+ print(df.head())
+ print("\nTreatment value counts:")
+ print(df['treatment'].value_counts())
+
+ # Example with custom treatment mapping
+ # custom_mapping = {
+ # "control_group": "Control",
+ # "treatment_a": "Treatment_A",
+ # "treatment_b": "Treatment_B"
+ # }
+ #
+ # df_custom = load_csvs_with_custom_treatment(
+ # root_dir,
+ # treatment_mapping=custom_mapping,
+ # output_file="custom_concatenated_data.csv"
+ # )
\ No newline at end of file
diff --git a/scripts/nd2_to_tif.py b/scripts/nd2_to_tif.py
new file mode 100644
index 0000000..59b7a14
--- /dev/null
+++ b/scripts/nd2_to_tif.py
@@ -0,0 +1,51 @@
+from nd2 import ND2File
+import tifffile
+import os
+import sys
+
+if len(sys.argv) != 3:
+ print("Usage: python nd2_to_tif.py ")
+ sys.exit(1)
+
+input_folder = sys.argv[1]
+output_folder = sys.argv[2]
+
+# Validate input folder
+if not os.path.isdir(input_folder):
+ print(f"Error: Input folder '{input_folder}' does not exist.")
+ sys.exit(1)
+
+# Create output folder if it doesn't exist
+os.makedirs(output_folder, exist_ok=True)
+
+# Process all .nd2 files recursively
+for root, _, files in os.walk(input_folder):
+ for file in files:
+ if file.endswith(".nd2"):
+ # Construct full input path
+ input_path = os.path.join(root, file)
+
+ # Create corresponding output subfolder structure
+ relative_path = os.path.relpath(root, input_folder)
+ output_subfolder = os.path.join(output_folder, relative_path)
+ os.makedirs(output_subfolder, exist_ok=True)
+
+ # Construct output file path
+ output_path = os.path.join(output_subfolder, file.replace(".nd2",
+ ".tif"))
+
+ try:
+ # Open the .nd2 file
+ with ND2File(input_path) as nd2_file:
+ # Extract image data as a NumPy array
+ data = nd2_file.asarray()
+
+ # Save the image data as .tif
+ tifffile.imwrite(output_path, data,
+ photometric="minisblack")
+ print(f"Converted: {input_path} -> {output_path}")
+ except Exception as e:
+ print(f"Error converting {input_path}: {e}")
+
+
+print(f"Conversion completed. TIF files are saved in '{output_folder}'.")
diff --git a/scripts/npy_to_tif.py b/scripts/npy_to_tif.py
new file mode 100644
index 0000000..d8d52df
--- /dev/null
+++ b/scripts/npy_to_tif.py
@@ -0,0 +1,88 @@
+import shutil, os, sys, json
+
+from glob import glob
+
+import numpy as np
+import pandas as pd
+from skimage import io
+import matplotlib.pyplot as plt
+from numpy.linalg import inv
+from sklearn.model_selection import train_test_split
+from skimage.segmentation import clear_border
+import skimage
+import tifffile as tiff
+from os.path import isfile, join
+
+
+#--------- Validate passed Arguments -----------
+
+# Check the number of arguments
+if len(sys.argv) < 2:
+ print("Usage: python example.py ...")
+ sys.exit(1)
+
+# Access the arguments
+print("Script name:", sys.argv[0])
+print("Arguments:", sys.argv[1:])
+
+# Example of processing arguments
+for i, arg in enumerate(sys.argv[1:], start=1):
+ print(f"Argument {i}: {arg}")
+
+# ------- process .npy files -------
+
+if len(sys.argv) != 2:
+ print("Usage: python script.py ")
+ sys.exit(1)
+
+# Get the root directory from the command line
+root_directory = sys.argv[1]
+
+# Validate the root directory
+if not os.path.isdir(root_directory):
+ print(f"Error: The directory '{root_directory}' does not exist.")
+ sys.exit(1)
+
+# Directory to save the output TIFF files
+output_dir = os.path.join(root_directory, "converted_tiffs")
+os.makedirs(output_dir, exist_ok=True)
+
+# Walk through the directory tree
+for dirpath, dirnames, filenames in os.walk(root_directory):
+ for file in filenames:
+ if file.endswith(".npy"):
+ # Construct the full path to the .npy file
+ npy_path = os.path.join(dirpath, file)
+
+ try:
+ # Load the .npy file
+ image_data = skimage.measure.label(np.load(npy_path))
+
+ # Normalize to uint8 if needed
+ # Handle boolean arrays
+ if image_data.dtype == np.bool_:
+ # Convert boolean to uint8 (True -> 255, False -> 0)
+ image_data = (image_data * 255).astype(np.uint8)
+ else:
+ # Normalize and scale image data to uint8 if it's not already
+ if image_data.dtype != np.uint8:
+ image_data = (
+ 255 * (image_data - image_data.min()) / (np.ptp(image_data) + 1e-8)
+ ).astype(np.uint8)
+
+ # Construct the output TIFF file path
+ relative_path = os.path.relpath(dirpath, root_directory)
+ tiff_dir = os.path.join(output_dir, relative_path)
+ os.makedirs(tiff_dir, exist_ok=True)
+ tiff_path = os.path.join(tiff_dir, file.replace(".npy", ".tif"))
+
+ # Save as TIFF
+ #Image.fromarray(image_data).save(tiff_path)
+ print(f"Converted: {npy_path} -> {tiff_path}")
+ io.imsave(tiff_path, image_data)
+
+ except Exception as e:
+ print(f"Error processing '{npy_path}': {e}")
+
+print(f"All .npy files have been converted and saved to '{output_dir}'.")
+
diff --git a/scripts/plot_regionprops.py b/scripts/plot_regionprops.py
new file mode 100644
index 0000000..e88134d
--- /dev/null
+++ b/scripts/plot_regionprops.py
@@ -0,0 +1,216 @@
+import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
+import numpy as np
+from pathlib import Path
+import os
+
+def plot_treatment_means(csv_file, output_dir="plots", figsize=(10, 6)):
+ """
+ Read concatenated CSV file and create separate plots for area, perimeter,
+ and circularity means by treatment condition.
+
+ Args:
+ csv_file (str): Path to the concatenated CSV file
+ output_dir (str): Directory to save the plots
+ figsize (tuple): Figure size for plots
+ """
+
+ # Read the data
+ try:
+ df = pd.read_csv(csv_file)
+ print(f"Loaded data with shape: {df.shape}")
+ except FileNotFoundError:
+ print(f"Error: Could not find file {csv_file}")
+ return
+ except Exception as e:
+ print(f"Error reading CSV: {e}")
+ return
+
+ # Check required columns
+ required_columns = ['treatment', 'area', 'perimeter', 'circularity']
+ missing_columns = [col for col in required_columns if col not in df.columns]
+
+ if missing_columns:
+ print(f"Error: Missing required columns: {missing_columns}")
+ print(f"Available columns: {list(df.columns)}")
+ return
+
+ # Create output directory
+ output_path = Path(output_dir)
+ output_path.mkdir(exist_ok=True)
+
+ # Calculate means by treatment
+ treatment_means = df.groupby('treatment')[['area', 'perimeter', 'circularity']].agg(['mean', 'std', 'count']).reset_index()
+
+ print(f"\nTreatment conditions found: {df['treatment'].unique()}")
+ print(f"Sample sizes: {df['treatment'].value_counts().to_dict()}")
+
+ # Set style
+ plt.style.use('default')
+ sns.set_palette("husl")
+
+ # Metrics to plot
+ metrics = ['area', 'perimeter', 'circularity']
+
+ for metric in metrics:
+ # Create figure
+ fig, ax = plt.subplots(figsize=figsize)
+
+ # Get data for this metric
+ means = treatment_means['treatment']
+ values = treatment_means[(metric, 'mean')]
+ errors = treatment_means[(metric, 'std')]
+
+ # Create bar plot
+ bars = ax.bar(means, values, capsize=5, alpha=0.8, edgecolor='black', linewidth=1)
+
+ # Add error bars
+ ax.errorbar(means, values, yerr=errors, fmt='none', color='black', capsize=5)
+
+ # Customize plot
+ ax.set_xlabel('Treatment Condition', fontsize=12, fontweight='bold')
+ ax.set_ylabel(f'Mean {metric.title()}', fontsize=12, fontweight='bold')
+ ax.set_title(f'Mean {metric.title()} by Treatment Condition', fontsize=14, fontweight='bold')
+
+ # Rotate x-axis labels if there are many treatments
+ if len(means) > 5:
+ plt.xticks(rotation=45, ha='right')
+
+ # Add value labels on bars
+ for i, (bar, value, error) in enumerate(zip(bars, values, errors)):
+ height = bar.get_height()
+ ax.text(bar.get_x() + bar.get_width()/2., height + error + height*0.01,
+ f'{value:.2f}', ha='center', va='bottom', fontweight='bold')
+
+ # Add sample size annotations
+ for i, (bar, treatment) in enumerate(zip(bars, means)):
+ sample_size = treatment_means[treatment_means['treatment'] == treatment][('area', 'count')].iloc[0]
+ ax.text(bar.get_x() + bar.get_width()/2., -max(values)*0.05,
+ f'n={sample_size}', ha='center', va='top', fontsize=9, style='italic')
+
+ # Improve layout
+ plt.tight_layout()
+
+ # Save figure
+ filename = f'{metric}_by_treatment.png'
+ filepath = output_path / filename
+ plt.savefig(filepath, dpi=300, bbox_inches='tight')
+ print(f"Saved: {filepath}")
+
+ # Also save as PDF for publications
+ pdf_filepath = output_path / f'{metric}_by_treatment.pdf'
+ plt.savefig(pdf_filepath, bbox_inches='tight')
+
+ plt.close()
+
+ # Create a summary table and save it
+ create_summary_table(treatment_means, output_path)
+
+ print(f"\nAll plots saved to: {output_path.absolute()}")
+
+def create_summary_table(treatment_means, output_path):
+ """Create and save a summary table of the means and standard deviations."""
+
+ # Reshape the data for a cleaner summary table
+ summary_data = []
+
+ for _, row in treatment_means.iterrows():
+ treatment = row['treatment']
+
+ for metric in ['area', 'perimeter', 'circularity']:
+ mean_val = row[(metric, 'mean')]
+ std_val = row[(metric, 'std')]
+ count_val = row[(metric, 'count')]
+
+ summary_data.append({
+ 'Treatment': treatment,
+ 'Metric': metric.title(),
+ 'Mean': f"{mean_val:.3f}",
+ 'Std Dev': f"{std_val:.3f}",
+ 'Sample Size': int(count_val),
+ 'Mean ± SD': f"{mean_val:.3f} ± {std_val:.3f}"
+ })
+
+ summary_df = pd.DataFrame(summary_data)
+
+ # Save as CSV
+ summary_file = output_path / 'treatment_summary_stats.csv'
+ summary_df.to_csv(summary_file, index=False)
+ print(f"Saved summary statistics: {summary_file}")
+
+ # Print summary to console
+ print("\nSummary Statistics:")
+ print("=" * 60)
+ for treatment in treatment_means['treatment'].unique():
+ print(f"\nTreatment: {treatment}")
+ treatment_data = summary_df[summary_df['Treatment'] == treatment]
+ for _, row in treatment_data.iterrows():
+ print(f" {row['Metric']:12}: {row['Mean ± SD']:15} (n={row['Sample Size']})")
+
+def plot_combined_comparison(csv_file, output_dir="plots", figsize=(15, 5)):
+ """
+ Create a combined plot showing all three metrics side by side.
+ """
+ try:
+ df = pd.read_csv(csv_file)
+ except Exception as e:
+ print(f"Error reading CSV: {e}")
+ return
+
+ # Create output directory
+ output_path = Path(output_dir)
+ output_path.mkdir(exist_ok=True)
+
+ # Calculate means
+ treatment_means = df.groupby('treatment')[['area', 'perimeter', 'circularity']].mean()
+ treatment_stds = df.groupby('treatment')[['area', 'perimeter', 'circularity']].std()
+
+ # Create subplot
+ fig, axes = plt.subplots(1, 3, figsize=figsize)
+ metrics = ['area', 'perimeter', 'circularity']
+
+ for i, metric in enumerate(metrics):
+ ax = axes[i]
+
+ means = treatment_means[metric]
+ stds = treatment_stds[metric]
+
+ bars = ax.bar(means.index, means.values, capsize=5, alpha=0.8, edgecolor='black')
+ ax.errorbar(means.index, means.values, yerr=stds.values, fmt='none', color='black', capsize=5)
+
+ ax.set_title(f'Mean {metric.title()}', fontweight='bold')
+ ax.set_xlabel('Treatment')
+ ax.set_ylabel(f'{metric.title()}')
+
+ # Rotate labels if needed
+ if len(means.index) > 3:
+ ax.tick_params(axis='x', rotation=45)
+
+ plt.tight_layout()
+
+ # Save combined plot
+ combined_file = output_path / 'combined_metrics_comparison.png'
+ plt.savefig(combined_file, dpi=300, bbox_inches='tight')
+ plt.savefig(output_path / 'combined_metrics_comparison.pdf', bbox_inches='tight')
+ print(f"Saved combined plot: {combined_file}")
+
+ plt.close()
+
+# Example usage
+if __name__ == "__main__":
+ # Path to your concatenated CSV file
+ csv_file = "/Users/nelsschimek/Documents/nancelab/software_packages/TURMorIC/concatenated_data.csv" # Change this to your file path
+
+ # Check if file exists
+ if not os.path.exists(csv_file):
+ print(f"File not found: {csv_file}")
+ print("Please update the csv_file variable with the correct path to your data.")
+ else:
+ # Create individual plots for each metric
+ plot_treatment_means(csv_file, output_dir="treatment_plots")
+
+ # Create combined comparison plot
+ plot_combined_comparison(csv_file, output_dir="treatment_plots")
+
+ print("\nPlotting complete! Check the 'treatment_plots' directory for your figures.")
\ No newline at end of file
diff --git a/scripts/split_files.py b/scripts/split_files.py
new file mode 100644
index 0000000..90d1193
--- /dev/null
+++ b/scripts/split_files.py
@@ -0,0 +1,72 @@
+import shutil, os
+
+from glob import glob
+
+import numpy as np
+import pandas as pd
+from skimage import io
+import matplotlib.pyplot as plt
+from numpy.linalg import inv
+from sklearn.model_selection import train_test_split
+from skimage.segmentation import clear_border
+
+def split_files(input_folder, output_folder):
+ """
+
+ """
+ if not os.path.isdir(input_folder):
+ print(f"Error: Input folder '{input_folder}' does not exist.")
+ return
+
+ # Create output folder if it doesn't exist
+ os.makedirs(output_folder, exist_ok=True)
+
+ # Walk through all files and subfolders
+ for root, _, files in os.walk(input_folder):
+ for file in files:
+ if file.endswith(".npy"):
+ # Full input path
+ input_path = os.path.join(root, file)
+
+ # Create corresponding output subfolder
+ relative_path = os.path.relpath(root, input_folder)
+ output_subfolder = os.path.join(output_folder, relative_path)
+ os.makedirs(output_subfolder, exist_ok=True)
+
+ # Full output path
+ output_path_1 = os.path.join(output_subfolder, file.replace(".npy", "_quad1.npy"))
+ output_path_2 = os.path.join(output_subfolder, file.replace(".npy", "_quad2.npy"))
+ output_path_3 = os.path.join(output_subfolder, file.replace(".npy", "_quad3.npy"))
+ output_path_4 = os.path.join(output_subfolder, file.replace(".npy", "_quad4.npy"))
+
+ try:
+ file_to_split = np.load(input_path)
+ quada, quadb = np.array_split(file_to_split, 2)
+ quad1, quad2 = np.array_split(quada, 2, axis=1)
+ quad3, quad4 = np.array_split(quadb, 2, axis=1)
+
+ quad1 = clear_border(quad1)
+ quad2 = clear_border(quad2)
+ quad3 = clear_border(quad3)
+ quad4 = clear_border(quad4)
+
+
+ np.save(output_path_1, quad1)
+ np.save(output_path_2, quad2)
+ np.save(output_path_3, quad3)
+ np.save(output_path_4, quad4)
+
+ except Exception as e:
+ print(f"Error processing {input_path}: {e}")
+
+# Example usage
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) != 3:
+ print("Usage: python split_files.py ")
+ sys.exit(1)
+
+ input_folder = sys.argv[1]
+ output_folder = sys.argv[2]
+ split_files(input_folder, output_folder)
diff --git a/scripts/train_test_split.py b/scripts/train_test_split.py
new file mode 100644
index 0000000..ba45030
--- /dev/null
+++ b/scripts/train_test_split.py
@@ -0,0 +1,67 @@
+import os
+import shutil
+import random
+from collections import defaultdict
+from sklearn.model_selection import train_test_split
+
+# Define your base directory and target directories for training and testing
+base_dir = "/Users/nelsschimek/Documents/nancelab/Data/tommy_data/li_thresh/converted_tiffs"
+train_dir = "/Users/nelsschimek/Documents/nancelab/Data/tommy_data/li_thresh/converted_tiffs/training"
+test_dir = "/Users/nelsschimek/Documents/nancelab/Data/tommy_data/li_thresh/converted_tiffs/testing"
+
+# Define a list of subfolder names or patterns to look for
+treatment_conditions = ["Left_HC", "Right_HC"]
+groups = ["Blast_45_angle_5", "Blast_45_angle_6", "Blast_prone_7", "Blast_prone_8", "Blast_prone_9", "Sham_3"]
+
+# Create training and testing directories if they don't exist
+os.makedirs(train_dir, exist_ok=True)
+os.makedirs(test_dir, exist_ok=True)
+
+# Function to organize files into training and testing folders without slice leakage
+def organize_files_without_leakage(base_dir, train_dir, test_dir, test_size=0.2):
+ for group in groups:
+ for condition in treatment_conditions:
+ condition_path = os.path.join(base_dir, group, condition)
+ if not os.path.exists(condition_path):
+ continue
+
+ print(f'processing {group} {condition} :)')
+
+ # # Group files by brain slice
+ # slice_files = defaultdict(list)
+ # for file in os.listdir(condition_path):
+ # if os.path.isfile(os.path.join(condition_path, file)):
+ # # Extract slice_id based on the naming pattern
+ # slice_id = "_".join(file.split("_")[-6:-5]) # Extract the third element (Slice204)
+ # slice_files[slice_id].append(file)
+ # print(slice_id)
+ # print()
+
+ # # Split slices into training and testing
+ # slice_ids = list(slice_files.keys())
+ # random.seed(42) # For reproducibility
+ # random.shuffle(slice_ids)
+
+ # split_index = int(len(slice_ids) * (1 - test_size))
+ # train_slices = slice_ids[:split_index]
+ # test_slices = slice_ids[split_index:]
+
+ files = os.listdir(condition_path)
+ train_slices, test_slices = train_test_split(files, test_size=test_size)
+
+ # Create subdirectories for training and testing
+ train_subdir = os.path.join(train_dir, group, condition)
+ test_subdir = os.path.join(test_dir, group, condition)
+ os.makedirs(train_subdir, exist_ok=True)
+ os.makedirs(test_subdir, exist_ok=True)
+
+ # Move files to the appropriate folders
+ for file in train_slices:
+ shutil.copy(os.path.join(condition_path, file), os.path.join(train_subdir, file))
+
+ for file in test_slices:
+ shutil.copy(os.path.join(condition_path, file), os.path.join(test_subdir, file))
+
+
+# Call the function to organize the files
+organize_files_without_leakage(base_dir, train_dir, test_dir)
diff --git a/scripts/train_vampire_model.py b/scripts/train_vampire_model.py
new file mode 100644
index 0000000..7216e71
--- /dev/null
+++ b/scripts/train_vampire_model.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+"""
+
+Note: this script was initially written by Claude AI
+and edited by Nels Schimek
+
+VAMPIRE Model Training and Application Script
+
+This script trains a VAMPIRE model on brain imaging data and applies it to test datasets.
+It processes multiple treatment conditions and groups for microglia morphology analysis.
+
+Usage:
+ python train_vampire_model.py [--config config.yaml]
+"""
+
+import os
+import logging
+import argparse
+from pathlib import Path
+from typing import List, Dict, Optional
+import numpy as np
+import pandas as pd
+import vampire
+
+
+# Configure logging
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+
+class VampireModelTrainer:
+ """
+ A class to handle VAMPIRE model training and application for brain image analysis.
+ """
+
+ def __init__(self, base_path: str, treatments: List[str], groups: List[str]):
+ """
+ Initialize the VAMPIRE model trainer.
+
+ Args:
+ base_path: Base directory path for image data
+ treatments: List of treatment conditions
+ groups: List of experimental groups
+ """
+ self.base_path = Path(base_path)
+ self.treatments = treatments
+ self.groups = groups
+ self.model_path: Optional[Path] = None
+
+ # Validate base path exists
+ if not self.base_path.exists():
+ raise FileNotFoundError(f"Base path does not exist: {self.base_path}")
+
+ def extract_features(self, image_set_path: Path) -> None:
+ """
+ Extract features from images using VAMPIRE.
+
+ Args:
+ image_set_path: Path to the image dataset
+ """
+ logger.info(f"Extracting features from: {image_set_path}")
+
+ try:
+ vampire.extraction.extract_properties(str(image_set_path))
+ logger.info("Feature extraction completed successfully")
+ except Exception as e:
+ logger.error(f"Error during feature extraction: {e}")
+ raise
+
+ def train_model(self, image_set_path: Path, model_name: str = 'li',
+ num_points: int = 50, num_clusters: int = 5) -> Path:
+ """
+ Train a VAMPIRE model on the extracted features.
+
+ Args:
+ image_set_path: Path to the training image dataset
+ model_name: Name identifier for the model
+ num_points: Number of points for model training
+ num_clusters: Number of clusters for model training
+
+ Returns:
+ Path to the trained model file
+ """
+ logger.info(f"Training VAMPIRE model: {model_name}")
+
+ build_info_df = pd.DataFrame({
+ 'img_set_path': [str(image_set_path)],
+ 'output_path': [str(image_set_path)],
+ 'model_name': [model_name],
+ 'num_points': [num_points],
+ 'num_clusters': [num_clusters],
+ 'num_pc': [np.nan]
+ })
+
+ try:
+ vampire.quickstart.fit_models(build_info_df)
+
+ # Find the generated model file
+ model_pattern = f"model_{model_name}_({num_points}_{num_clusters}_*)__.pickle"
+ model_files = list(image_set_path.glob(model_pattern))
+
+ if not model_files:
+ raise FileNotFoundError(f"No model file found matching pattern: {model_pattern}")
+
+ self.model_path = model_files[0]
+ logger.info(f"Model trained successfully: {self.model_path}")
+ return self.model_path
+
+ except Exception as e:
+ logger.error(f"Error during model training: {e}")
+ raise
+
+ def create_apply_dataframe(self, test_base_path: Path, model_path: Path) -> pd.DataFrame:
+ """
+ Create a DataFrame for applying the trained model to test datasets.
+
+ Args:
+ test_base_path: Base path for test datasets
+ model_path: Path to the trained model
+
+ Returns:
+ DataFrame with application configuration
+ """
+ apply_data = []
+
+ for group in self.groups:
+ for treatment in self.treatments:
+ img_set_path = test_base_path / group / treatment
+
+ # Check if path exists before adding to dataframe
+ if img_set_path.exists():
+ apply_data.append({
+ 'img_set_path': str(img_set_path),
+ 'model_path': str(model_path),
+ 'output_path': str(img_set_path),
+ 'img_set_name': treatment
+ })
+ logger.debug(f"Added to apply list: {treatment}")
+ else:
+ logger.warning(f"Path does not exist, skipping: {img_set_path}")
+
+ if not apply_data:
+ raise ValueError("No valid test datasets found")
+
+ return pd.DataFrame(apply_data)
+
+ def apply_model(self, test_base_path: Path) -> None:
+ """
+ Apply the trained model to test datasets.
+
+ Args:
+ test_base_path: Base path for test datasets
+ """
+ if self.model_path is None:
+ raise ValueError("Model must be trained before applying")
+
+ logger.info(f"Applying model to test datasets in: {test_base_path}")
+
+ try:
+ apply_info_df = self.create_apply_dataframe(test_base_path, self.model_path)
+ logger.info(f"Applying model to {len(apply_info_df)} datasets")
+
+ vampire.quickstart.transform_datasets(apply_info_df)
+ logger.info("Model application completed successfully")
+
+ except Exception as e:
+ logger.error(f"Error during model application: {e}")
+ raise
+
+ def run_full_pipeline(self, training_subpath: str = "training/vampire_data",
+ testing_subpath: str = "testing/vampire_data") -> None:
+ """
+ Run the complete training and application pipeline.
+
+ Args:
+ training_subpath: Relative path to training data
+ testing_subpath: Relative path to testing data
+ """
+ train_path = Path.joinpath(self.base_path, training_subpath)
+ test_path = Path.joinpath(self.base_path, testing_subpath)
+
+ logger.info("Starting VAMPIRE model pipeline")
+ logger.info(f"Training path: {train_path}")
+ logger.info(f"Testing path: {test_path}")
+
+ # Step 1: Extract features
+ self.extract_features(train_path)
+
+ # Step 2: Train model
+ self.train_model(train_path)
+
+ # Step 3: Apply model to test data
+ self.apply_model(test_path)
+
+ logger.info("Pipeline completed successfully")
+
+
+def load_config(config_path: Optional[str] = None) -> Dict:
+ """
+ Load configuration from file or return defaults.
+
+ Args:
+ config_path: Optional path to configuration file
+
+ Returns:
+ Configuration dictionary
+ """
+ # Default configuration
+ config = {
+ 'base_path': "/Users/nelsschimek/Documents/nancelab/Data/tommy_data/li_thresh/converted_tiffs",
+ 'treatments': ["Left_HC", "Right_HC"],
+ 'groups': ["Blast_45_angle_5", "Blast_45_angle_6", "Blast_prone_7", "Blast_prone_8", "Blast_prone_9", "Sham_3"],
+ 'training_subpath': "training/",
+ 'testing_subpath': "testing/",
+ 'model_params': {
+ 'model_name': 'li',
+ 'num_points': 50,
+ 'num_clusters': 5
+ }
+ }
+
+ # TODO: Add YAML config file support if needed
+ if config_path:
+ logger.warning("Config file loading not yet implemented, using defaults")
+
+ return config
+
+
+def main():
+ """
+ Main entry point for the script.
+ """
+ parser = argparse.ArgumentParser(description='Train and apply VAMPIRE model for brain image analysis')
+ parser.add_argument('--config', type=str, help='Path to configuration file')
+ parser.add_argument('--verbose', '-v', action='store_true', help='Enable verbose logging')
+
+ args = parser.parse_args()
+
+ if args.verbose:
+ logging.getLogger().setLevel(logging.DEBUG)
+
+ try:
+ # Load configuration
+ config = load_config(args.config)
+
+ # Initialize trainer
+ trainer = VampireModelTrainer(
+ base_path=config['base_path'],
+ treatments=config['treatments'],
+ groups=config['groups']
+ )
+
+ # Run the pipeline
+ trainer.run_full_pipeline(
+ training_subpath=config['training_subpath'],
+ testing_subpath=config['testing_subpath']
+ )
+
+ except Exception as e:
+ logger.error(f"Pipeline failed: {e}")
+ raise
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/try_all_thresh.py b/scripts/try_all_thresh.py
new file mode 100644
index 0000000..ae46eeb
--- /dev/null
+++ b/scripts/try_all_thresh.py
@@ -0,0 +1,16 @@
+import click
+from turmoric.apply_thresholds import apply_all_thresh
+from pathlib import Path
+
+
+@click.command()
+@click.argument('input_folder', type=click.Path(exists=True, readable=True,
+ path_type=Path))
+@click.argument('output_folder', type=click.Path(exists=False, path_type=Path))
+def apply_all_thresholds(input_folder, output_folder):
+
+ apply_all_thresh(input_folder, output_folder)
+
+
+if __name__ == "__main__":
+ apply_all_thresholds()
diff --git a/src/turmoric/GUI_components/BuildModel.py b/src/turmoric/GUI_components/BuildModel.py
deleted file mode 100644
index fad4fb3..0000000
--- a/src/turmoric/GUI_components/BuildModel.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from PyQt6.QtCore import pyqtSignal, QThread
-from datetime import datetime
-
-
-class BuildModel(QThread):
- """
- Worker class for performing background tasks in a separate thread.
- This class inherits from `QThread` and is designed to handle long-running operations
- (such as building or applying a model) without blocking the main GUI thread.
-
- The worker communicates progress and status updates via the `progress_changed` and
- `status_updated` signals.
-
- Signals:
- progress_changed (int): Emitted to indicate progress (percentage) of the task.
- status_updated (str): Emitted to update the status message during task execution.
-
- Attributes:
- build_model (bool): Flag indicating whether the worker is building a model (True)
- or applying a model (False).
- csv (str): Path to the CSV file containing image set information.
- entries (dict): Dictionary containing user input fields, such as output paths and model settings.
- outpth (str, optional): Output path for saving results, defaults to `None`.
- clnum (int, optional): The number of clusters for clustering the data, defaults to `None`.
- """
- progress_changed = pyqtSignal(int)
- status_updated = pyqtSignal(str)
-
- def __init__(self, build_model, csv, entries, outpth=None, clnum=None):
- """
- Initializes the Worker object with the specified parameters for background processing.
-
- Args:
- build_model (bool): A flag indicating whether to build a model (`True`) or apply a model (`False`).
- csv (str): Path to the CSV file containing image set information.
- entries (dict): Dictionary containing user input fields such as output folder paths, model settings, etc.
- outpth (str, optional): Output path for saving results, defaults to `None`.
- n_clusters(int, optional): The number of clusters to use for clustering, defaults to `None`.
- """
- super().__init__()
- self.build_model = build_model
- self.csv = csv
- self.entries = entries
- self.outpth = outpth
- self.clnum = clnum
-
- def run(self):
- """
- Runs the model-building or applying process in a separate thread.
- Depending on the value of `build_model`, it either builds or applies the model.
- """
- self.mainbody()
-
- def mainbody(self):
- """
- Core logic for the model-building or applying process.
- Updates progress and status during the process.
- """
- progress = 50
- experimental = True
- realtimedate = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
- N = int(self.entries['Number of coordinates'].text())
-
- if self.build_model:
- self.status_updated.emit("Modeling initiated...")
- self.progress_changed.emit(progress + 15)
- progress += 20
- self.progress_changed.emit(progress)
- progress += 25
- self.progress_changed.emit(progress)
- self.status_updated.emit('Modeling completed.')
- else:
- self.status_updated.emit("Applying model...")
- progress += 100
- self.progress_changed.emit(progress)
- self.status_updated.emit("Model applied successfully.")
-
- def apply_clustering(self):
- print('')
\ No newline at end of file
diff --git a/src/turmoric/GUI_components/CentralNode.py b/src/turmoric/GUI_components/CentralNode.py
deleted file mode 100644
index 444a969..0000000
--- a/src/turmoric/GUI_components/CentralNode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from PyQt6.QtCore import Qt, pyqtSignal,pyqtSlot, QThread, QObject
-from PyQt6.QtGui import QImage, QPixmap
-
-import sys
-import pickle
-
-import ImageHandler
-import FunctionHandler
-import ModelHandler
-
-
-class CentralNode(QObject):
- """
- The CentralNode is responsible for facilitating communication between the main window and QThread-based worker classes.
-
- This class sends information to the worker threads, and receives updates from them to forward back to the main window.
- It serves as an intermediary between the GUI and the backend processing threads.
-
- Attributes:
- """
- def __init__(self, main_window):
- super(CentralNode, self).__init__()
- self.main_window = main_window
- self.current_control=0 # current set of controls
- self.stuff = None
-
- # Instantiate workers
- self.image_handler = ImageHandler(data="some data")
- self.function_handler=FunctionHandler(data="some data")
- self.model_handler = ModelHandler(data="some data")
-
- # Connections to MainWindow
- self.function_handler.update_image.connect()
- self.image_handler.update_image.connect(self.main_window.displayImage)
- self.function_handler.update_data.connect()
- self.build_model.progress_changed.connect(self.main_window.update_progress)
- self.build_model.status_updated.connect(self.main_window.update_status)
- self.function_handler.update_model.connect()
-
- def update_controls(self):
- self.current_control+=1
-
- def update_status(self, new_status):
- self.update_status_signal.emit(new_status)
-
- def start_processing(self, process, **param):
- """
- Start a process.
- """
- process.start(process)
-
- def stop_process(self, process):
- """
- Stop a process.
- """
- process.requestInterruption()
- process.wait()
\ No newline at end of file
diff --git a/src/turmoric/GUI_components/FunctionHandler.py b/src/turmoric/GUI_components/FunctionHandler.py
deleted file mode 100644
index 5c9ae55..0000000
--- a/src/turmoric/GUI_components/FunctionHandler.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-from PyQt6.QtCore import pyqtSignal, QThread
-
-class FunctionHandler(QThread):
- def __init__(self, FunctionHandler):
- """ doc string"""
- super().__init__()
-
- def run(self):
- print("executing function")
\ No newline at end of file
diff --git a/src/turmoric/GUI_components/ImageHandler.py b/src/turmoric/GUI_components/ImageHandler.py
deleted file mode 100644
index 5f9deaa..0000000
--- a/src/turmoric/GUI_components/ImageHandler.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from PyQt6.QtCore import pyqtSignal, QThread
-from PyQt6.QtGui import QImage, QPixmap
-
-
-class ImageHandler(QThread):
- """
- Takes in parameters from CentralNode, modifies them, and sends them to the main window for display.
-
- This class performs image manipulation tasks in a separate thread. It receives parameters from the CentralNode,
- processes images, and sends the results back to the main window to be displayed.
-
- Signals:
- update_image (QImage): Emitted to send the processed image back to the main window for display.
-
- Attributes:
- image_data (QImage): The image data to be processed.
- modify_type (str): The type of modification (e.g., "rotate", "filter", etc.).
- """
- update_image = pyqtSignal(QImage)
-
- def __init__(self, image_data, modify_type):
- super().__init__()
- self.image_data = image_data
- self.modify_type = modify_type
-
- def run(self):
- """
- Runs the image manipulation process in a separate thread.
- Based on the `modify_type`, it performs different image transformations.
- """
- if self.modify_type == "rotate":
- self.process_rotate()
- elif self.modify_type == "filter":
- self.process_filter()
- # Add more processing types as needed
-
- def process_filter(self): # Sample function to be replaced with actual filters
- """
- Applies a filter to the image (e.g., grayscale or blur).
- """
- # Example: apply a grayscale filter
- gray_image = self.image_data.convertToFormat(QImage.Format_Grayscale8)
- self.update_image.emit(gray_image)
\ No newline at end of file
diff --git a/src/turmoric/GUI_components/MainWindow.py b/src/turmoric/GUI_components/MainWindow.py
deleted file mode 100644
index 7705163..0000000
--- a/src/turmoric/GUI_components/MainWindow.py
+++ /dev/null
@@ -1,221 +0,0 @@
-from PyQt6.QtWidgets import (QApplication, QWidget,
- QVBoxLayout, QPushButton, QLabel, QLineEdit,
- QProgressBar, QFileDialog, QHBoxLayout,
- QGridLayout, QStackedWidget,QIntValidator)
-from PyQt6.QtCore import Qt, pyqtSignal,pyqtSlot, QThread, QObject
-from PyQt6.QtGui import QImage, QPixmap
-
-import sys
-import pickle
-
-# Imports for MainWindow
-from central_node import CentralNode
-from function_handler import FunctionHandler
-from image_handler import ImageHandler
-from build_model import BuildModel
-
-class MainWindow(QWidget):
- """
- The main window of the PyQt6 GUI application.
-
- This class sets up the main user interface (UI) for the application, including the layout and controls.
- It handles user interaction and connects buttons to specific functionalities such as loading CSV files,
- selecting output folders, and building models. It also manages the progress bar and status label.
-
- Attributes:
- number_of_pages (int): The number of pages in the GUI (used for stacked widgets).
- current_control (int): The index of the current page being displayed in the GUI.
- controlStack (QStackedWidget): Stack widget used to manage different pages.
- Image_Display1 (QLabel): Label for displaying images.
- load_csv_button (QPushButton): Button to load a CSV file.
- select_folder_button (QPushButton): Button to select an output folder.
- build_model_button (QPushButton): Button to trigger model building.
- progress_bar (QProgressBar): Progress bar to indicate task progress.
- status_label (QLabel): Label to display the current status message.
- next_button (QPushButton): Button to navigate to the next page when the task is complete.
- """
-
- def __init__(self):
- """
- Initializes the MainWindow and sets up the UI.
-
- This method sets the window title, size, and initializes the layout for the GUI.
- It also creates a stacked widget to manage multiple pages and prepares the initial layout.
-
- It does not take any parameters and sets up default values for essential attributes such as:
- - `number_of_pages`: The number of pages in the GUI.
- - `current_control`: The current page index (default is 0).
- - `controlStack`: A stacked widget to manage different pages.
- """
- super().__init__()
- self.setWindowTitle("PyQt6 GUI Template")
- self.setGeometry(0, 0, 1000, 800)
- self.number_of_pages = 3 # Number of pages in the GUI
- self.current_control = 0 # Current page of the GUI
- self.controlStack = QStackedWidget(self)
- self.default_values={'n_clusters':10}
- # initializing empty values
- self.Central_Connection=None, self.Image
- self.init_ui()
-
- def init_ui(self):
- # Main Layout Setup
- outerLayout = QGridLayout()
- displayLayout = QHBoxLayout()
- sidebuttonLayout = QVBoxLayout()
-
- # Universal widgets
- ###################
-
- # Displays images
- self.Image_Display1 = QLabel(self)
- self.Image_Display1.setFixedSize(640, 480)
- displayLayout.addWidget(self.Image_Display1)
-
- # Page navigation Button
- self.next_button = self.make_button(self,'Next Page', self.update_control_stack)
- self.next_button.setVisible(False)
-
-
- # Page specific widgets
- #######################
- self.page1 = QWidget()
- self.page2 = QWidget()
- self.page3 = QWidget()
-
- # Add pages to Navigation
- self.controlStack.addWidget(self.page1)
- self.controlStack.addWidget(self.page2)
- self.controlStack.addWidget(self.page3)
-
- # Initiate page layouts
- self.page1_layout = QVBoxLayout()
- self.page2_layout = QVBoxLayout()
- self.page3_layout = QVBoxLayout()
-
- # page1
- self.page1_layout.addWidget(QLabel("Page 1 "))
- self.load_csv_button = self.make_button(self,'Load CSV', self.load_csv)
- self.select_folder_button = self.make_button(self, 'Select Folder', self.select_folder)
- sidebuttonLayout.addWidget(self.select_folder_button)
-
-
- self.build_model_button = self.make_button(self,'Build Model', self.build_model)
- sidebuttonLayout.addWidget(self.build_model_button)
- sidebuttonLayout.addWidget(self.load_csv_button)
- self.page1_layout.addWidget(self.next_button)
- self.page1.setLayout(self.page1_layout)
-
- # page2
- self.page2_layout.addWidget(QLabel("Page 2 Content Here"))
- self.page2.setLayout(self.page2_layout)
-
- # page 3: Model Building
- self.clusters_input = QLineEdit(self)
- self.clusters_input.setPlaceholderText("Enter Number of Clusters")
- self.clusters_input.setValidator(QIntValidator(self)) # Only allows integer input
- self.clusters_input.setText(self.default_values["n_clusters"])
- self.clusters_input.setPlaceholderText("Enter Number of Clusters")
- self.clusters_input.setValidator(QIntValidator(self))
-
- # Adds layouts to the main layout
- outerLayout.addWidget(self.controlStack, 0, 0)
- outerLayout.addLayout(displayLayout, 0, 0)
- outerLayout.addLayout(sidebuttonLayout, 0, 1)
- self.setLayout(outerLayout)
-
- # Initializes connection to CentralNode
- self.Central_Connection= CentralNode()
- self.Central_Connection.updateImage.connect(lambda image: self.displayImage(image))
-
- # UI helper functions
- #####################
- def update_control_stack(self):
- """Updates current set of pages and controls"""
- self.current_control+=1
-
- def make_button(self,name, action):
- button = QPushButton(name, self)
- button.clicked.connect(action)
- return button
-
- def progress_bar(self, Layout):
- # Progress bar
- self.progress_bar = QProgressBar(self)
- self.progress_bar.setRange(0, 100)
- Layout.addWidget(self.progress_bar)
- self.status_label = QLabel('Status: Ready', self)
- Layout.addWidget(self.status_label)
-
- def image_display_visable(self):
- if self.Image_Display1.isVisible:
- self.Image_Display1.setVisible(False)
- else:
- self.Image_Display1.setVisible(True)
-
- # Functions that communicate with other classes
- ###############################################
- @ pyqtSlot(QImage)
- def displayImage(self, Image):
- self.Image_Display1.setPixmap(QPixmap.fromImage(Image))
-
- @ pyqtSlot()
- def load_csv(self):
- """Initializes file selection window
- """
- file, _ = QFileDialog.getOpenFileName(self, "Open CSV File", "", "CSV Files (*.csv)")
- if file:
- self.entries['Image sets to build'].setText(file)
-
- @ pyqtSlot()
- def select_folder(self):
- folder = QFileDialog.getExistingDirectory(self, "Select Folder")
- if folder:
- self.entries['Model output folder'].setText(folder)
- @ pyqtSlot()
- def update_controls(self):
- self.Central_Connection.update_controls()
-
- @ pyqtSlot()
- def build_model(self):
- """ To do: add communication to central node"""
- build_model = True
- csv = self.entries['Image sets to build'].text()
- entries = self.entries
- outpth = self.entries['Model output folder'].text()
- self.worker = build_model
- self.worker.progress_changed.connect(self.update_progress)
- self.worker.status_updated.connect(self.update_status)
- self.worker.start()
-
- @ pyqtSlot()
- def update_progress(self, progress):
- """
- Updates the progress bar with the current progress value.
-
- This method receives the current progress value (an integer) from the worker thread and updates the progress bar
- accordingly.
-
- Args:
- progress (int): The current progress value (0 to 100).
- """
- self.progress_bar.setValue(progress)
-
- @ pyqtSlot()
- def update_status(self, status):
- """
- Updates the status label with a new status message.
-
- This method receives a status message from the worker thread and updates the status label to reflect the current
- state of the process (e.g., "Modeling initiated", "Modeling completed").
-
- Args:
- status (str): The new status message to display."""
- self.status_label.setText(f"Status: {status}")
-
-
-if __name__ == "__main__":
- app = QApplication(sys.argv)
- window = MainWindow()
- window.show()
- sys.exit(app.exec())
diff --git a/src/turmoric/apply_thresholds.py b/src/turmoric/apply_thresholds.py
new file mode 100644
index 0000000..7d14dd1
--- /dev/null
+++ b/src/turmoric/apply_thresholds.py
@@ -0,0 +1,280 @@
+import os
+import numpy as np
+from skimage import io, filters, morphology
+from scipy import ndimage
+from turmoric.utils import recursively_get_all_filepaths
+import matplotlib.pyplot as plt
+from typing import Callable
+from skimage.filters import threshold_isodata
+from skimage.filters import threshold_li
+from skimage.filters import threshold_mean
+from skimage.filters import threshold_minimum
+from skimage.filters import threshold_otsu
+from skimage.filters import threshold_triangle
+from skimage.filters import threshold_yen
+from collections import OrderedDict
+from collections.abc import Iterable
+
+
+def apply_all_thresh(input_folder: str, output_folder: str, channel: int=1, figsize: tuple=(10, 8)) -> None:
+ """
+ Apply multiple thresholding algorithms to .tif images and save comparison plots.
+
+ This function loads all `.tif` images found recursively in the input folder.
+ For each image, it extracts the specified channel (if multi-channel), applies a suite
+ of thresholding methods using `skimage.filters.try_all_threshold`, and saves the resulting
+ comparison figure to the output folder.
+
+ Parameters
+ ----------
+ input_folder : str
+ Path to the folder containing input `.tif` images.
+ output_folder : str
+ Path to the folder where output thresholding comparison plots will be saved.
+ channel : int, optional
+ Index of the image channel to process if the image is multi-channel (default is 1).
+ figsize : tuple of int, optional
+ Size of the matplotlib figure for the thresholding comparison plot (default is (10, 8)).
+
+ Returns
+ -------
+ None
+ This function does not return anything. It saves output images to the specified folder.
+
+ Notes
+ -----
+ - Requires `recursively_get_all_filepaths` function to retrieve file paths recursively.
+ - Uses `skimage.io.imread` to read images and `skimage.filters.try_all_threshold` to apply
+ thresholding methods.
+ - Output images are saved with filenames ending in `_all_thresh.tif`.
+
+ Examples
+ --------
+ >>> from my_thresholding_module import apply_all_thresh
+ >>> input_dir = "data/microscopy_images"
+ >>> output_dir = "results/thresholding_plots"
+ >>> apply_all_thresh(input_dir, output_dir, channel=0, figsize=(12, 10))
+
+ This will process all `.tif` images in 'data/microscopy_images', apply thresholding to channel 0,
+ and save the comparison plots in 'results/thresholding_plots'.
+ """
+
+ def thresh(func):
+ """
+ A wrapper function to return a thresholded image.
+ """
+
+ def wrapper(im):
+ return im > func(im)
+
+ try:
+ wrapper.__orifunc__ = func.__orifunc__
+ except AttributeError:
+ wrapper.__orifunc__ = func.__module__ + '.' + func.__name__
+ return wrapper
+
+ # Global algorithms.
+ methods = OrderedDict(
+ {
+ 'Isodata': thresh(threshold_isodata),
+ 'Li': thresh(threshold_li),
+ 'Mean': thresh(threshold_mean),
+ 'Minimum': thresh(threshold_minimum),
+ 'Otsu': thresh(threshold_otsu),
+ 'Triangle': thresh(threshold_triangle),
+ 'Yen': thresh(threshold_yen),
+ }
+ )
+ os.makedirs(output_folder, exist_ok=True)
+
+ file_list = recursively_get_all_filepaths(input_folder, ".tif")
+ for file in file_list:
+ im = io.imread(file)
+ microglia_im = im[:, :, channel] if im.ndim == 3 else im
+ # fig, ax = filters.try_all_threshold(microglia_im,
+ # figsize=figsize,
+ # verbose=False)
+
+ fig, ax = plt.subplots(nrows=4, ncols=2, figsize=figsize, sharex=True, sharey=True)
+ ax = ax.flatten()
+ ax[0].imshow(microglia_im)
+ ax[0].set_title('Original Image')
+ ax[0].axis('off')
+
+ i = 1
+ for name, func in methods.items():
+ ax[i].set_title(f'{name}')
+ try:
+ ax[i].imshow(func(microglia_im))
+ ax[i].axis('off')
+ except Exception as e:
+ ax[i].text(
+ 0.5,
+ 0.5,
+ f"{type(e).__name__}",
+ ha="center",
+ va="center",
+ transform=ax[i].transAxes,
+ )
+ i += 1
+
+ ax[-1].axis('off') # Hide the last unused subplot
+ fig.suptitle(f'Thresholding Comparison for {os.path.basename(file)}', fontsize=16)
+ output_path = os.path.join(output_folder,
+ os.path.basename(file).replace(
+ '.tif', '_all_thresh.tif'))
+ fig.savefig(output_path)
+ plt.close(fig)
+ return
+
+
+def apply_li_threshold(file: str, channel: int=1) -> np.ndarray[bool]:
+ """
+ Apply Li thresholding to an .tif image and returns a binary mask.
+
+ This function reads an image from the specified file path, selects the specified
+ channel, and applies Li's thresholding method to generate a binary image. Pixels
+ with intensity values greater than the threshold are set to True.
+
+ Parameters
+ ----------
+ file : str
+ Path to the image file to be processed.
+ channel : int, optional
+ Index of the color channel to use if the image is RGB or multi-channel.
+ Default is 1 (typically the green channel in RGB images).
+
+ Returns
+ -------
+ binary_li : ndarray of bool
+ A 2D binary image where True indicates pixels above the Li threshold.
+
+ Notes
+ -----
+ Li thresholding is an iterative method that minimizes the cross-entropy
+ between the foreground and background pixel distributions.
+
+ References
+ ----------
+ Li, C.H. and Lee, C.K., 1993. Minimum cross entropy thresholding.
+ Pattern Recognition, 26(4), pp.617-625.
+
+ Examples
+ --------
+ >>> from skimage import io
+ >>> import matplotlib.pyplot as plt
+ >>> binary_mask = apply_li_threshold("microglia_image.tif", channel=1)
+ >>> plt.imshow(binary_mask, cmap='gray')
+ >>> plt.title("Li Thresholded Image")
+ >>> plt.axis('off')
+ >>> plt.show()
+
+ """
+
+ # Check if the file exists
+ if not os.path.isfile(file):
+ raise FileNotFoundError(f"File {file} does not exist.")
+ # Check if the file is a .tif image
+ if not file.lower().endswith('.tif'):
+ raise ValueError(f"File {file} is not a .tif image.")
+ # Check if the channel is valid
+ if not isinstance(channel, int) or channel < 0:
+ raise ValueError("Channel must be a non-negative integer.")
+ # Check if the file is a valid image
+ try:
+ im = io.imread(file)
+ except Exception as e:
+ raise ValueError(f"Could not read image {file}: {e}")
+ # Check if the image is multi-channel
+ if im.ndim not in [2, 3]:
+ raise ValueError(f"Image {file} is not a valid 2D or 3D image.")
+ # Check if the channel is valid for the image
+ if im.ndim == 3 and (channel < 0 or channel >= im.shape[2]):
+ raise ValueError(f"Channel {channel} is out of bounds for image {file}.")
+ # Check if the image is grayscale
+ if im.ndim == 2 and channel != 0:
+ raise ValueError(f"Image {file} is grayscale, channel must be 0.")
+ # Check if the image is RGB
+ if im.ndim == 3 and im.shape[2] != 3:
+ raise ValueError(f"Image {file} is not RGB, channel must be 0, 1 or 2.")
+ # Check if the image is RGBA
+ if im.ndim == 3 and im.shape[2] != 4:
+ raise ValueError(f"Image {file} is not RGBA, channel must be 0, 1, 2 or 3.")
+ # Read the image
+ im = io.imread(file)
+
+ microglia_im = im[:, :, channel] if im.ndim == 3 else im
+
+ # Apply Li threshold
+ thresh_li = filters.threshold_li(microglia_im)
+ binary_li = microglia_im > thresh_li
+
+ return binary_li
+
+
+def apply_threshold_recursively(input_folder: str,
+ output_folder: str='./thresh_output/',
+ threshold_function: Callable[[str], np.ndarray]=apply_li_threshold) -> None:
+
+ """
+ Recursively applies a thresholding function to all `.tif` images in a directory
+ and saves the resulting binary images as `.npy` files.
+
+ Parameters
+ ----------
+ input_folder : str
+ Path to the input directory containing `.tif` image files.
+ output_folder : str, optional
+ Path to the output directory where thresholded `.npy` files will be saved.
+ Defaults to './thresh_output/'.
+ threshold_funcion : callable, optional
+ A function that takes a file path as input and returns a binary NumPy array.
+ Defaults to `apply_li_threshold`.
+
+ Returns
+ -------
+ None
+ This function does not return a value. It saves the binary images as `.npy` files
+ in the specified output directory.
+
+
+ Raises
+ ------
+ Prints error messages if:
+ - The input folder does not exist.
+ - Any file fails to process due to an exception.
+
+ Notes
+ -----
+ - The function assumes that `recursively_get_all_filepaths` is defined elsewhere and
+ returns a list of `.tif` file paths.
+ - The thresholding function should return a binary NumPy array.
+
+ Examples
+ --------
+ >>> def dummy_threshold(file_path):
+ ... import numpy as np
+ ... return np.ones((100, 100), dtype=bool) # Dummy binary image
+ ...
+ >>> apply_threshold_recursively('path/to/tif_images',
+ ... output_folder='path/to/output',
+ ... threshold_funcion=dummy_threshold)
+
+ """
+ if not os.path.isdir(input_folder):
+ print(f"Error: Input folder '{input_folder}' does not exist.")
+ return
+
+ # Create output folder if it doesn't exist
+ os.makedirs(output_folder, exist_ok=True)
+
+ file_list = recursively_get_all_filepaths(input_folder, ".tif")
+
+ for file in file_list:
+ try:
+ binary_image = threshold_function(file)
+ output_path = os.path.join(output_folder,
+ file.replace('.tif', '.npy'))
+ np.save(output_path, binary_image)
+ except Exception as e:
+ print(f"Error processing {file}: {e}")
diff --git a/src/turmoric/cell_analysis.py b/src/turmoric/cell_analysis.py
new file mode 100644
index 0000000..a811c7d
--- /dev/null
+++ b/src/turmoric/cell_analysis.py
@@ -0,0 +1,136 @@
+import os
+import numpy as np
+import pandas as pd
+from skimage.measure import label, regionprops_table
+from turmoric.utils import recursively_get_all_filepaths
+
+
+def apply_regionprops(file: str, properties_list: list) -> pd.DataFrame:
+ """
+ Compute region properties from a binary mask stored in a .npy file.
+
+ This function loads a binary mask from a NumPy `.npy` file, labels connected regions,
+ computes specified region properties using `skimage.measure.regionprops_table`, and
+ returns the results as a pandas DataFrame.
+
+ Parameters
+ ----------
+ file : str
+ Path to the `.npy` file containing a binary mask (2D NumPy array of 0s and 1s).
+ properties_list : list of str
+ List of region properties to compute. These should be valid property names
+ accepted by `skimage.measure.regionprops_table`, such as 'area', 'centroid',
+ 'eccentricity', etc.
+
+ Returns
+ -------
+ props_df : pandas.DataFrame
+ A DataFrame containing the computed region properties for each labeled region.
+ Includes an additional column `'filename'` with the source file path.
+
+ Notes
+ -----
+ - The binary mask should be a 2D NumPy array saved with `np.save`.
+ - Each row in the resulting DataFrame corresponds to a connected region in the mask.
+ - For valid property names, see the documentation for `skimage.measure.regionprops_table`.
+ - The binary mask should be a 2D array where foreground regions are marked with 1s.
+ - Connected components are labeled using 8-connectivity by default.
+ - This function is useful for batch processing of image masks in segmentation tasks.
+
+ Examples
+ --------
+ >>> props_df = apply_regionprops("mask_01.npy", ["area", "centroid"])
+ >>> print(props_df.head())
+ """
+
+ # Load the binary mask
+ binary_mask = np.load(file)
+
+ # Label connected regions in the binary mask
+ label_image = label(binary_mask)
+
+ # Measure properties
+ props = regionprops_table(label_image, properties=properties_list)
+
+ # Create a DataFrame for the current file
+ props_df = pd.DataFrame(props)
+ props_df['filename'] = file # Add filename column
+
+ return props_df
+
+
+def apply_regionprops_recursively(input_folder: str, properties_list: tuple=(
+ 'area', 'bbox_area', 'centroid', 'convex_area',
+ 'eccentricity', 'equivalent_diameter',
+ 'euler_number', 'extent', 'filled_area',
+ 'major_axis_length', 'minor_axis_length',
+ 'orientation', 'perimeter', 'solidity')) -> pd.DataFrame:
+ """
+ Recursively applies region properties extraction to all `.npy` files in a directory.
+
+ This function traverses the given input folder and its subfolders to find all `.npy` files.
+ Each file is assumed to contain a labeled image (e.g., from segmentation). The function uses
+ `apply_regionprops` to extract specified properties from each labeled region and returns a combined
+ pandas DataFrame of all results.
+
+ Parameters
+ ----------
+ input_folder : str
+ Path to the root directory containing `.npy` binary mask files.
+ properties_list : tuple of str, optional
+ A tuple of region properties to compute for each labeled region in the binary masks.
+ Default includes a comprehensive set of geometric properties such as 'area',
+ 'centroid', 'eccentricity', etc. Valid property names must be accepted by
+ `skimage.measure.regionprops_table`.
+
+ Returns
+ -------
+ pandas.DataFrame
+ A concatenated DataFrame containing region properties from all processed files.
+ Each row corresponds to a labeled region and includes a 'filename' column indicating
+ the source file.
+
+ Raises
+ ------
+ FileNotFoundError
+ If the input folder does not exist.
+ Exception
+ If an error occurs while processing a file, it is caught and printed, but processing continues.
+
+ Notes
+ -----
+ - Binary masks must be stored as `.npy` files containing 2D NumPy arrays.
+ - If a file cannot be processed, an error message is printed and processing continues.
+ - Requires `recursively_get_all_filepaths` to collect `.npy` file paths.
+ - Uses `apply_regionprops` to compute the region properties and returns a DataFrame.
+
+ Examples
+ --------
+ >>> df = apply_regionprops_recursively('/path/to/npy_files')
+ >>> print(df.head())
+
+ >>> df = apply_regionprops_recursively('/data/images', properties_list=('area', 'centroid'))
+ >>> df[['area', 'centroid-0', 'centroid-1']].plot.scatter(x='centroid-0', y='centroid-1', c='area', colormap='viridis')
+ >>> plt.show()
+ """
+
+ if not os.path.isdir(input_folder):
+ print(f"Error: Input folder '{input_folder}' does not exist.")
+ return
+
+ all_dataframes = [] # List to store individual DataFrames
+
+ # Recursively walk through input folder and process .npy files
+ for root, _, files in os.walk(input_folder):
+ for file in files:
+ if file.endswith("li_thresh.npy"):
+ file_path = os.path.join(root, file)
+
+ try:
+ df = apply_regionprops(file_path, properties_list)
+ all_dataframes.append(df)
+
+ except Exception as e:
+ print(f"Error processing {file_path}: {e}")
+
+ return pd.concat(all_dataframes, ignore_index=True)
diff --git a/src/turmoric/image_process.py b/src/turmoric/image_process.py
new file mode 100644
index 0000000..0342b61
--- /dev/null
+++ b/src/turmoric/image_process.py
@@ -0,0 +1,184 @@
+import os
+import skimage
+import numpy as np
+from nd2 import ND2File
+import tifffile
+
+def load_npy_file(path: str, file_name: str) -> np.ndarray:
+ """
+ Load a NumPy `.npy` file and apply connected component labeling.
+
+ This function loads a binary or labeled image stored in a `.npy` file,
+ applies connected component labeling using `skimage.measure.label`, and
+ returns the labeled image data.
+
+ Parameters
+ ----------
+ path : str
+ Directory containing the `.npy` file.
+ file_name : str
+ Name of the `.npy` file to load.
+
+ Returns
+ -------
+ image_data : ndarray
+ A 2D NumPy array where connected components in the binary mask are labeled with
+ unique integer values. The background is labeled as 0.
+
+ Raises
+ ------
+ Exception
+ If the file cannot be loaded or processed, an error message is printed
+ and the exception is raised.
+
+ Notes
+ -----
+ - If an error occurs while loading or processing the file, an error message is printed.
+ - The function assumes the `.npy` file contains a 2D binary mask (e.g., values of 0 and 1).
+ - Uses `skimage.measure.label` for connected component labeling.
+Examples
+ --------
+ >>> import numpy as np
+ >>> from skimage import measure
+ >>> import os
+ >>> image = np.zeros((5, 5), dtype=int)
+ >>> image[1:3, 1:3] = 1
+ >>> image[3:5, 3:5] = 1
+ >>> np.save('example.npy', image)
+ >>> labeled = load_npy_file('.', 'example.npy')
+ >>> print(labeled)
+ [[0 0 0 0 0]
+ [0 1 1 0 0]
+ [0 1 1 0 0]
+ [0 0 0 2 2]
+ [0 0 0 2 2]]
+ """
+ npy_path = os.path.join(path, file_name)
+
+ try:
+ image_data = skimage.measure.label(np.load(npy_path))
+ except Exception as e:
+ print(f"error processing '{npy_path}': {e}")
+
+ return image_data
+
+def normalize_npy_data(npy_image_data: np.ndarray) -> np.ndarray:
+ """
+ Normalize NumPy image data array to uint8 format.
+
+ This function converts a NumPy array representing image data into a
+ normalized 8-bit unsigned integer format (`uint8`), suitable for image
+ processing and visualization. It handles boolean arrays by mapping
+ `True` to 255 and `False` to 0. For non-uint8 numeric arrays, it scales
+ the values to the [0, 255] range.If the input is not already `uint8`, it
+ is linearly scaled to the range [0, 255].
+
+ Parameters
+ ----------
+ npy_image_data : np.ndarray
+ A NumPy array containing image data. Can be of dtype `bool`,
+ `float`, `int`, etc. The array must be 2D or compatible with
+ image-like data.
+
+ Returns
+ -------
+ normalized_image_data : np.ndarray
+ A NumPy array of dtype `uint8` with values scaled to the range [0, 255].
+
+ Notes
+ -----
+ - Boolean arrays are multiplied by 255 to convert to `uint8`.
+ - For non-boolean arrays, the data is scaled as:
+ `(array - array.min()) / (array.max() - array.min()) * 255`
+ - A small epsilon (`1e-8`) is added to the denominator to prevent division by zero.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from your_module import normalize_npy_data
+
+ # Example with float data
+ >>> float_image = np.random.rand(100, 100) # Values in [0, 1]
+ >>> normalized = normalize_npy_data(float_image)
+ >>> normalized.dtype
+ dtype('uint8')
+ >>> normalized.min(), normalized.max()
+ (0, 255)
+
+ # Example with boolean mask
+ >>> bool_mask = np.array([[True, False], [False, True]])
+ >>> normalized_mask = normalize_npy_data(bool_mask)
+ >>> normalized_mask
+ array([[255, 0],
+ [ 0, 255]], dtype=uint8)
+ """
+ if npy_image_data.dtype == np.bool_:
+ # Convert boolean to uint8 (True -> 255, False -> 0)
+ normalized_image_data = (npy_image_data * 255).astype(np.uint8)
+ else:
+ # Normalize and scale image data to uint8 if it's not already
+ if npy_image_data.dtype != np.uint8:
+ normalized_image_data = (
+ 255 * (npy_image_data - npy_image_data.min()) / (npy_image_data.ptp() + 1e-8)
+ ).astype(np.uint8)
+
+ return normalized_image_data
+
+def save_npy_as_tif():
+ # Construct the output TIFF file path
+ #relative_path = os.path.relpath(dirpath, root_directory)
+ # tiff_dir = os.path.join(output_dir, relative_path)
+ # os.makedirs(tiff_dir, exist_ok=True)
+ # tiff_path = os.path.join(tiff_dir, file.replace(".npy", ".tif"))
+
+ # # Save as TIFF
+ # #Image.fromarray(image_data).save(tiff_path)
+ # print(f"Converted: {npy_path} -> {tiff_path}")
+ # io.imsave(tiff_path, image_data)
+ pass
+
+def nd2_to_tif(path: str, file_name: str) -> None:
+ """
+ Convert an `.nd2` microscopy image file to a `.tif` file.
+
+ This function reads a Nikon ND2 image file from the specified path and converts it
+ to a TIFF file using `tifffile.imwrite`. The output `.tif` file is saved in the
+ same directory with the same base name.
+
+ Parameters
+ ----------
+ path : str
+ Directory containing the `.nd2` file.
+ file_name : str
+ Name of the `.nd2` file to convert.
+
+ Returns
+ -------
+ None
+ The function saves the converted `.tif` file to disk and does not return a value.
+
+ Notes
+ -----
+ - Requires `ND2File` from the `nd2` or `nd2reader` library to read ND2 files.
+ - The converted `.tif` file is saved to the same directory as the input file,
+ with the `.nd2` extension replaced by `.tif`.
+ - Ensure that the ND2 file contains a data format compatible with TIFF output.
+
+ Examples
+ --------
+ >>> from my_module import nd2_to_tif
+ >>> nd2_to_tif("/data/images", "sample_image.nd2")
+ # This will create a file named 'sample_image.tif' in the '/data/images' directory.
+ """
+ nd2_path = os.path.join(path, file_name)
+ tif_path = os.path.join(path, file_name.replace(".nd2", ".tif"))
+
+ with ND2File(nd2_path) as nd2_file:
+ nd2_data = nd2_file.asarray()
+
+ tifffile.imwrite(tif_path, nd2_data)
+
+
+
+def load_tif_file():
+ pass
\ No newline at end of file
diff --git a/src/turmoric/utils.py b/src/turmoric/utils.py
new file mode 100644
index 0000000..a3c730d
--- /dev/null
+++ b/src/turmoric/utils.py
@@ -0,0 +1,181 @@
+import os
+import shutil
+import random
+from collections import defaultdict
+# from skimage.filters import try_all_threshold
+# from skimage.filters import threshold_isodata
+# from skimage.filters import threshold_li
+# from skimage.filters import threshold_mean
+# from skimage.filters import threshold_minimum
+# from skimage.filters import threshold_otsu
+# from skimage.filters import threshold_triangle
+# from skimage.filters import threshold_yen
+# from turmoric.image_process import save_npy_as_tif, nd2_to_tif
+# from turmoric.cell_analysis import apply_regionprops
+
+"""
+Takes in a directory of images to separate them into training and testing data.
+
+The image directory is passed into the function along with groups like
+brain region or sex as well as treatment conditions of the slices. The
+function then splits the images into a 80:20 training and testing data
+without data leakage. The data is grouped by slice and treatment
+conditions in new training and testing directories.
+
+Parameters:
+ base_dir: The directory of all images to be used for training and testing.
+ groups: The variable groups of the images like brain region or subject sex.
+ treatment_conditions: The treatment applied to the slices.
+
+Returns:
+ train_dir: The directory of training files
+ from an 80:20 split from the base directory.
+ test_dir: The directory of testing files
+ from an 80:20 split from the base directory.
+"""
+
+# Function to organize files into training and testing folders
+# without slice leakage
+
+
+def organize_files_without_leakage(base_dir: str, train_dir: str, test_dir: str, groups: list,
+ treatment_conditions: list, test_size: float=0.2) -> None:
+ """
+ Organize files into training and testing sets without data leakage across brain slices.
+
+ This function walks through a directory structure organized by experimental `groups`
+ and `treatment_conditions`, groups image files by brain slice ID (extracted from filenames),
+ and splits slices into training and testing sets. It ensures that all files from the
+ same brain slice are allocated to only one set, avoiding data leakage.
+
+ Parameters
+ ----------
+ base_dir : str
+ Root directory containing raw data files structured as `base_dir/group/condition/`.
+ train_dir : str
+ Output directory where training files will be copied.
+ test_dir : str
+ Output directory where testing files will be copied.
+ groups : list of str
+ List of experimental groups (e.g., ["control", "treated"]).
+ treatment_conditions : list of str
+ List of treatment condition subfolders under each group (e.g., ["vehicle", "drug"]).
+ test_size : float, optional
+ Proportion of slices to use for testing. Default is 0.2 (20%).
+
+ Returns
+ -------
+ None
+ The function performs file copying to organize training and testing sets
+ but does not return a value.
+
+ Notes
+ -----
+ - Slice IDs are extracted from the third underscore-separated token in each filename.
+ Adjust the parsing logic if your filename structure differs.
+ - Files are grouped and split at the slice level, not the file level, to prevent data leakage.
+ - The train/test split is deterministic due to a fixed random seed (`random.seed(42)`).
+ - Existing files in the output directories will not be overwritten.
+ - Uses `shutil.copy` to duplicate files into the train/test directories.
+
+ Examples
+ --------
+ >>> base_dir = "/data/brain_slices"
+ >>> train_dir = "/data/split/train"
+ >>> test_dir = "/data/split/test"
+ >>> groups = ["control", "treated"]
+ >>> treatment_conditions = ["drugA", "drugB"]
+ >>> organize_files_without_leakage(base_dir, train_dir, test_dir, groups, treatment_conditions)
+ processing control drugA :)
+ processing control drugB :)
+ processing treated drugA :)
+ processing treated drugB :)
+ """
+ for group in groups:
+ for condition in treatment_conditions:
+ condition_path = os.path.join(base_dir, group, condition)
+ if not os.path.exists(condition_path):
+ continue
+
+ print(f'processing {group} {condition} :)')
+
+ # Group files by brain slice
+ slice_files = defaultdict(list)
+ for file in os.listdir(condition_path):
+ if os.path.isfile(os.path.join(condition_path, file)):
+ # Extract slice_id based on the naming pattern
+ # Extract the third element
+ slice_id = "_".join(file.split("_")[2:3])
+ slice_files[slice_id].append(file)
+
+ # Split slices into training and testing
+ slice_ids = list(slice_files.keys())
+ random.seed(42) # For reproducibility
+ random.shuffle(slice_ids)
+
+ split_index = int(len(slice_ids) * (1 - test_size))
+ train_slices = slice_ids[:split_index]
+ test_slices = slice_ids[split_index:]
+
+ # Create subdirectories for training and testing
+ train_subdir = os.path.join(train_dir, group, condition)
+ test_subdir = os.path.join(test_dir, group, condition)
+ os.makedirs(train_subdir, exist_ok=True)
+ os.makedirs(test_subdir, exist_ok=True)
+
+ # Move files to the appropriate folders
+ for slice_id in train_slices:
+ for file in slice_files[slice_id]:
+ shutil.copy(os.path.join(condition_path, file),
+ os.path.join(train_subdir, file))
+
+ for slice_id in test_slices:
+ for file in slice_files[slice_id]:
+ shutil.copy(os.path.join(condition_path, file),
+ os.path.join(test_subdir, file))
+
+
+def recursively_get_all_filepaths(input_folder: str, file_type: str) -> list:
+ """
+ Recursively retrieve all file paths of a specific type from a directory.
+
+ This function walks through a directory tree starting from `input_folder` and collects
+ the full paths of all files that end with the specified `file_type` extension.
+
+ Parameters
+ ----------
+ input_folder : str
+ Root directory to search for files.
+ file_type : str
+ File extension to filter by (e.g., '.tif', '.npy').
+
+ Returns
+ -------
+ file_list : list of str
+ List containing full paths to all files in `input_folder` and its subdirectories
+ that match the specified file extension.
+
+ Notes
+ -----
+ - The function performs a case-sensitive match on the file extension.
+ - Subdirectories are traversed using `os.walk`.
+
+ Examples
+ --------
+ >>> file_paths = recursively_get_all_filepaths('/path/to/folder', '.txt')
+ >>> for path in file_paths:
+ ... print(path)
+ /path/to/folder/file1.txt
+ /path/to/folder/subfolder/file2.txt
+ """
+ # Walk through all files and subfolders
+ file_list = []
+ for root, _, files in os.walk(input_folder):
+ for file in files:
+ # ensure only the correct file type is processed
+ if file.endswith(file_type):
+ # Full input path
+ input_path = os.path.join(root, file)
+ file_list.append(input_path)
+
+ return file_list
diff --git a/src/turmoric/vampire_model.py b/src/turmoric/vampire_model.py
new file mode 100644
index 0000000..c85271e
--- /dev/null
+++ b/src/turmoric/vampire_model.py
@@ -0,0 +1,172 @@
+from pathlib import Path
+from typing import List, Optional
+import numpy as np
+import pandas as pd
+import vampire
+
+
+class VampireModelTrainer:
+ """
+ A class to handle VAMPIRE model training and application for brain image analysis.
+ """
+
+ def __init__(self, base_path: str, treatments: List[str], groups: List[str]):
+ """
+ Initialize the VAMPIRE model trainer.
+
+ Args:
+ base_path: Base directory path for image data
+ treatments: List of treatment conditions
+ groups: List of experimental groups
+ """
+ self.base_path = Path(base_path)
+ self.treatments = treatments
+ self.groups = groups
+ self.model_path: Optional[Path] = None
+
+ # Validate base path exists
+ if not self.base_path.exists():
+ raise FileNotFoundError(f"Base path does not exist: {self.base_path}")
+
+ def extract_features(self, image_set_path: Path) -> None:
+ """
+ Extract features from images using VAMPIRE.
+
+ Args:
+ image_set_path: Path to the image dataset
+ """
+ logger.info(f"Extracting features from: {image_set_path}")
+
+ try:
+ vampire.extraction.extract_properties(str(image_set_path))
+ logger.info("Feature extraction completed successfully")
+ except Exception as e:
+ logger.error(f"Error during feature extraction: {e}")
+ raise
+
+ def train_model(self, image_set_path: Path, model_name: str = 'li',
+ num_points: int = 50, num_clusters: int = 5) -> Path:
+ """
+ Train a VAMPIRE model on the extracted features.
+
+ Args:
+ image_set_path: Path to the training image dataset
+ model_name: Name identifier for the model
+ num_points: Number of points for model training
+ num_clusters: Number of clusters for model training
+
+ Returns:
+ Path to the trained model file
+ """
+ logger.info(f"Training VAMPIRE model: {model_name}")
+
+ build_info_df = pd.DataFrame({
+ 'img_set_path': [str(image_set_path)],
+ 'output_path': [str(image_set_path)],
+ 'model_name': [model_name],
+ 'num_points': [num_points],
+ 'num_clusters': [num_clusters],
+ 'num_pc': [np.nan]
+ })
+
+ try:
+ vampire.quickstart.fit_models(build_info_df)
+
+ # Find the generated model file
+ model_pattern = f"model_{model_name}_({num_points}_{num_clusters}_*)__.pickle"
+ model_files = list(image_set_path.glob(model_pattern))
+
+ if not model_files:
+ raise FileNotFoundError(f"No model file found matching pattern: {model_pattern}")
+
+ self.model_path = model_files[0]
+ logger.info(f"Model trained successfully: {self.model_path}")
+ return self.model_path
+
+ except Exception as e:
+ logger.error(f"Error during model training: {e}")
+ raise
+
+ def create_apply_dataframe(self, test_base_path: Path, model_path: Path) -> pd.DataFrame:
+ """
+ Create a DataFrame for applying the trained model to test datasets.
+
+ Args:
+ test_base_path: Base path for test datasets
+ model_path: Path to the trained model
+
+ Returns:
+ DataFrame with application configuration
+ """
+ apply_data = []
+
+ for group in self.groups:
+ for treatment in self.treatments:
+ img_set_path = test_base_path / group / treatment
+
+ # Check if path exists before adding to dataframe
+ if img_set_path.exists():
+ apply_data.append({
+ 'img_set_path': str(img_set_path),
+ 'model_path': str(model_path),
+ 'output_path': str(img_set_path),
+ 'img_set_name': treatment
+ })
+ logger.debug(f"Added to apply list: {treatment}")
+ else:
+ logger.warning(f"Path does not exist, skipping: {img_set_path}")
+
+ if not apply_data:
+ raise ValueError("No valid test datasets found")
+
+ return pd.DataFrame(apply_data)
+
+ def apply_model(self, test_base_path: Path) -> None:
+ """
+ Apply the trained model to test datasets.
+
+ Args:
+ test_base_path: Base path for test datasets
+ """
+ if self.model_path is None:
+ raise ValueError("Model must be trained before applying")
+
+ logger.info(f"Applying model to test datasets in: {test_base_path}")
+
+ try:
+ apply_info_df = self.create_apply_dataframe(test_base_path, self.model_path)
+ logger.info(f"Applying model to {len(apply_info_df)} datasets")
+
+ vampire.quickstart.transform_datasets(apply_info_df)
+ logger.info("Model application completed successfully")
+
+ except Exception as e:
+ logger.error(f"Error during model application: {e}")
+ raise
+
+ def run_full_pipeline(self, training_subpath: str = "training/vampire_data",
+ testing_subpath: str = "testing/vampire_data") -> None:
+ """
+ Run the complete training and application pipeline.
+
+ Args:
+ training_subpath: Relative path to training data
+ testing_subpath: Relative path to testing data
+ """
+ train_path = self.base_path / training_subpath
+ test_path = self.base_path / testing_subpath
+
+ logger.info("Starting VAMPIRE model pipeline")
+ logger.info(f"Training path: {train_path}")
+ logger.info(f"Testing path: {test_path}")
+
+ # Step 1: Extract features
+ self.extract_features(train_path)
+
+ # Step 2: Train model
+ self.train_model(train_path)
+
+ # Step 3: Apply model to test data
+ self.apply_model(test_path)
+
+ logger.info("Pipeline completed successfully")
\ No newline at end of file
diff --git a/temp_notebooks/.gitkeep b/temp_notebooks/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/temp_notebooks/notebooks_from_existing_codebase/.gitkeep b/temp_notebooks/notebooks_from_existing_codebase/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/temp_notebooks/notebooks_from_existing_codebase/1_Phuong_try_all_thresholds_pipeline.ipynb b/temp_notebooks/notebooks_from_existing_codebase/1_Phuong_try_all_thresholds_pipeline.ipynb
deleted file mode 100644
index 4aa559e..0000000
--- a/temp_notebooks/notebooks_from_existing_codebase/1_Phuong_try_all_thresholds_pipeline.ipynb
+++ /dev/null
@@ -1,327 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Purpose: To Test the Sci-kit Image Thresholds for the Phuong_BEV Collaboration Images"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Date Created: October 12th, 2021"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Dates Edited: October 12th, 2021 - Converted from the original notebook for the microFIBER paper\n",
- "Edited: April 13th, 2022 - Changed some markdown documentation stating the wrong image set\n",
- "\n",
- "October 27th, 2021 - Downloaded all the .tiff files and changed the imports"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 1: Import necessary packages*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#import operating system\n",
- "import os\n",
- "\n",
- "# import major packages\n",
- "import numpy as np\n",
- "import matplotlib.pyplot as plt\n",
- "import skimage\n",
- "import PIL as Image"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Separating the two imports so that it is a cleaner workflow"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# import specific package functions\n",
- "from skimage import io\n",
- "from skimage.filters import try_all_threshold\n",
- "from skimage.filters import threshold_isodata\n",
- "from skimage.filters import threshold_li\n",
- "from skimage.filters import threshold_mean\n",
- "from skimage.filters import threshold_minimum\n",
- "from skimage.filters import threshold_otsu\n",
- "from skimage.filters import threshold_triangle\n",
- "from skimage.filters import threshold_yen\n",
- "\n",
- "from skimage import morphology\n",
- "from scipy import ndimage\n",
- "from skimage.measure import label"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Phuong's Data Set"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 2: Import image for testing thresholds*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#Defining the local folder location for all files\n",
- "im_folder_location = '/Users/hhelmbre/Desktop/Phuong_Bev'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def folder_cleaner(folder, image_type):\n",
- " k=0\n",
- " for files in folder:\n",
- " if image_type in str(files):\n",
- " k+=1\n",
- " else:\n",
- " folder = np.delete(folder, np.argwhere(folder == str(files)))\n",
- " return folder"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#Getting a list of all the seprate animal model folders\n",
- "arr = os.listdir(im_folder_location)\n",
- "animal_list = np.asarray(arr)\n",
- "\n",
- "#Since I am working on a mac operating system, this removes the DS_store file\n",
- "animal_list = animal_list[1:]\n",
- "animal_list"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#Getting a list of all the images we want to run and test thresholds on\n",
- "try_all_im_list = []\n",
- "for folders in animal_list:\n",
- " sub_folder_location = str(im_folder_location + '/' + folders)\n",
- " arr = os.listdir(sub_folder_location)\n",
- " image_list = np.asarray(arr)\n",
- " image_list = [ x for x in image_list if \"TILE\" not in x ]\n",
- " image_list = [ x for x in image_list if \"all_thresh\" not in x ]\n",
- " image_list = [x for x in image_list if \".tif\" in x]\n",
- " for images in image_list:\n",
- " file_name = str(im_folder_location + '/' + folders + '/' + images)\n",
- " try_all_im_list.append(file_name)\n",
- "\n",
- "try_all_im_list"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#Testing which channel the iba stain is on\n",
- "im = io.imread(try_all_im_list[0])\n",
- "green_im = im[:,:,1]\n",
- "plt.imshow(green_im, cmap='Greys_r')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 3: Trying multiple tresholds at once and saving the composite image*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "j = 0 #initialize a counter\n",
- "for images in try_all_im_list:\n",
- " im_name = try_all_im_list[j]\n",
- " im = io.imread(im_name)\n",
- " microglia_im = im[:,:,1]\n",
- " fig, ax = try_all_threshold(microglia_im, figsize=(10, 8), verbose=False)\n",
- "\n",
- " \n",
- " j += 1\n",
- " fig_name = str(im_name[:-4] + '_all_thresh.tif')\n",
- " fig.savefig(fig_name)\n",
- " plt.close('all')\n",
- " print(j)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Move all images with try_all_thresh into their own folder*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import shutil\n",
- "import os\n",
- "\n",
- "source_dir = '/Users/hhelmbre/Desktop/Phuong_Bev/Healthy_OGD_Controls_tif'\n",
- "target_dir = '/Users/hhelmbre/Desktop/Phuong_Bev/try_all_thresh'\n",
- "\n",
- "arr = os.listdir(source_dir)\n",
- "file_list = np.asarray(arr)\n",
- "\n",
- "file_list = [x for x in file_list if \"all_thresh\" in x]\n",
- "\n",
- " \n",
- "for file_name in file_list:\n",
- " shutil.move(os.path.join(source_dir, file_name), target_dir)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 4: Based on visual inspection of the try all thresholds, Li threshold does the best*"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To determine the size of the small object we should remove we took the size of microglia which are approximately 1600 um^2 according to (https://jneuroinflammation.biomedcentral.com/articles/10.1186/s12974-014-0182-7)\n",
- "\n",
- "The confocal settings for the OGD severity study provided a 1 pixel = 3.4527 um conversion. To ensure we didn't cut off any potential microglia, we chose a cutoff on the small end to be 1/2 of the average which converts to 71 pixels ^2 used in the min_size of the remove small objects function from sci-kit image."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "im_folder_location = '/Users/hhelmbre/Desktop/Phuong_Bev'\n",
- "\n",
- "#Getting a list of all the seprate animal model folders\n",
- "arr = os.listdir(im_folder_location)\n",
- "animal_list = np.asarray(arr)\n",
- "\n",
- "#Since I am working on a mac operating system, this removes the DS_store file\n",
- "animal_list = animal_list[1:]\n",
- "animal_list = folder_cleaner(animal_list, 'tif')\n",
- "\n",
- "#Getting a list of all the images we want to run and test thresholds on\n",
- "try_all_im_list = []\n",
- "for folders in animal_list:\n",
- " sub_folder_location = str(im_folder_location + '/' + folders)\n",
- " arr = os.listdir(sub_folder_location)\n",
- " image_list = np.asarray(arr)\n",
- " image_list = [ x for x in image_list if \"TILE\" not in x ]\n",
- " image_list = [ x for x in image_list if \"all_thresh\" not in x ]\n",
- " image_list = [x for x in image_list if \".tif\" in x]\n",
- " for images in image_list:\n",
- " file_name = str(im_folder_location + '/' + folders + '/' + images)\n",
- " try_all_im_list.append(file_name)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#li threshold\n",
- "k = 0\n",
- "source_dir = '/Users/hhelmbre/Desktop/Phuong_Bev/'\n",
- "\n",
- "for images in try_all_im_list:\n",
- " im_name = try_all_im_list[k]\n",
- " im = io.imread(im_name)\n",
- " microglia_im = im[:,:,1]\n",
- " thresh_li = skimage.filters.threshold_li(microglia_im)\n",
- " binary_li = microglia_im > thresh_li\n",
- " new_binary_li = morphology.remove_small_objects(binary_li, min_size=71)\n",
- " new_binary_li = ndimage.binary_fill_holes(new_binary_li)\n",
- " \n",
- " short_im_name = im_name.split('/')[6]\n",
- "\n",
- " np.save(str(source_dir + 'li_thresh/' + short_im_name[:-4] + '_li_thresh'), new_binary_li)\n",
- " \n",
- " print(k)\n",
- " k += 1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/temp_notebooks/notebooks_from_existing_codebase/2_Phuong_collab_segmentation.ipynb b/temp_notebooks/notebooks_from_existing_codebase/2_Phuong_collab_segmentation.ipynb
deleted file mode 100644
index 6fc21af..0000000
--- a/temp_notebooks/notebooks_from_existing_codebase/2_Phuong_collab_segmentation.ipynb
+++ /dev/null
@@ -1,293 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Purpose: Phuong BeV Microglia OGD Cell Analysis"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Purpose: To quantify cell features of already segmented microglia images from step 1 in this folder"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Created by: Hawley Helmbrecht"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Creation Date: 06/4/2021 (Original file)\n",
- "\n",
- "Edit Date: 10/27/2021 - Change the file to be for Phuongs BeV Analysis rather than the Australian Pig Project"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 1: Import Necessary Packages*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np\n",
- "import pandas as pd\n",
- "from scipy import ndimage\n",
- "\n",
- "import skimage.filters\n",
- "from skimage import morphology\n",
- "from skimage.measure import label, regionprops, regionprops_table\n",
- "from skimage.color import label2rgb\n",
- "from skimage import io\n",
- "from skimage import measure \n",
- "\n",
- "import matplotlib.pyplot as plt\n",
- "import matplotlib.patches as mpatches\n",
- "\n",
- "import watermark\n",
- "import os\n",
- "from PIL import Image"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 2: User Inputs*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#replace the example path from my computer with the path to the image on your computer\n",
- "\n",
- "cell_folder = '/Users/hhelmbre/Desktop/Phuong_Bev/li_thresh'\n",
- "\n",
- "file_type = '.npy'"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 3: Defining a Folder Cleaner Function to only Return Tif Images*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def folder_cleaner(folder, image_type):\n",
- " k=0\n",
- " for files in folder:\n",
- " if image_type in str(files):\n",
- " k+=1\n",
- " else:\n",
- " folder = np.delete(folder, np.argwhere(folder == str(files)))\n",
- " return folder"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 4: Get All Images in the Folder*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "arr = os.listdir(cell_folder)\n",
- "file_list = np.asarray(arr)\n",
- "file_list = folder_cleaner(file_list, file_type)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "file_list"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 5: Segmenting and Calculating Region Features on All Images*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "properties_list = ('area', 'bbox_area', 'centroid', 'convex_area', \n",
- " 'eccentricity', 'equivalent_diameter', 'euler_number', \n",
- " 'extent', 'filled_area', 'major_axis_length', \n",
- " 'minor_axis_length', 'orientation', 'perimeter', 'solidity')\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "j = 0\n",
- "for names in file_list:\n",
- " file_name = str(cell_folder + '/' + names)\n",
- " new_binary_li = np.load(file_name)\n",
- " label_image = label(new_binary_li)\n",
- " \n",
- " #Feel free to add them here as well. The computational time is pretty efficient\n",
- " props = measure.regionprops_table(label_image, properties=(properties_list))\n",
- "\n",
- " if j == 0:\n",
- " df = pd.DataFrame(props)\n",
- " df['filename'] = names\n",
- " else:\n",
- " df2 = pd.DataFrame(props)\n",
- " df2['filename'] = names\n",
- " df = df.append(df2)\n",
- "\n",
- " j = 1"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 6: Caculating the Circularity*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "df['circularity'] = 4*np.pi*df.area/df.perimeter**2"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 7: Calculating the Aspect Ratio*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "df['aspect_ratio'] = df.major_axis_length/df.minor_axis_length"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 8: Add in a column for the ID*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "df"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 10: Saving as a CSV file*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "df.to_csv('/Users/hhelmbre/Desktop/Phuong_BeV/10_27_2021_li_threshold_all_data.csv')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 11: Print Dependencies and State*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "%load_ext watermark\n",
- "\n",
- "%watermark -v -m -p numpy,pandas,scipy,skimage,matplotlib,wget\n",
- "\n",
- "%watermark -u -n -t -z"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.4"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/temp_notebooks/notebooks_from_existing_codebase/3_Brendan_VAMPIRE_workflow.ipynb b/temp_notebooks/notebooks_from_existing_codebase/3_Brendan_VAMPIRE_workflow.ipynb
deleted file mode 100644
index 175f646..0000000
--- a/temp_notebooks/notebooks_from_existing_codebase/3_Brendan_VAMPIRE_workflow.ipynb
+++ /dev/null
@@ -1,424 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# VAMPIRE WORKFLOW"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Purpose: To split images into quadrants, pick training and testing image sets, and in the future run the full VAMPIRE workflow"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Edited: October 28th, 2021 to specifically refer to Phuong's BEV Treatment data and split them for VAMPIRE"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 1: Import necessary packages*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import shutil, os\n",
- "\n",
- "from glob import glob\n",
- "\n",
- "import numpy as np\n",
- "import pandas as pd\n",
- "from skimage import io\n",
- "import matplotlib.pyplot as plt\n",
- "from PIL import Image\n",
- "from numpy.linalg import inv\n",
- "from sklearn.model_selection import train_test_split\n",
- "from skimage.segmentation import clear_border\n",
- "\n",
- "\n",
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 2: User Inputs*"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Manual Step:\n",
- "Beginning with already segmented images saved as .npy arrays from the \"2_Phuong_collab_segmentation.ipynb\" Jupyter Notebook also within this folder. \n",
- "\n",
- "Not a blind study. \n",
- "\n",
- "Images already exist in a folder tree based on the overall slice treatment time and then the group subset for example:\n",
- "\n",
- "48_hr_exposure_time > (1) BEV_treatment (2) healthy_control (3) OGD_control"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#file names should be in the current working directory\n",
- "folder_location = '/Users/hhelmbre/Desktop/Phuong_Bev/li_thresh'\n",
- "\n",
- "file_type_init = '.npy'\n",
- "\n",
- "slice_number = 4\n",
- "random_state_num = 3"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 5: Getting the List of Images to Split*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "arr = os.listdir(folder_location)\n",
- "folder_list = np.asarray(arr)\n",
- "folder_list = [ x for x in folder_list if \"DS\" not in x ]\n",
- "folder_list"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "files_to_split_list = []\n",
- "for folders in folder_list:\n",
- " arr = os.listdir(str(folder_location + '/' + folders))\n",
- " subfolder_list = np.asarray(arr)\n",
- " subfolder_list = [ x for x in subfolder_list if \"DS\" not in x]\n",
- " for subfolders in subfolder_list:\n",
- " arr = os.listdir(str(folder_location + '/' + folders + '/' + subfolders))\n",
- " files_list = np.asarray(arr)\n",
- " files_list = [ x for x in files_list if \"DS\" not in x]\n",
- " for files in files_list:\n",
- " name = str(folder_location + '/' + folders + '/' + subfolders + '/' + files)\n",
- " files_to_split_list.append(name)\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 6: Split the Images*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- " name = files_to_split_list[0]\n",
- " file = np.load(name)\n",
- " quada, quadb = np.array_split(file, 2)\n",
- " quad1, quad2 = np.hsplit(quada, 2)\n",
- " quad3, quad4 = np.hsplit(quadb, 2)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "for files in files_to_split_list:\n",
- " file = np.load(files)\n",
- " quada, quadb = np.array_split(file, 2)\n",
- " quad1, quad2 = np.array_split(quada, 2, axis=1)\n",
- " quad3, quad4 = np.array_split(quadb, 2, axis=1)\n",
- " \n",
- " quad1 = clear_border(quad1)\n",
- " quad2 = clear_border(quad2)\n",
- " quad3 = clear_border(quad3)\n",
- " quad4 = clear_border(quad4)\n",
- "\n",
- " \n",
- " np.save(str(files[:-4] + '_quad1.npy'), quad1)\n",
- " np.save(str(files[:-4] + '_quad2.npy'), quad2)\n",
- " np.save(str(files[:-4] + '_quad3.npy'), quad3)\n",
- " np.save(str(files[:-4] + '_quad4.npy'), quad4)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 4: Choose training and testing data sets*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "ttsplit_list_files = []\n",
- "for folders in folder_list:\n",
- " arr = os.listdir(str(folder_location + '/' + folders))\n",
- " subfolder_list = np.asarray(arr)\n",
- " subfolder_list = [ x for x in subfolder_list if \"DS\" not in x]\n",
- " for subfolders in subfolder_list:\n",
- " print(subfolders)\n",
- " arr = os.listdir(str(folder_location + '/' + folders + '/' + subfolders))\n",
- " files_list = np.asarray(arr)\n",
- " files_list = [ x for x in files_list if \"DS\" not in x]\n",
- " files_list = [ x for x in files_list if \"quad\" in x]\n",
- " X_train, X_test= train_test_split(files_list, test_size=0.20, random_state=random_state_num)\n",
- " for files in files_list:\n",
- " if files in X_train[:]:\n",
- " shutil.move(str(folder_location + '/' + folders + '/' + subfolders + '/' + files), '/Users/hhelmbre/Desktop/Phuong_Bev/vampire/train')\n",
- " else:\n",
- " shutil.move(str(folder_location + '/' + folders + '/' + subfolders + '/' + files), '/Users/hhelmbre/Desktop/Phuong_Bev/vampire/test')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step Y: Renaming the data sets according to VAMPIRE naming mechanism*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "arr_train1 = os.listdir('/Users/hhelmbre/Desktop/Phuong_Bev/vampire/train')\n",
- "file_list_train1 = np.asarray(arr_train1)\n",
- "file_list_train1 = [ x for x in file_list_train1 if \"DS\" not in x]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "im_number= 1\n",
- "for names in file_list_train1:\n",
- " print(names)\n",
- " \n",
- " file_location = str('/Users/hhelmbre/Desktop/Phuong_Bev/vampire/train/' + names)\n",
- " array = np.load(file_location)\n",
- " im = Image.fromarray(array)\n",
- " \n",
- " if im_number < 10:\n",
- " im.save(str('/Users/hhelmbre/Desktop/Phuong_Bev/vampire/images/train/' + names[:-4] + 'xy' + '0' + str(im_number) + 'c1.png'))\n",
- " \n",
- " else:\n",
- " im.save(str('/Users/hhelmbre/Desktop/Phuong_Bev/vampire/images/train/' + names[:-4] + 'xy' + str(im_number) + 'c1.png'))\n",
- " \n",
- " \n",
- " im_number +=1"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Splitting the test group into the appropriate conditions*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "arr_test = os.listdir('/Users/hhelmbre/Desktop/Phuong_Bev/vampire/test')\n",
- "file_list_test = np.asarray(arr_test)\n",
- "file_list_test = [ x for x in file_list_test if \"DS\" not in x]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "im_number= 1\n",
- "for names in file_list_test:\n",
- " print(names)\n",
- " \n",
- " file_location = str('/Users/hhelmbre/Desktop/Phuong_Bev/vampire/test/' + names)\n",
- " array = np.load(file_location)\n",
- " im = Image.fromarray(array)\n",
- " \n",
- " if im_number < 10:\n",
- " im.save(str('/Users/hhelmbre/Desktop/Phuong_Bev/vampire/images/test/' + names[:-4] + 'xy' + '0' + str(im_number) + 'c1.png'))\n",
- " \n",
- " else:\n",
- " im.save(str('/Users/hhelmbre/Desktop/Phuong_Bev/vampire/images/test/' + names[:-4] + 'xy' + str(im_number) + 'c1.png'))\n",
- " \n",
- " \n",
- " im_number +=1"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Creating the information necessary for VAMPIRE Analysis"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "data_folder = '/Users/hhelmbre/Desktop/Phuong_Bev/vampire/images'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#creates the directory in your data folder to put all information related to the model\n",
- "os.mkdir(str(data_folder + '/' + '10_29_21_model'))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "#creates the csv for building a model\n",
- "data = [['all_training_images', '1', str(data_folder + '/' + 'train'), 'please work', 'c1']]\n",
- "build_model_csv = pd.DataFrame(data, columns = ['condition', 'set number', 'set location', 'note', 'ch1']) \n",
- "\n",
- "#saves csv to newly created model directory\n",
- "build_model_csv.to_csv(data_folder + '/' + '10_29_21_model/' + 'images_to_build_model.csv', index=False)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "treatments = ['48_hr_exposure_0_hr_application_pngs', '24_hr_exposure_0_hr_application_pngs', '4_hr_exposure_0_hr_application_pngs', 'controls']\n",
- "groups = ['bev_treatment', 'healthy_control', 'ogd_control']"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "apply_model_paths = []\n",
- "apply_model_csv = pd.DataFrame(columns = ['condition', 'set number', 'set location', 'note', 'ch1'])\n",
- "set_number = 1\n",
- "for treatment in treatments:\n",
- " for group in groups:\n",
- " path = data_folder + '/test/' + treatment + '/' + group\n",
- " df2 = pd.DataFrame({'condition': [treatment], 'set number': [set_number], 'set location': [path], 'note': ['Phuong BeV analysis'], 'tag': ['c1']})\n",
- " apply_model_csv = apply_model_csv.append(df2)\n",
- "\n",
- " set_number += 1\n",
- "\n",
- "apply_model_csv.to_csv(data_folder + '/' + '10_29_21_model/' + 'images_to_apply_model.csv', index=False)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print('Build Model CSV Path:', str(data_folder + '/' + 'model/' + 'images_to_build_model.csv'))\n",
- "\n",
- "print('Conda Environment: tiredvampires)\n",
- "print('Number of Shape Models (Recommended):', '5')\n",
- "print('Number of Shape Coordinates (Recommended):', '50')\n",
- "print('Model Name:', '10_29_21_model')\n",
- "\n",
- "print('Apply Model CSV Path:', str(data_folder + '/' + 'model/' + 'images_to_apply_model.csv'))\n",
- "print('Model to Apply:', str(data_folder + '/' + 'model/' + '10_20_21_model'))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import vampireanalysis\n",
- "from vampireanalysis import vampire"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "vampire()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.8"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/temp_notebooks/notebooks_from_existing_codebase/4_Phuong_Python_coloring_book.ipynb b/temp_notebooks/notebooks_from_existing_codebase/4_Phuong_Python_coloring_book.ipynb
deleted file mode 100644
index 7c5e6a8..0000000
--- a/temp_notebooks/notebooks_from_existing_codebase/4_Phuong_Python_coloring_book.ipynb
+++ /dev/null
@@ -1,415 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Purpose: Coloring Segmented Images According to Shape Mode"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Date Created: 1-26-2021"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Date Updated: 11-15-2021: Updated to change from the ferret paper to the Microglia/BeV paper"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Author: Hawley Helmbrecht"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "**Colors:** \n",
- "\n",
- "shape mode 1: Blue\n",
- "\n",
- "shape mode 2: Orange\n",
- "\n",
- "shape mode 3: Green\n",
- "\n",
- "shape mode 4: Red\n",
- "\n",
- "shape mode 5: Purple"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 1: Import Necessary Packages*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np\n",
- "import matplotlib.image as mpimg \n",
- "import matplotlib.pyplot as plt\n",
- "import pandas as pd\n",
- "import cv2\n",
- "from PIL import Image\n",
- "import os"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 2: User input variables*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "im_file_path = '/Users/hhelmbre/Desktop/Phuong_Bev/vampire/images/test/4_hr_exposure_0_hr_application_pngs/bev_treatment/EV_MICROGLIA_STUDY_4HR_EXPOSURE_1-2-1-1_li_thresh_quad2xy47c1.png'\n",
- "csv_file_path = '/Users/hhelmbre/Desktop/Phuong_Bev/visualization/shape_mode_vs_feature_all.csv'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 3: Reading in the Image and CSV with Labels*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "img = mpimg.imread(im_file_path)\n",
- "csv_df = pd.read_csv(csv_file_path)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "csv_df"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 4: Getting the file name*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "im_file_name_split = im_file_path.split('/')\n",
- "length = len(im_file_name_split)\n",
- "file_name = im_file_name_split[length-1]\n",
- "file_name"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 5: Creating a mask for labeling*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "mask_b = np.zeros(np.asarray(img.shape)+2, dtype=np.uint8)\n",
- "mask_o = np.zeros(np.asarray(img.shape)+2, dtype=np.uint8)\n",
- "mask_g = np.zeros(np.asarray(img.shape)+2, dtype=np.uint8)\n",
- "mask_r = np.zeros(np.asarray(img.shape)+2, dtype=np.uint8)\n",
- "mask_p = np.zeros(np.asarray(img.shape)+2, dtype=np.uint8)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "start_pt = (14,111)\n",
- "cv2.floodFill(img, mask_o, start_pt, (0,0,255), flags=4)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "mask_o = mask_o[1:-1, 1:-1]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "plt.imshow(mask_o, cmap='gray')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step X: Automating the Process*"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 1: Import need_to_label_list*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "need_to_label_list_file_path = '/Users/hhelmbre/Desktop/Phuong_Bev/visualization/images_to_color.csv'\n",
- "need_to_label_list = pd.read_csv(need_to_label_list_file_path)\n",
- "need_to_label_list"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "need_to_label_list2 = []\n",
- "j = 0\n",
- "for file_paths in range(0,need_to_label_list.shape[0]):\n",
- " arr = os.listdir(need_to_label_list['image_file_path'][j])\n",
- " image_list = np.asarray(arr)\n",
- " image_list = [x for x in image_list if \".png\" in x]\n",
- " for images in image_list:\n",
- " file_name = str(need_to_label_list['image_file_path'][j] + '/' + images)\n",
- " need_to_label_list2.append(file_name)\n",
- " \n",
- " j += 1\n",
- " "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Add in a step here to visualize the shape mode labels with the colors I chose*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def c(x):\n",
- " col = plt.cm.twilight(x)\n",
- " fig, ax = plt.subplots(figsize=(1,1))\n",
- " fig.set_facecolor(col)\n",
- " ax.axis(\"off\")\n",
- " plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "color_list = np.linspace(0,1,20)\n",
- "color_list = [0.5, 0.047, 0.7, 0.33, 0.9]\n",
- "for options in color_list:\n",
- " print(options)\n",
- " c(options)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*The code to actually recolor the images*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "label_save_file_path = '/Users/hhelmbre/Desktop/Phuong_Bev/vampire/color_coded'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "k = 0\n",
- "cmap = plt.get_cmap('twilight')\n",
- "cmap.set_bad(color = 'white')\n",
- "for images in need_to_label_list2:\n",
- " \n",
- " #Reading in the image and its labels\n",
- " im_file_path = images\n",
- " img = mpimg.imread(im_file_path)\n",
- " csv_df = pd.read_csv('/Users/hhelmbre/Desktop/Phuong_Bev/visualization/shape_mode_vs_feature_all.csv')\n",
- " \n",
- " #Getting the file path from the file name\n",
- " im_file_name_split = im_file_path.split('/')\n",
- " length = len(im_file_name_split)\n",
- " file_name = im_file_name_split[length-1]\n",
- " print(file_name)\n",
- " \n",
- " mask = np.zeros(np.asarray(img.shape)+2, dtype=np.uint8)\n",
- " \n",
- " csv_df = csv_df[csv_df.Filename == file_name]\n",
- " csv_df = csv_df.reset_index(drop=True)\n",
- " \n",
- " for shapes in range(len(csv_df)):\n",
- " shape_mode = (csv_df['Shape mode'][shapes]).astype(int)\n",
- " start_pt = (csv_df['X'][shapes], csv_df['Y'][shapes])\n",
- " area =csv_df['Area'][shapes]\n",
- " \n",
- " if img[csv_df['Y'][shapes]][csv_df['X'][shapes]]== 1:\n",
- " if shape_mode==1: #0.5\n",
- " cv2.floodFill(img, mask, start_pt, 0, flags=0)\n",
- " mask[mask == 1] = 128\n",
- "\n",
- " elif shape_mode==2: #0.047\n",
- " cv2.floodFill(img, mask, start_pt, 0, flags=0)\n",
- " mask[mask == 1] = 12\n",
- " elif shape_mode==3: #0.7\n",
- " cv2.floodFill(img, mask, start_pt, 0, flags=0)\n",
- " mask[mask == 1] = 179\n",
- "\n",
- " elif shape_mode==4: #0.33\n",
- " cv2.floodFill(img, mask, start_pt, 0, flags=0)\n",
- " mask[mask == 1] = 85\n",
- "\n",
- " elif shape_mode==5: #0.9\n",
- " cv2.floodFill(img, mask, start_pt, 0, flags=0)\n",
- " mask[mask == 1] = 230\n",
- "\n",
- " if shapes == len(csv_df)-1:\n",
- " mask = mask[1:-1, 1:-1]\n",
- " mask[0][0] = 255\n",
- " \n",
- " mask = mask.astype('float')\n",
- " mask[mask==0] = np.nan\n",
- " \n",
- " plt.imshow(mask,cmap='twilight')\n",
- " plt.tick_params(\n",
- " axis='x',\n",
- " which='both',\n",
- " bottom=False,\n",
- " top=False,\n",
- " labelbottom=False)\n",
- " plt.yticks([])\n",
- " plt.savefig(str(label_save_file_path + str(k) + file_name), bbox_inches = 'tight',\n",
- " pad_inches = 0)\n",
- "\n",
- " #img_to_save = Image.fromarray(mask)\n",
- " #img_to_save.save(str(label_save_file_path + str(k) + file_name))\n",
- "\n",
- " \n",
- " k += 1"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "*Step 3: Environment Specs*"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "%load_ext watermark\n",
- "\n",
- "%watermark -v -m -p numpy,pandas,scipy,skimage,matplotlib,cv2,PIL,wget\n",
- "\n",
- "%watermark -u -n -t -z"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.4"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/temp_notebooks/temporary_notebooks_for_concept_testing/.gitkeep b/temp_notebooks/temporary_notebooks_for_concept_testing/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/test/test_adaptive_thresholding.py b/test/test_adaptive_thresholding.py
index 3cdef70..a6143ee 100644
--- a/test/test_adaptive_thresholding.py
+++ b/test/test_adaptive_thresholding.py
@@ -1,9 +1,10 @@
# import module or package here
-import unittest
+import pytest
import random
import os
+import turmoric
-class TestAdaptiveThresholding(unittest.TestCase):
+class TestAdaptiveThresholding():
def test_thresholding_creates_folder(self):
test = 0
@@ -15,8 +16,4 @@ def test_thresholding_Otsu(self):
test = 0
def test_thresholding_Mean(self):
- test = 0
-
-
-if __name__ == '__main__':
- unittest.main()
+ test = 0
\ No newline at end of file
diff --git a/test/test_analysis.py b/test/test_analysis.py
index 86d5964..36dd860 100644
--- a/test/test_analysis.py
+++ b/test/test_analysis.py
@@ -1,9 +1,10 @@
# import module or package here
-import unittest
+import pytest
import random
import os
+import turmoric
-class TestAnalysis(unittest.TestCase):
+class TestAnalysis():
def test_pca_bdreg(self):
test = 0
@@ -12,8 +13,4 @@ def test_pca_custom(self):
test = 0
def test_clusterSM(self):
- test = 0
-
-
-if __name__ == '__main__':
- unittest.main()
+ test = 0
\ No newline at end of file
diff --git a/test/test_apply_thresholds.py b/test/test_apply_thresholds.py
new file mode 100644
index 0000000..bf8ce51
--- /dev/null
+++ b/test/test_apply_thresholds.py
@@ -0,0 +1,149 @@
+import random
+import pytest
+import os
+import shutil
+from skimage import io
+from skimage.util import img_as_ubyte
+from skimage.io import imsave
+import shutil
+import tempfile
+from typing import Callable
+import numpy as np
+import turmoric
+from turmoric.apply_thresholds import apply_all_thresh
+from turmoric.apply_thresholds import apply_li_threshold
+from turmoric.apply_thresholds import apply_threshold_recursively
+
+
+@pytest.fixture
+def temp_dirs():
+ input_dir = tempfile.mkdtemp()
+ output_dir = tempfile.mkdtemp()
+
+ # Create dummy .tif files
+ test_files = ['image1.tif', 'subdir/image2.tif']
+ for file in test_files:
+ full_path = os.path.join(input_dir, file)
+ os.makedirs(os.path.dirname(full_path), exist_ok=True)
+ with open(full_path, 'wb') as f:
+ f.write(b'dummy data')
+
+ yield input_dir, output_dir, test_files
+
+ # Cleanup
+ shutil.rmtree(input_dir)
+ shutil.rmtree(output_dir)
+
+
+@pytest.fixture
+def input_folder():
+ # Create a temporary directory
+ temp_dir = tempfile.mkdtemp()
+ # Optionally, create test files here
+ yield temp_dir
+ # Cleanup after test
+ shutil.rmtree(temp_dir)
+
+
+@pytest.fixture
+def output_folder():
+ # Create a temporary directory
+ temp_dir = tempfile.mkdtemp()
+ # Optionally, create test files here
+ yield temp_dir
+ # Cleanup after test
+ shutil.rmtree(temp_dir)
+
+
+@pytest.fixture
+def channel():
+
+
+
+
+def test_apply_all_thresh(input_folder, output_folder, channel, figsize):
+ # Create temporary input and output directories
+ with tempfile.TemporaryDirectory() as input_dir, tempfile.TemporaryDirectory() as output_dir:
+ # Create a dummy RGB image
+ image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
+ image_path = os.path.join(input_dir, "test_image.tif")
+ imsave(image_path, image)
+
+ # Mock the file discovery function
+ def mock_get_all_filepaths(folder, ext):
+ return [image_path]
+ turmoric.recursively_get_all_filepaths = mock_get_all_filepaths
+
+ # Run the function
+ apply_all_thresh(input_dir, output_dir, channel=1)
+
+ # Check that output file exists
+ output_files = os.listdir(output_dir)
+ assert any(f.endswith("_all_thresh.tif") for f in output_files)
+
+ if not isinstance(input_folder, str):
+ raise TypeError("input_folder must be a string")
+ if not isinstance(output_folder, str):
+ raise TypeError("output_folder must be a string")
+ if not isinstance(channel, int) or channel < 0:
+ raise ValueError("channel must be a non-negative integer")
+ if not (isinstance(figsize, tuple) and len(figsize) == 2 and all(isinstance(x, (int, float)) for x in figsize)):
+ raise TypeError("figsize must be a tuple of two numbers")
+
+
+def test_apply_li_threshold(file, channel, binary_li):
+ # Create a synthetic image with two intensity regions
+ image = np.zeros((100, 100), dtype=np.uint8)
+ image[50:] = 200 # Bottom half is brighter
+
+ # Save to a temporary file
+ with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
+ io.imsave(temp_file.name, img_as_ubyte(image))
+ temp_path = temp_file.name
+
+ try:
+ # Apply the thresholding function
+ binary = apply_li_threshold(temp_path)
+
+ # Assertions
+ assert binary.dtype == bool, "Output should be a boolean array"
+ assert binary.shape == image.shape, "Output shape should match input"
+ assert binary[:50].sum() == 0, "Top half should be below threshold"
+ assert binary[50:].sum() > 0, "Bottom half should be above threshold"
+
+ finally:
+ # Clean up
+ os.remove(temp_path)
+ if not isinstance(file, str):
+ raise TypeError("file must be a string")
+ if not isinstance(channel, int) or channel < 0:
+ raise ValueError("channel must be a non-negative integer")
+ if not isinstance(binary_li, np.ndarray):
+ raise TypeError("binary_li must be a numpy array")
+ if binary_li.ndim != 2:
+ assert isinstance(binary_li, np.ndarray) and binary_li.dtype == bool and binary_li.ndim == 2
+
+
+# Dummy threshold function
+def dummy_threshold(file_path):
+ return np.ones((10, 10), dtype=bool)
+
+
+def test_apply_threshold(temp_dirs):
+ input_dir, output_dir, test_files = temp_dirs
+
+ apply_threshold_recursively(
+ input_folder=input_dir,
+ output_folder=output_dir,
+ threshold_function=dummy_threshold
+ )
+
+ for file in test_files:
+ expected_path = os.path.join(
+ output_dir,
+ os.path.splitext(file)[0] + '.npy'
+ )
+ assert os.path.exists(expected_path), f"{expected_path} not found"
+
+ arr = np.load(expected_path)
+ assert np.array_equal(arr, np.ones((10, 10), dtype=bool))
diff --git a/test/test_cell_analysis.py b/test/test_cell_analysis.py
new file mode 100644
index 0000000..4853ed7
--- /dev/null
+++ b/test/test_cell_analysis.py
@@ -0,0 +1,74 @@
+import pytest
+import random
+import os
+import pandas as pd
+from skimage import io
+from skimage.util import img_as_ubyte
+import tempfile
+from typing import Callable
+import numpy as np
+import turmoric
+from turmoric.cell_analysis import apply_regionprops
+from turmoric.cell_analysis import apply_regionprops_recursively
+
+
+def test_apply_regionprops(input_folder, properties_list):
+ # Create a temporary directory with a dummy binary mask
+ with tempfile.TemporaryDirectory() as temp_dir:
+ mask = np.zeros((100, 100), dtype=np.uint8)
+ mask[30:70, 30:70] = 1 # Create a square region
+ mask_path = os.path.join(temp_dir, "mask_01.npy")
+ np.save(mask_path, mask)
+
+ # Run the function
+ props_df = apply_regionprops(mask_path, properties_list)
+
+ # Assertions
+ assert isinstance(props_df, pd.DataFrame), "Output should be a DataFrame"
+ assert 'filename' in props_df.columns, "DataFrame should contain 'filename' column"
+ assert len(props_df) == 1, "There should be one region in the mask"
+ assert all(prop in props_df.columns for prop in properties_list), "DataFrame should contain specified properties"
+
+ if not isinstance(input_folder, str):
+ raise TypeError("input_folder must be a string")
+ if not isinstance(properties_list, list):
+ raise TypeError("properties_list must be a list of strings")
+
+
+def test_apply_regionprops_recursively(input_folder, properties_list):
+ # Create a temporary directory with multiple binary masks
+ with tempfile.TemporaryDirectory() as temp_dir:
+ for i in range(3):
+ mask = np.zeros((100, 100), dtype=np.uint8)
+ mask[20*i:20*(i+1), 20*i:20*(i+1)] = 1 # Create square regions
+ mask_path = os.path.join(temp_dir, f"mask_{i+1:02d}.npy")
+ np.save(mask_path, mask)
+
+ # Run the function
+ props_df = apply_regionprops_recursively(temp_dir, properties_list)
+
+ # Assertions
+ assert isinstance(props_df, pd.DataFrame), "Output should be a DataFrame"
+ assert 'filename' in props_df.columns, "DataFrame should contain 'filename' column"
+ assert len(props_df) == 3, "There should be three regions in total"
+ assert all(prop in props_df.columns for prop in properties_list), "DataFrame should contain specified properties"
+
+ if not isinstance(input_folder, str):
+ raise TypeError("input_folder must be a string")
+ if not isinstance(properties_list, list):
+ raise TypeError("properties_list must be a list of strings")
+ if not all(isinstance(prop, str) for prop in properties_list):
+ raise TypeError("All elements in properties_list must be strings")
+ if not all(prop in ['area', 'centroid', 'eccentricity'] for prop in properties_list):
+ raise ValueError("properties_list contains invalid property names. Valid names are 'area', 'centroid', 'eccentricity'.")
+# assert binary.shape == (100, 100), "Output shape should match input image shape"
+# assert np.any(binary), "Output should contain some True values"
+# finally:
+# os.remove(temp_path) # Clean up temporary file
+#
+# if not isinstance(file, str):
+# raise TypeError("file must be a string representing the file path")
+# if not isinstance(channel, int) or channel < 0:
+# raise ValueError("channel must be a non-negative integer")
+# if not isinstance(properties_list, list):
+# raise TypeError("properties_list must be a list of strings")
\ No newline at end of file
diff --git a/test/test_image_filtering_and_cleaning.py b/test/test_image_filtering_and_cleaning.py
deleted file mode 100644
index 8b2cb76..0000000
--- a/test/test_image_filtering_and_cleaning.py
+++ /dev/null
@@ -1,355 +0,0 @@
-# this is a python script that opens an image from a file, filters it depending on their extension, splits the image into two channels and then separate those new images on separated files depending on their chanel and type.
-# The script also includes unit tests to validate the image processing functions.
-
-import shutil, os
-import numpy as np
-import pandas as pd
-from skimage import io #loads image as a numpy array
-import matplotlib.pyplot as plt
-from PIL import Image #pillow library used for loading images or saving
-import unittest
-import tempfile
-
-#######################################
-stain1 = 'dapi'
-stain2 = 'iba'
-#DAPI staining is helpful for identifying the overall number and distribution of cells within brain tissue. It provides a general nuclear marker for both neuronal and non-neuronal cells (like astrocytes, oligodendrocytes, and microglia).
-#iba: IBA1 staining is essential for studying microglia morphology, activation, and distribution. Microglia play a significant role in neuroinflammation, brain development, and injury responses.
-
-file_type_new = '.png'
-
-# Set folder location
-folder_location = r"C:\Users\sergi\Desktop\analysis_images"
-
-#######################################
-#Function 1: Filters images by their format(folder_cleaner)
-
-def folder_cleaner(folder, image_type):
- if not folder: # Check if the folder is empty
- return "The folder is empty"
- k = 0
- folder_array = np.array(folder)
- for file in folder_array:
- if file.startswith('.'): # Skip hidden files (those starting with '.')
- continue
- if image_type in str(file): # Check if the file contains the image_type (e.g., '.jpg', '.png', etc.)
- k += 1
- else:
- # Remove files that do not match the specified image type
- folder_array = np.delete(folder_array, np.argwhere(folder_array == str(file)))
- return folder_array # Return the filtered folder array
-# List files in the folder
-files_in_folder = os.listdir(folder_location)
-
-# Call the folder_cleaner function
-cleaned_files = folder_cleaner(files_in_folder, file_type_new)
-
-# Display the list of image names
-print("Filtered images:")
-for file in cleaned_files:
- print(file) # Print only the image names
-
-#####################################
-# Function 2: Transforms a list of filenames into a NumPy array
-def list_to_array(file_list):
- return np.asarray(file_list) # Convert the list to a NumPy array
-file_array = list_to_array(cleaned_files) # Get all filtered files as an array
-print("\nFiltered image files as an array:") # Display the filtered array
-print(file_array)
-
-#####################################
-# Function 3: Extract and save the two channels of images.
-def extract_channels(file_list, folder_location):
- for file_name in file_list:
- file_path = os.path.join(folder_location, file_name)
-
- try:
- # Read the image
- im = io.imread(file_path)
-
- # Ensure the image has at least two channels
- if im.ndim < 3 or im.shape[0] < 2:
- raise ValueError(f"Image {file_name} does not have at least 2 channels.")
-
- # Extract first and second channels
- channel1 = im[0, :, :]
- channel2 = im[1, :, :]
-
- if np.all(channel1 == 0) and np.all(channel2 == 0): #checks if images are completely black as precaution
- raise ValueError(f"Image {file_name} is completely black.")
-
- # Convert channels to images
- channel1_img = Image.fromarray(np.uint16(channel1))
- channel2_img = Image.fromarray(np.uint16(channel2))
-
- # Remove file extension (e.g., image1.png → image1)
- base_name = file_name.replace(file_type_new, "")
-
- # Save extracted channels with modified names
- channel1_img.save(os.path.join(folder_location, f"{base_name}_{stain1}{file_type_new}"))
- channel2_img.save(os.path.join(folder_location, f"{base_name}_{stain2}{file_type_new}"))
-
- print(f"Extracted channels saved for {file_name}")
-
- except ValueError as e:
- print(f"Warning: {e}")
- raise
- except Exception as e:
- print(f"Error processing {file_name}: {e}")
-
-# Call function
-extract_channels(file_array, folder_location)
-
-#######################################
-#Function 4: Creates channel folders and relocate extracted channels for each filtered image.
-def move_stain_images(folder_location, stain1, stain2):
- # Define destination folders
- stain1_folder = os.path.join(folder_location, stain1)
- stain2_folder = os.path.join(folder_location, stain2)
-
- # Ensure destination folders exist
- try:
- os.makedirs(stain1_folder, exist_ok=True)
- os.makedirs(stain2_folder, exist_ok=True)
- print(f"Folders created/verified: {stain1_folder}, {stain2_folder}")
- except OSError as e:
- print(f"Error: Could not create destination folders - {e}")
- return
- file_list = os.listdir(folder_location)
-
- # Check if there are files to move
- if not file_list:
- print("No files found to move.")
- return
-
- # Iterate through the file list and move files
- for tiled_image in file_list:
- source_path = os.path.join(folder_location, tiled_image)
-
- # Check if the file exists in the original location
- if not os.path.exists(source_path):
- print(f"Error: File not found - {tiled_image}")
- continue # Skip to the next file
-
- # Determine destination based on channel type.
- if tiled_image.lower().endswith("_dapi.png"):
- destination = os.path.join(stain1_folder, tiled_image)
- elif tiled_image.lower().endswith("_iba.png"):
- destination = os.path.join(stain2_folder, tiled_image)
- else:
- print(f"Skipped: {tiled_image} (Condition not met)")
- continue # Skip files that do not match conditions
-
- # Move the file
- try:
- shutil.move(source_path, destination)
- print(f"Moved: {tiled_image} → {destination}")
- except Exception as e:
- print(f"Error moving {tiled_image}: {e}")
-
-# Call the function
-move_stain_images(folder_location,stain1,stain2)
-
-####################################################
-#UNIT TESTS
-####################################################
-
-#Function 1
-
-class TestFolderCleaner(unittest.TestCase):
-
- def test_format_types(self):
- """Ensures folder cleaner function is abble to handle different files type"""
- test_folder = [
- "image1.jpg", # Image file (jpg)
- "image2.png", # Image file (png)
- "image3.tif", # Image file (tif)
- "document.txt", # Non-image file
- "image4.pdf"] # Non-image file
- image_formats = ['.jpg', '.png', '.tif', '.jpeg'] # Define multiple image formats to test
-
- for image_type in image_formats:
- with self.subTest(image_type=image_type):
- cleaned_files = folder_cleaner(test_folder, image_type)
- self.assertTrue(all(file.endswith(image_type) for file in cleaned_files), f"Some files do not match {image_type}: {cleaned_files}")
-
- def test_empty_folder(self):
- """Ensures folder cleaner function is able to handle empty folders"""
- test_folder2 = [] # Simulates empty folder
- test_image_type = '.png'
- result = folder_cleaner(test_folder2, test_image_type)
- self.assertEqual(result, "The folder is empty", "ok") # Expected the "folder is empty" message
-
- def test_hidden_files(self):
- """Ensures folder cleaner function is not processing hidden files (.)"""
- test_folder3 = [".hiddenfile.png", "visible.png", ".anotherhidden.png"] # Simulated list including hidden files
- image_formats = ['.png']
-
- for image_type in image_formats:
- cleaned_files = folder_cleaner(test_folder3, image_type)
- # Ensure that no hidden files (files starting with '.') are included
- self.assertFalse(any(file.startswith('.') for file in cleaned_files),
- f"Hidden files were incorrectly included: {cleaned_files}")
- print(f"Filtered images for {image_type}: {cleaned_files}")
-
- def test_special_characters(self):
- """Ensures folder cleaner function handles files with special caracters names"""
- test_folder4 = ["phRo$$@home.jpg", "#!weird#name.png", "normA00)_image.jpeg"]
- image_formats = ['.jpg', '.png', '.jpeg']
-
- for image_type in image_formats:
- with self.subTest(image_type=image_type):
- cleaned_files = folder_cleaner(test_folder4, image_type)
- self.assertTrue(all(file.endswith(image_type) for file in cleaned_files),
- f"Some files do not match {image_type}: {cleaned_files}")
- print(f"Filtered images for {image_type}: {cleaned_files}")
-if __name__ == '__main__':
- unittest.main(argv=['first-arg-is-ignored'], exit=False)
-
-#Function 2
-
-class TestListToArray(unittest.TestCase):
-
- def test_non_empty_list(self):
- """Test conversion of a non-empty list to a NumPy array"""
- file_list = ["file1.jpg", "file2.png", "file3.jpeg"]
- result = list_to_array(file_list)
- self.assertIsInstance(result, np.ndarray, "The result should be a NumPy array")
- self.assertEqual(result.tolist(), file_list, "The converted array does not match the input list")
-
- def test_empty_list(self):
- """Test conversion of an empty list to a NumPy array"""
- file_list = []
- result = list_to_array(file_list)
- self.assertIsInstance(result, np.ndarray, "The result should be a NumPy array")
- self.assertEqual(result.tolist(), file_list, "The result array for an empty list should be empty")
-
- def test_mixed_data_types(self):
- """Test conversion of a list with mixed data types (strings and integers)"""
- file_list = ["file1.jpg", 123, "file2.png", 456]
- result = list_to_array(file_list)
- self.assertIsInstance(result, np.ndarray, "The result should be a NumPy array")
- # Check if the result is converted to string due to mixed data types
- self.assertEqual(result.tolist(), ['file1.jpg', '123', 'file2.png', '456'], "The converted array does not match the expected list with string conversion")
-
- def test_array_conversion(self):
- """Test conversion of a list containing NumPy arrays to a NumPy array"""
- file_list = [np.array([1, 2]), np.array([3, 4])]
- result = list_to_array(file_list)
- self.assertIsInstance(result, np.ndarray, "The result should be a NumPy array")
- self.assertEqual(result.tolist(), [item.tolist() for item in file_list], "The arrays inside the list should be converted correctly")
-
-if __name__ == '__main__':
- unittest.main(argv=['first-arg-is-ignored'], exit=False)
-
-#Function 3
-
-class TestExtractChannels(unittest.TestCase):
-
- def test_missing_channels(self):
- """Ensures images without 2 channels can be processed or fails gracefully"""
- with tempfile.TemporaryDirectory() as temp_folder:
- # Simulate an image with only 1 channel
- file_name = "image_single_channel.tif"
- image = np.zeros((1, 100, 100), dtype=np.uint16)
- io.imsave(os.path.join(temp_folder, file_name), image)
-
- # extracting channels
- with self.assertRaises(ValueError):
- extract_channels([file_name], temp_folder)
-
- def test_completely_black_image(self):
- """Ensures completely black images are handled appropriately"""
- with tempfile.TemporaryDirectory() as temp_folder:
- # Simulate a completely black image with 2 channels
- file_name = "image_black.tif"
- black_image = np.zeros((2, 100, 100), dtype=np.uint16)
- io.imsave(os.path.join(temp_folder, file_name), black_image)
-
- # extracting channels
- with self.assertRaises(ValueError):
- extract_channels([file_name], temp_folder)
-
- def test_large_image_size(self):
- """Ensure that images that are too large are handled appropriately."""
- with tempfile.TemporaryDirectory() as temp_folder:
- file_name = "large_image.tif"
- large_image = np.random.randint(1, 256, size=(2, 10000, 10000), dtype=np.uint16) # Create a very large image
- io.imsave(os.path.join(temp_folder, file_name), large_image)
- try:
- extract_channels([file_name], temp_folder)
- except MemoryError:
- print(f"MemoryError: The image {file_name} is too large to process.")
-
-
-if __name__ == '__main__':
- unittest.main(argv=['first-arg-is-ignored'], exit=False)
-
-#Function 4
-
-class TestMoveStainImages(unittest.TestCase):
-
- def setUp(self):
- """Set up test environment."""
- self.test_folder = tempfile.mkdtemp() #creates a temporary folder to simulate as test folder
- test_files = ["image1.jpg", "image2.png", "image3.jpg", "document.txt", ".hiddenfile.png", "image4.jpeg"]
- for file_name in test_files:
- with open(os.path.join(self.test_folder, file_name), 'w') as f:
- f.write(f"This is a test file: {file_name}")
-
- self.stain1 = "dapi"
- self.stain2 = "iba"
-
- def test_source_folder_exists(self):
- """Ensure the source folder exists before processing."""
- self.assertTrue(os.path.exists(self.test_folder)) #verifies that folder exists
-
- def test_empty_source_folder(self):
- """Ensure the function handles an empty source folder or fails gracefully."""
- with tempfile.TemporaryDirectory() as empty_folder: # Create a temporary empty folder using tempfile
- self.assertEqual(len(os.listdir(empty_folder)), 0) # Ensure the folder is empty
- # Now, test the function with the empty folder
- move_stain_images(empty_folder, self.stain1, self.stain2)
-
- def test_only_allowed_files(self):
- """Ensures that only files with the right extension are moved to their respective directory"""
- with tempfile.TemporaryDirectory() as temp_folder: #create a temporary folder
- # Create destination subfolders for stains
- stain1_folder = os.path.join(temp_folder, "dapi")
- stain2_folder = os.path.join(temp_folder, "iba")
- os.makedirs(stain1_folder, exist_ok=True)
- os.makedirs(stain2_folder, exist_ok=True)
-
- # Create files with different extensions
- file1 = os.path.join(temp_folder, "sample1_dapi.png")
- file2 = os.path.join(temp_folder, "sample2_iba.png")
- file3 = os.path.join(temp_folder, "sample3.txt")
-
- # Write content to the files
- for file in [file1, file2, file3]:
- with open(file, 'w') as f:
- f.write("content")
-
- # Call the move_stain_images function
- move_stain_images(temp_folder, "dapi", "iba")
-
- # Only "sample1_dapi.png" and "sample2_iba.png" should have been moved
- self.assertTrue(os.path.exists(os.path.join(stain1_folder, "sample1_dapi.png")))
- self.assertTrue(os.path.exists(os.path.join(stain2_folder, "sample2_iba.png")))
- self.assertFalse(os.path.exists(os.path.join(stain1_folder, "sample3.txt")))
- self.assertFalse(os.path.exists(os.path.join(stain2_folder, "sample3.txt")))
-
- def test_duplicate_file_handling(self):
- """Ensure duplicate files are detected and not overwritten for both stains."""
-
- for stain in [self.stain1, self.stain2]: # ["dapi", "iba"]
- stain_folder = os.path.join(self.test_folder, stain)
- os.makedirs(stain_folder, exist_ok=True) # Create stain folder if needed
-
- # Create a duplicate file in the destination folder
- duplicate_file = os.path.join(stain_folder, f"sample1_{stain}.png")
- with open(duplicate_file, 'w') as f:
- f.write(f"duplicate test for {stain}")
-
-if __name__ == '__main__':
- unittest.main(argv=['first-arg-is-ignored'], exit=False)
diff --git a/test/test_image_preprocessing.py b/test/test_image_preprocessing.py
deleted file mode 100644
index 10fa509..0000000
--- a/test/test_image_preprocessing.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# this is a python script that reads an image, splits it into 4 quadrants, converts it to grayscale, and saves the processed images.
-# The script also includes unit tests to validate the image processing functions.
-import cv2 # OpenCV library
-import numpy as np # Numpy library
-import unittest # Unit testing library
-import os # Operating system library
-
-def split_image_into_quadrants(image: np.ndarray): # Function to split image into 4 quadrants
- """Splits an image into four quadrants and returns them as a list."""
- if image is None or not isinstance(image, np.ndarray): # Check if image is valid or not
- raise ValueError("Invalid image input") # Raise an error if image is invalid
-
- height, width = image.shape[:2] # Get the height and width of the image
- mid_x, mid_y = width // 2, height // 2 # Get the mid point of the image
-
- top_left = image[:mid_y, :mid_x] # Get the top left quadrant of the image
- top_right = image[:mid_y, mid_x:] # Get the top right quadrant of the image
- bottom_left = image[mid_y:, :mid_x] # Get the bottom left quadrant of the image
- bottom_right = image[mid_y:, mid_x:] # Get the bottom right quadrant of the image
-
- return [top_left, top_right, bottom_left, bottom_right] # Return the 4 quadrants of the image
-
-def convert_to_grayscale(image: np.ndarray): # Function to convert image to grayscale
- """Converts a color image to grayscale."""
- if image is None or not isinstance(image, np.ndarray): # Check if image is valid or not
- raise ValueError("Invalid image input")
-
- return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Convert the image to grayscale
-
-def process_image(image_path): # Function to process the image
- """Loads an image, splits it into quadrants, converts to grayscale, and saves outputs."""
- if not os.path.exists(image_path): # Check if the image exists or not
- raise FileNotFoundError(f"Image not found: {image_path}") # Raise an error if image is not found
-
- image = cv2.imread(image_path) # Read the image
- if image is None: # Check if the image is valid or not
- raise ValueError("Failed to load image. Check file format.")
-
- quadrants = split_image_into_quadrants(image) # Split the image into quadrants
- grayscale_image = convert_to_grayscale(image) # Convert the image to grayscale
-
- output_dir = os.path.join(os.path.dirname(image_path), "processed_output") # Create a directory to save the processed images on my desktop
- os.makedirs(output_dir, exist_ok=True) # Create the directory if it does not exist, the name of this
-
- for i, quadrant in enumerate(quadrants): # Save each quadrant as a separate image
- cv2.imwrite(os.path.join(output_dir, f"quadrant_{i+1}.jpg"), quadrant) # Save the quadrant image
-
- cv2.imwrite(os.path.join(output_dir, "grayscale.jpg"), grayscale_image) # Save the grayscale image
-
- return output_dir # Return the output directory
-def validate_image_format(image_path):
- """Checks if the image format is valid (e.g., .jpg, .png).""" # Function to validate the image format
- valid_formats = ('.jpg', '.png', '.tiff', '.bmp', '.nd2') # List of valid image formats
- return image_path.lower().endswith(valid_formats) # Check if the image format is valid
-
-def generate_noisy_image(): # Function to generate a noisy image
- """Generates an image with artificial noise for testing.""" # Generate a noisy image
- noisy_image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8) # Generate random pixel values
- return noisy_image # Return the noisy image
-
-def generate_image_with_variations(): # Function to generate an image with intensity variations
- """Generates an image with intensity variations for testing.""" # Generate an image with intensity variations
- image = np.linspace(50, 200, 100 * 100 * 3).reshape(100, 100, 3).astype(np.uint8) # Create an image with intensity variations
- return image
-
-class TestImageProcessing(unittest.TestCase):
- def setUp(self): # Set up the test case
- self.image = np.ones((100, 100, 3), dtype=np.uint8) * 255 # White image
-
- def test_split_dimensions(self): # Test to check the dimensions of the quadrants
- quadrants = split_image_into_quadrants(self.image) # Split the image into quadrants
- self.assertEqual(len(quadrants), 4) # Check if the number of quadrants is 4
- self.assertEqual(quadrants[0].shape[:2], (50, 50)) # Check the dimensions of the quadrants
- self.assertEqual(quadrants[1].shape[:2], (50, 50)) # Check the dimensions of the quadrants
- self.assertEqual(quadrants[2].shape[:2], (50, 50)) # Check the dimensions of the quadrants
- self.assertEqual(quadrants[3].shape[:2], (50, 50)) # Check the dimensions of the quadrants
-
- def test_invalid_input(self): # Test to check invalid input
- with self.assertRaises(ValueError): # Check if the function raises a ValueError
- split_image_into_quadrants(None) # Pass None as input
- with self.assertRaises(ValueError):
- split_image_into_quadrants("not an image")
-
- def test_grayscale_conversion(self): # Test to check grayscale conversion
- gray_image = convert_to_grayscale(self.image) # Convert the image to grayscale
- self.assertEqual(len(gray_image.shape), 2) # Grayscale images have 2 dimensions
- self.assertEqual(gray_image.shape, (100, 100)) # Check the dimensions of the grayscale image
-
-if __name__ == "__main__":
- image_path = "/Users/munawaraxh/cheme_546/project/Planning/example_dataset/example_images/MEF_wildtype/xy013c1.tif"
- try:
- output_directory = process_image(image_path)
- print(f"Processed images saved in: {output_directory}")
- except Exception as e:
- print(f"Error: {e}")
-
- unittest.main()
diff --git a/test/test_image_process.py b/test/test_image_process.py
new file mode 100644
index 0000000..82210ec
--- /dev/null
+++ b/test/test_image_process.py
@@ -0,0 +1,85 @@
+import pytest
+import random
+import os
+from skimage import io
+from skimage.util import img_as_ubyte
+import tempfile
+from typing import Callable
+import numpy as np
+import turmoric
+from turmoric.image_process import load_npy_file
+from turmoric.image_process import normalize_npy_data
+from turmoric.image_process import nd2_to_tif
+
+def test_load_npy_file():
+ # Create a temporary directory with a dummy .npy file
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # Create a sample numpy array and save it
+ sample_data = np.random.rand(100, 100)
+ npy_path = os.path.join(temp_dir, "sample.npy")
+ np.save(npy_path, sample_data)
+
+ # Load the .npy file using the function
+ loaded_data = load_npy_file(npy_path)
+
+ # Assertions
+ assert isinstance(loaded_data, np.ndarray), "Loaded data should be a numpy array"
+ assert loaded_data.shape == sample_data.shape, "Loaded data shape should match saved data shape"
+ assert np.array_equal(loaded_data, sample_data), "Loaded data should match saved data"
+
+def test_normalize_npy_data():
+ # Create a sample numpy array with float data
+ float_data = np.random.rand(100, 100) * 1000 # Values in [0, 1000]
+ normalized_float = normalize_npy_data(float_data)
+
+ # Assertions for float data normalization
+ assert normalized_float.dtype == np.uint8, "Normalized data should be of type uint8"
+ assert normalized_float.min() >= 0 and normalized_float.max() <= 255, "Normalized data should be in [0, 255]"
+
+ # Create a boolean mask
+ bool_mask = np.array([[True, False], [False, True]])
+ normalized_mask = normalize_npy_data(bool_mask)
+
+ # Assertions for boolean mask normalization
+ assert normalized_mask.dtype == np.uint8, "Normalized mask should be of type uint8"
+ assert np.array_equal(normalized_mask, np.array([[255, 0], [0, 255]])), "Boolean mask should be converted to uint8"
+
+def test_nd2_to_tif():
+ # Create a temporary directory with a dummy .nd2 file
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # Create a sample numpy array and save it as .npy (simulating .nd2)
+ sample_data = np.random.rand(100, 100)
+ nd2_path = os.path.join(temp_dir, "sample.nd2")
+ np.save(nd2_path, sample_data) # Simulating .nd2 with .npy for testing
+
+ # Convert the .nd2 file to .tif
+ nd2_to_tif(temp_dir, "sample.nd2")
+
+ # Check if the .tif file was created
+ tif_path = os.path.join(temp_dir, "sample.tif")
+ assert os.path.exists(tif_path), "The TIFF file should be created"
+ # Load the TIFF file to verify its content
+ tif_data = io.imread(tif_path)
+ assert isinstance(tif_data, np.ndarray), "Loaded TIFF data should be a numpy array"
+ assert tif_data.shape == sample_data.shape, "TIFF data shape should match original data shape"
+ assert np.all(tif_data >= 0) and np.all(tif_data <= 255), "TIFF data should be in [0, 255] range"
+ # Clean up the temporary .nd2 file
+ os.remove(nd2_path)
+ # Check if the .tif file was created
+ assert os.path.exists(tif_path), "The TIFF file should be created from the .nd2 file"
+ # Load the TIFF file to verify its content
+ tif_data = io.imread(tif_path)
+ assert isinstance(tif_data, np.ndarray), "Loaded TIFF data should be a numpy array"
+ assert tif_data.shape == sample_data.shape, "TIFF data shape should match original data shape"
+ assert np.all(tif_data >= 0) and np.all(tif_data <= 255), "TIFF data should be in [0, 255] range"
+ # Clean up the temporary .nd2 file
+ os.remove(nd2_path)
+ # Check if the .tif file was created
+ assert os.path.exists(tif_path), "The TIFF file should be created from the .nd2 file"
+ # Load the TIFF file to verify its content
+ tif_data = io.imread(tif_path)
+ assert isinstance(tif_data, np.ndarray), "Loaded TIFF data should be a numpy array"
+ assert tif_data.shape == sample_data.shape, "TIFF data shape should match original data shape"
+ assert np.all(tif_data >= 0) and np.all(tif_data <= 255), "TIFF data should be in [0, 255] range"
+ # Clean up the temporary .nd2 file
+ os.remove(nd2_path)
\ No newline at end of file
diff --git a/test/test_preprocessing.py b/test/test_preprocessing.py
index 73817bc..b702644 100644
--- a/test/test_preprocessing.py
+++ b/test/test_preprocessing.py
@@ -1,10 +1,11 @@
# import module or package here
-import unittest
+import pytest
import random
import numpy as np
import os
+import turmoric
-class TestPreprocessing(unittest.TestCase):
+class TestPreprocessing():
def test_validate_iamge_format(self):
test = 0 # remove this and write testing code
@@ -27,27 +28,24 @@ def test_collect_selected_bstack(self):
def test_image_list_split(self):
##Just copying the main code from the .ipynb, not a test yet hehes
- folder_location = 'test_data'
- arr = os.listdir(folder_location)
- folder_list = np.asarray(arr)
- folder_list = [ x for x in folder_list if "DS" not in x ]
- files_to_split_list = []
- expected_file_list = []
-
- for folders in folder_location:
- image_array = os.listdir(str(folder_location + '/' + folders))
- subfolder_list = np.asarray(image_array)
- subfolder_list = [ x for x in subfolder_list if "DS" not in x]
- for subfolders in subfolder_list:
- image_array = os.listdir(str(folder_location + '/' + folders + '/' + subfolders))
- files_list = np.asarray(image_array)
- files_list = [x for x in files_list if "DS" not in x]
- for files in files_list:
- name = str(folder_location + '/' + folders + '/' + subfolders + '/' + files)
- files_to_split_list.append(name)
-
- self.assertListEqual(files_to_split_list, expected_file_list)
-
-
-if __name__ == '__main__':
- unittest.main()
+ # folder_location = 'example_dataset'
+ # arr = os.listdir(folder_location)
+ # folder_list = np.asarray(arr)
+ # folder_list = [ x for x in folder_list if "DS" not in x ]
+ # files_to_split_list = []
+ # expected_file_list = []
+
+ # for folders in folder_location:
+ # image_array = os.listdir(str(folder_location + '/' + folders))
+ # subfolder_list = np.asarray(image_array)
+ # subfolder_list = [ x for x in subfolder_list if "DS" not in x]
+ # for subfolders in subfolder_list:
+ # image_array = os.listdir(str(folder_location + '/' + folders + '/' + subfolders))
+ # files_list = np.asarray(image_array)
+ # files_list = [x for x in files_list if "DS" not in x]
+ # for files in files_list:
+ # name = str(folder_location + '/' + folders + '/' + subfolders + '/' + files)
+ # files_to_split_list.append(name)
+
+ # self.assertListEqual(files_to_split_list, expected_file_list)
+ pass
\ No newline at end of file
diff --git a/test/test_segmentation.py b/test/test_segmentation.py
index 95b3e78..92fff35 100644
--- a/test/test_segmentation.py
+++ b/test/test_segmentation.py
@@ -1,9 +1,10 @@
# import module or package here
-import unittest
+import pytest
import random
import os
+import turmoric
-class TestSegmentation(unittest.TestCase):
+class TestSegmentation():
def test_bdreg(self):
test = 0 # remove this line and replace with testing code
@@ -15,8 +16,4 @@ def test_image_segmentation(self):
test = 0
def test_segmentation_labelling(self):
- test = 0
-
-
-if __name__ == '__main__':
- unittest.main()
+ test = 0
\ No newline at end of file
diff --git a/test/test_utils.py b/test/test_utils.py
new file mode 100644
index 0000000..44ddd0f
--- /dev/null
+++ b/test/test_utils.py
@@ -0,0 +1,69 @@
+import pytest
+import random
+import os
+from skimage import io
+from skimage.util import img_as_ubyte
+import tempfile
+from typing import Callable
+import numpy as np
+import turmoric
+from turmoric.utils import organize_files_without_leakage
+from turmoric.utils import recursively_get_all_filepaths
+
+
+def test_organize_files_without_leakage(input_folder, output_folder):
+ # Create a temporary directory with dummy files
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # Create dummy files
+ for i in range(5):
+ file_path = os.path.join(temp_dir, f"file_{i}.txt")
+ with open(file_path, 'w') as f:
+ f.write(f"This is file {i}")
+
+ # Run the function
+ organize_files_without_leakage(temp_dir, output_folder)
+
+ # Check if files are moved to output folder
+ output_files = os.listdir(output_folder)
+ assert len(output_files) == 5, "All files should be moved to the output folder"
+ for i in range(5):
+ assert f"file_{i}.txt" in output_files, f"file_{i}.txt should be in the output folder"
+
+ if not isinstance(input_folder, str):
+ raise TypeError("input_folder must be a string")
+ if not isinstance(output_folder, str):
+ raise TypeError("output_folder must be a string")
+
+
+def test_recursively_get_all_filepaths(input_folder, ext):
+ # Create a temporary directory with dummy files
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # Create dummy files
+ for i in range(5):
+ file_path = os.path.join(temp_dir, f"file_{i}.{ext}")
+ with open(file_path, 'w') as f:
+ f.write(f"This is file {i}")
+
+ # Run the function
+ filepaths = recursively_get_all_filepaths(temp_dir, ext)
+
+ # Check if all files are returned
+ assert len(filepaths) == 5, "All files should be found"
+ for i in range(5):
+ assert os.path.basename(filepaths[i]) == f"file_{i}.{ext}", f"file_{i}.{ext} should be in the list"
+
+ if not isinstance(input_folder, str):
+ raise TypeError("input_folder must be a string")
+ if not isinstance(ext, str):
+ raise TypeError("ext must be a string")
+
+
+def test_recursively_get_all_filepaths_invalid_input():
+ with pytest.raises(TypeError):
+ recursively_get_all_filepaths(123, "txt") # input_folder should be a string
+ with pytest.raises(TypeError):
+ recursively_get_all_filepaths("/path/to/folder", 123) # ext should be a string
+ with pytest.raises(FileNotFoundError):
+ recursively_get_all_filepaths("/non/existent/path", "txt") # Non-existent path
+# """
+# Recursively retrieve all file paths of a specific type from a directory.
\ No newline at end of file