Skip to content

Commit

Permalink
Spelling corrections.
Browse files Browse the repository at this point in the history
  • Loading branch information
hlgirard committed Sep 25, 2019
1 parent 4b9896a commit a00121f
Show file tree
Hide file tree
Showing 4 changed files with 78 additions and 109 deletions.
164 changes: 67 additions & 97 deletions notebooks/SegmentationDropletsInCapillary_hlg_4_openCV.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion src/crystal_processing/process_image_folder.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def process_image(image_path, crop_box, model, save_overlay=False):
crop_box: (minRow, maxRow, minCol, maxCol)
Cropping box to select the region of interest
model: tensorflow model
Instance of a tensorflow model trained to discriminate droples containing crystals vs. clear
Instance of a tensorflow model trained to discriminate droplets containing crystals vs. clear
save_overlay: bool, optional
Save an image with green / red overlays for drops containing crystals / empty to `image_path / overlay`
Expand Down
4 changes: 2 additions & 2 deletions src/data/segment_droplets.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def segment_skimage(img, exp_clip_limit=0.06, closing_disk_radius=4, rm_holes_ar
img: numpy.ndarray
Array representing the greyscale values (0-255) of an image cropped to show only the droplets region
exp_clip_limit: float [0-1], optional
clip_limit parameter for adaptive equalisation
clip_limit parameter for adaptive equalization
closing_disk_radius: int, optional
diameter of selection disk for the closing function
rm_holes_area: int, optional
Expand Down Expand Up @@ -96,7 +96,7 @@ def segment(img, exp_clip_limit=15):
img: numpy.ndarray
Array representing the greyscale values (0-255) of an image cropped to show only the droplets region
exp_clip_limit: float [0-1], optional
clip_limit parameter for adaptive equalisation
clip_limit parameter for adaptive equalization
Returns
-------
Expand Down
17 changes: 8 additions & 9 deletions src/models/train/cnn_simple.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,18 +163,16 @@ def train_cnn_simple_from_directory(training_directory, bTensorboard):
validation_split=0.2)


# this is a generator that will read pictures found in
# subfolders of 'data/train', and indefinitely generate
# batches of augmented image data
# Training generator
train_generator = train_datagen.flow_from_directory(
training_directory, # this is the target directory
target_size=(150, 150), # all images will be resized to 150x225
training_directory,
target_size=(150, 150), # all images will be resized to 150x150
batch_size=batch_size,
color_mode='grayscale',
class_mode='binary', # since we use binary_crossentropy loss, we need binary labels
class_mode='binary',
subset='training')

# this is a similar generator, for validation data
# Validation generator
validation_generator = train_datagen.flow_from_directory(
training_directory,
target_size=(150, 150),
Expand Down Expand Up @@ -208,7 +206,8 @@ def train_cnn_simple_from_directory(training_directory, bTensorboard):
logging.info("Saving model JSON file to: %s", model_path)
with open(model_path, "w") as json_file:
json_file.write(model_json)
# Save weigths

# Save weights
model_weights_path = pkg_resources.resource_filename('models', "cnn-simple-model-{}.h5".format(time()))
logging.info("Saving model weights to %s", model_weights_path)
model.save_weights(model_weights_path)
Expand All @@ -225,4 +224,4 @@ def train_cnn_simple_from_directory(training_directory, bTensorboard):
target_names = ['Clear', 'Crystal']
print(classification_report(validation_generator.classes, y_pred, target_names=target_names))
except ImportError:
logging.info("sklearn is required to print confucion matrix and classification report.")
logging.info("sklearn is required to print confusion matrix and classification report.")

0 comments on commit a00121f

Please sign in to comment.