@@ -0,0 +1,79 @@
from keras import applications, Model
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense

# path to the VGG16 (no-top) weights file
weights_path = 'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
# dimensions of our images
img_width, img_height = 150, 150

train_data_dir = 'train'
validation_data_dir = 'validation'
nb_train_samples = 97
nb_validation_samples = 23
epochs = 50
batch_size = 2

# build the VGG16 network, without its fully-connected classifier
model = applications.VGG16(weights=weights_path, include_top=False,
                           input_shape=(img_width, img_height, 3))
print('Model loaded.')

# build a classifier model to put on top of the convolutional base
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))

# note: ideally fine-tuning starts from an already-trained top classifier;
# here the new head is initialized randomly and trained along with block 5

# attach the classifier on top of the convolutional base; calling the
# Sequential model on a tensor wires it in via the functional API
model = Model(inputs=model.input, outputs=top_model(model.output))

# freeze everything up to the last conv block (weights will not be updated):
# VGG16 without its top has 19 layers, and the first 15 cover the input plus
# blocks 1-4, so block 5 and the new classifier remain trainable
for layer in model.layers[:15]:
    layer.trainable = False

# compile the model with an SGD/momentum optimizer
# and a very slow learning rate
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])

# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

# fine-tune the model (Keras 2 API takes step counts, not sample counts)
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
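
As a usage note (not part of the original commit): once fine-tuning finishes, the model can be applied to a single image. A minimal sketch, assuming the script above has run; the file name 'sample.jpg' and the 0.5 decision threshold are illustrative assumptions:

# minimal inference sketch (assumes `model`, `img_width`, `img_height` from above;
# 'sample.jpg' and the 0.5 threshold are illustrative, not from the commit)
import numpy as np
from keras.preprocessing import image

img = image.load_img('sample.jpg', target_size=(img_height, img_width))
x = image.img_to_array(img) / 255.   # same 1/255 rescaling the generators applied
x = np.expand_dims(x, axis=0)        # add a batch dimension: (1, 150, 150, 3)
prob = float(model.predict(x)[0][0]) # sigmoid output in [0, 1]
print('positive class' if prob > 0.5 else 'negative class', prob)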
@@ -0,0 +1,66 @@
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator

# a small convnet: three conv/pool blocks followed by a dense classifier;
# the input is channels-last (150, 150, 3), so the layers use the default
# data_format rather than the Theano-style "th" ordering, which would
# conflict with this input shape
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(150, 150, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

batch_size = 16

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)

# this is a generator that will read pictures found in subfolders
# of 'train', and indefinitely generate batches of augmented image data
train_generator = train_datagen.flow_from_directory(
    'train',  # this is the target directory
    target_size=(150, 150),  # all images will be resized to 150x150
    batch_size=batch_size,
    class_mode='binary')  # since we use binary_crossentropy loss, we need binary labels

# invert class_indices so the mapping goes from label index to class name
labels = train_generator.class_indices
labels = dict((v, k) for k, v in labels.items())

# this is a similar generator, for validation data
validation_generator = test_datagen.flow_from_directory(
    'validation',
    target_size=(150, 150),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(
    train_generator,
    steps_per_epoch=2000 // batch_size,
    epochs=50,
    validation_data=validation_generator,
    validation_steps=800 // batch_size)

model.save_weights('first_try.h5')  # always save your weights after (or during) training
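
For completeness, a hedged sketch of reusing the saved weights together with the inverted `labels` mapping built above; the model definition from this script is assumed to be in scope, and 'test.jpg' is an illustrative file name:

# reload the saved weights and classify one image
# (assumes the `model` and `labels` objects defined above; 'test.jpg' is illustrative)
import numpy as np
from keras.preprocessing import image

model.load_weights('first_try.h5')
img = image.load_img('test.jpg', target_size=(150, 150))
x = np.expand_dims(image.img_to_array(img) / 255., axis=0)
pred = int(model.predict(x)[0][0] > 0.5)  # 0 or 1 from the sigmoid output
print(labels[pred])                       # map the index back to its class-directory name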