-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodelAndFit.py
More file actions
116 lines (98 loc) · 5.07 KB
/
modelAndFit.py
File metadata and controls
116 lines (98 loc) · 5.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
from keras import backend as K
from keras import regularizers
from keras import layers
from keras import optimizers
import datetime
import os
import bottle_functions as bf
from time import time
# ---- Hyperparameters and run configuration ----
epochs = 30
lamda = 5E-5  # L2 weight-decay factor ("lamda" spelling kept: referenced elsewhere in the file)
batch_size = 50

# Inspect training curves with: tensorboard --logdir=g:\BottleProject\logs
start_time = datetime.datetime.now()  # wall-clock start, reported at the end of the run

# Input image dimensions.
img_width, img_height = 128, 128

train_data_dir = 'output/Dataset2/TRAIN'
validation_data_dir = 'output/Dataset2/VALIDATE'

# Size each epoch by counting every file under the split directory.
# NOTE(review): this counts ALL files under the tree, not only images in
# class subfolders — assumed to match what flow_from_directory finds; verify.
nb_train_samples = sum(len(files) for _, _, files in os.walk(train_data_dir))
nb_validation_samples = sum(len(files) for _, _, files in os.walk(validation_data_dir))

experiment_name = "CNN_reseach4-3"

# Training callbacks: TensorBoard logging, early stopping on validation loss,
# and best-only weight checkpointing.
tensorboard_cb = TensorBoard(log_dir="logs/{}".format(time()))
early_stop_cb = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2,
                              verbose=1, mode='auto', baseline=None,
                              restore_best_weights=False)
checkpoint_cb = ModelCheckpoint(experiment_name + '.{epoch:02d}-{val_loss:.2f}.h5',
                                monitor='val_loss', verbose=0,
                                save_best_only=True, save_weights_only=True,
                                mode='auto', period=1)
callbacks = [tensorboard_cb, early_stop_cb, checkpoint_cb]

# Channel ordering depends on the active Keras backend.
print(K.image_data_format())
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
#####DEFECT MODEL START#####
# Binary-classification CNN: three convolutional stages (32 -> 64,64 -> 128,128
# filters), each stage closed by 2x2 max-pooling, then a two-layer 1024-unit
# dropout-regularized dense head and a single sigmoid output unit.
model = Sequential()
# Input dropout; its input_shape argument fixes the network's input shape.
model.add(layers.Dropout(0.2, input_shape=input_shape))
# Convolution layer: 32 filters, kernel size 3 x 3, stride 2, L2 regularization.
# FIX: the original passed input_shape to every Conv2D; Keras only honors it on
# the input layer (already set by the Dropout above), so the redundant,
# misleading arguments are removed. Network behavior is unchanged.
model.add(Conv2D(32, (3, 3), strides=2, kernel_regularizer=regularizers.l2(lamda)))
model.add(Activation('relu'))
# Pooling layer: subsampling 2 x 2, stride 2.
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
# Convolution layer: 64 filters, kernel size 3 x 3, L2 regularization.
model.add(Conv2D(64, (3, 3), kernel_regularizer=regularizers.l2(lamda)))
model.add(Activation('relu'))
# Convolution layer: 64 filters, kernel size 3 x 3, L2 regularization.
model.add(Conv2D(64, (3, 3), kernel_regularizer=regularizers.l2(lamda)))
model.add(Activation('relu'))
# Pooling layer: subsampling 2 x 2, stride 2.
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
# Convolution layer: 128 filters, kernel size 3 x 3, L2 regularization.
model.add(Conv2D(128, (3, 3), kernel_regularizer=regularizers.l2(lamda)))
model.add(Activation('relu'))
# Convolution layer: 128 filters, kernel size 3 x 3, L2 regularization.
model.add(Conv2D(128, (3, 3), kernel_regularizer=regularizers.l2(lamda)))
model.add(Activation('relu'))
# Pooling layer: subsampling 2 x 2, stride 2.
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(Flatten())
# Fully connected layer: 1024 activation units.
model.add(layers.Dense(units=1024, activation='relu'))
# Dropout layer, probability 0.5.
model.add(layers.Dropout(0.5))
# Fully connected layer: 1024 activation units.
model.add(layers.Dense(units=1024, activation='relu'))
# Dropout layer, probability 0.5.
model.add(layers.Dropout(0.5))
# Single sigmoid unit for binary classification.
model.add(layers.Dense(units=1, activation='sigmoid'))  # org
print(model.summary())
######DEFECT MODEL END######
# ---- Compile, train, and persist the model ----
# RMSprop with a small learning rate; epsilon=None defers to the backend default.
optimizer = optimizers.RMSprop(lr=0.0001, rho=0.9, epsilon=None, decay=0.0)
# optimizer = optimizers.SGD(lr=0.01, momentum=0.9, decay=0.012, nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])  # org

# Data generators: light geometric augmentation for training, rescale-only
# for validation so validation metrics reflect unmodified images.
train_datagen = ImageDataGenerator(rotation_range=5, width_shift_range=0.1, height_shift_range=0.1,
                                   rescale=1. / 255, zoom_range=0.1, horizontal_flip=False,
                                   fill_mode='nearest')
valid_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_width, img_height),
                                                    batch_size=batch_size, class_mode='binary', shuffle=True)
validation_generator = valid_datagen.flow_from_directory(validation_data_dir, target_size=(img_width, img_height),
                                                         batch_size=batch_size, class_mode='binary', shuffle=True)

# FIX: the original used nb_*_samples // batch_size directly, which truncates
# to 0 whenever a split holds fewer files than one batch and would make the
# epoch run zero steps; clamp to at least one step per epoch.
steps_per_epoch = max(1, nb_train_samples // batch_size)
validation_steps = max(1, nb_validation_samples // batch_size)
model.fit_generator(train_generator, steps_per_epoch=steps_per_epoch, epochs=epochs,
                    validation_data=validation_generator, validation_steps=validation_steps,
                    verbose=2, callbacks=callbacks)

# Serialize the architecture to JSON...
model_json = model.to_json()
with open(experiment_name + ".json", "w") as json_file:
    json_file.write(model_json)
# ...and the final weights to HDF5 (best weights are saved by ModelCheckpoint).
model.save_weights(experiment_name + '.h5')

# Finish, and print execution time.
end_time = datetime.datetime.now()
print("execution time: " + str(end_time - start_time))