diff --git a/TensorFlowLiteInceptionTutorial b/TensorFlowLiteInceptionTutorial
new file mode 160000
index 0000000..09ed9a2
--- /dev/null
+++ b/TensorFlowLiteInceptionTutorial
@@ -0,0 +1 @@
+Subproject commit 09ed9a28a3206fa7c5f79351dc907223aaa02007
diff --git a/train.py b/train.py
new file mode 100644
index 0000000..56309a3
--- /dev/null
+++ b/train.py
@@ -0,0 +1,227 @@
+import math
+
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+from tensorflow.keras import applications
+from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
+from tensorflow.keras.layers import Dense, Dropout, Flatten
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
+from tensorflow.keras.utils import to_categorical
+
+# dimensions of our images
+img_width, img_height = 224, 224
+
+top_model_weights_path = 'bottleneck_fc_model.h5'
+train_data_dir = 'data/train'
+validation_data_dir = 'data/validation'
+
+# number of epochs to train the top model
+epochs = 50
+# batch size used by flow_from_directory and predict
+batch_size = 4
+
+
+def save_bottleneck_features():
+    # build the VGG16 network without its ImageNet classifier head:
+    # we want the convolutional bottleneck features, not 1000-way predictions
+    model = applications.VGG16(include_top=False, weights='imagenet')
+
+    datagen = ImageDataGenerator(rescale=1. / 255)
+
+    generator = datagen.flow_from_directory(
+        train_data_dir,
+        target_size=(img_width, img_height),
+        batch_size=batch_size,
+        class_mode=None,
+        shuffle=False)
+
+    print(len(generator.filenames))
+    print(generator.class_indices)
+    print(len(generator.class_indices))
+
+    nb_train_samples = len(generator.filenames)
+
+    predict_size_train = int(math.ceil(nb_train_samples / batch_size))
+
+    bottleneck_features_train = model.predict(
+        generator, steps=predict_size_train)
+
+    np.save('bottleneck_features_train.npy', bottleneck_features_train)
+
+    generator = datagen.flow_from_directory(
+        validation_data_dir,
+        target_size=(img_width, img_height),
+        batch_size=batch_size,
+        class_mode=None,
+        shuffle=False)
+
+    nb_validation_samples = len(generator.filenames)
+
+    predict_size_validation = int(
+        math.ceil(nb_validation_samples / batch_size))
+
+    bottleneck_features_validation = model.predict(
+        generator, steps=predict_size_validation)
+
+    np.save('bottleneck_features_validation.npy',
+            bottleneck_features_validation)
+
+
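+# train a small fully-connected classifier on the saved VGG16 bottleneck
+# features; flow_from_directory with shuffle=False keeps the labels in the
+# same order as the saved feature arrays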
+def train_top_model():
+    datagen_top = ImageDataGenerator(rescale=1. / 255)
+    generator_top = datagen_top.flow_from_directory(
+        train_data_dir,
+        target_size=(img_width, img_height),
+        batch_size=batch_size,
+        class_mode='categorical',
+        shuffle=False)
+
+    nb_train_samples = len(generator_top.filenames)
+    num_classes = len(generator_top.class_indices)
+
+    # save the class indices to use later in predictions
+    np.save('class_indices.npy', generator_top.class_indices)
+
+    # load the bottleneck features saved earlier
+    train_data = np.load('bottleneck_features_train.npy')
+
+    # get the class labels for the training data, in the original order
+    train_labels = generator_top.classes
+    train_labels = to_categorical(train_labels, num_classes=num_classes)
+
+    generator_top = datagen_top.flow_from_directory(
+        validation_data_dir,
+        target_size=(img_width, img_height),
+        batch_size=batch_size,
+        class_mode=None,
+        shuffle=False)
+
+    nb_validation_samples = len(generator_top.filenames)
+
+    validation_data = np.load('bottleneck_features_validation.npy')
+
+    validation_labels = generator_top.classes
+    validation_labels = to_categorical(
+        validation_labels, num_classes=num_classes)
+
+    model = Sequential()
+    model.add(Flatten(input_shape=train_data.shape[1:]))
+    model.add(Dense(128, activation='relu'))
+    model.add(Dropout(0.5))
+    model.add(Dense(num_classes, activation='softmax'))
+    model.summary()
+    model.compile(optimizer='adam',
+                  loss='categorical_crossentropy', metrics=['acc'])
+
+    checkpoint = ModelCheckpoint('vgg16_mod.h5', verbose=1, monitor='val_acc',
+                                 save_best_only=True, mode='auto')
+    red = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, mode='auto')
+    early = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=10, mode='auto')
+    cbarray = [checkpoint, red, early]
+
+    history = model.fit(train_data, train_labels,
+                        epochs=epochs,
+                        batch_size=batch_size,
+                        validation_data=(validation_data, validation_labels),
+                        callbacks=cbarray)
+
+    # save the top-model weights so predict() can load them later
+    model.save_weights(top_model_weights_path)
+
+    (eval_loss, eval_accuracy) = model.evaluate(
+        validation_data, validation_labels, batch_size=batch_size, verbose=1)
+
+    print("[INFO] accuracy: {:.2f}%".format(eval_accuracy * 100))
+    print("[INFO] loss: {}".format(eval_loss))
+
+    plt.figure(1)
+
+    # summarize history for accuracy
+    plt.subplot(211)
+    plt.plot(history.history['acc'])
+    plt.plot(history.history['val_acc'])
+    plt.title('model accuracy')
+    plt.ylabel('accuracy')
+    plt.xlabel('epoch')
+    plt.legend(['train', 'validation'], loc='upper left')
+
+    # summarize history for loss
+    plt.subplot(212)
+    plt.plot(history.history['loss'])
+    plt.plot(history.history['val_loss'])
+    plt.title('model loss')
+    plt.ylabel('loss')
+    plt.xlabel('epoch')
+    plt.legend(['train', 'validation'], loc='upper left')
+    plt.show()
+
+
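+# classify a single image: run it through VGG16 to get its bottleneck
+# features, then through the trained top model; the top-model architecture
+# here must mirror train_top_model() exactly or load_weights() will fail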
+def predict():
+    # load the class_indices saved in the earlier step
+    # (allow_pickle is needed to read a pickled dict with recent numpy)
+    class_dictionary = np.load('class_indices.npy', allow_pickle=True).item()
+
+    num_classes = len(class_dictionary)
+
+    # add the path to your test image below
+    image_path = 'path/to/your/test_image'
+
+    orig = cv2.imread(image_path)
+
+    print("[INFO] loading and preprocessing image...")
+    image = load_img(image_path, target_size=(224, 224))
+    image = img_to_array(image)
+
+    # important! rescale exactly as during training,
+    # otherwise the predictions will be '0'
+    image = image / 255
+
+    image = np.expand_dims(image, axis=0)
+
+    # build the VGG16 network
+    model = applications.VGG16(include_top=False, weights='imagenet')
+
+    # get the bottleneck prediction from the pre-trained VGG16 model
+    bottleneck_prediction = model.predict(image)
+
+    # build the top model (same layers as in train_top_model())
+    model = Sequential()
+    model.add(Flatten(input_shape=bottleneck_prediction.shape[1:]))
+    model.add(Dense(128, activation='relu'))
+    model.add(Dropout(0.5))
+    model.add(Dense(num_classes, activation='softmax'))
+
+    model.load_weights(top_model_weights_path)
+
+    # use the bottleneck prediction on the top model to get the final
+    # classification (predict_classes/predict_proba were removed from
+    # tf.keras, so take the argmax of the softmax probabilities instead)
+    probabilities = model.predict(bottleneck_prediction)
+    inID = int(np.argmax(probabilities, axis=1)[0])
+
+    inv_map = {v: k for k, v in class_dictionary.items()}
+    label = inv_map[inID]
+
+    # print the prediction label
+    print("Image ID: {}, Label: {}".format(inID, label))
+
+    # display the prediction on the image
+    cv2.putText(orig, "Predicted: {}".format(label), (10, 30),
+                cv2.FONT_HERSHEY_PLAIN, 1.5, (43, 99, 255), 2)
+
+    cv2.imshow("Classification", orig)
+    cv2.waitKey(0)
+    cv2.destroyAllWindows()
+
+
+save_bottleneck_features()
+train_top_model()
+# predict() can be run afterwards, once image_path points at a real image file
diff --git a/train1.py b/train1.py
new file mode 100644
index 0000000..9ad8eef
--- /dev/null
+++ b/train1.py
@@ -0,0 +1,51 @@
+from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
+from tensorflow.keras.models import load_model
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+
+train_datagen = ImageDataGenerator(rescale=1. / 255)
+
+test_datagen = ImageDataGenerator(rescale=1. / 255)
+
+training_set = train_datagen.flow_from_directory(
+    'data/train',
+    target_size=(224, 224),
+    batch_size=32,
+    class_mode='categorical')
+
+test_set = test_datagen.flow_from_directory(
+    'data/validation',
+    target_size=(224, 224),
+    batch_size=32,
+    class_mode='categorical')
+
+print(training_set.class_indices)
+print(test_set.class_indices)
+
+checkpoint = ModelCheckpoint('vgg16_mod.h5', verbose=1, monitor='val_accuracy',
+                             save_best_only=True, mode='auto')
+red = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, mode='auto')
+early = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=10, mode='auto')
+
+model = load_model('agerta.h5')
+model.compile(optimizer='adam', loss='categorical_crossentropy',
+              metrics=['accuracy'])
+
+# training is currently disabled; uncomment the block below to fine-tune the
+# loaded model (fit_generator is deprecated, model.fit accepts generators)
+'''
+model.fit(
+    training_set,
+    steps_per_epoch=1000,
+    epochs=80,
+    validation_data=test_set,
+    validation_steps=500,
+    callbacks=[checkpoint, red, early])
+'''
+
+model.save("model2.h5")
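+
+# a minimal smoke test after saving (a sketch; assumes test_set above is
+# still in scope and 'model2.h5' was written to the working directory):
+# m = load_model('model2.h5')
+# loss, acc = m.evaluate(test_set, steps=len(test_set))
+# print('val loss {:.4f}, val acc {:.4f}'.format(loss, acc))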