CNN not converging

I used Kaggle's Dog Breed competition (the competition is over, but the data is still available) as an exercise: https://www.kaggle.com/c/dog-breed-identification

Below is the code of my Keras NN; it uses a CNN and ImageDataGenerators. Originally the images are all in the all_images folder; then (if the corresponding lines are uncommented) the script copies them into train, valid and test folders.
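For reference, flow_from_directory expects exactly this layout: one sub-folder per class, e.g. data/train/boston_bull/000bec180eb18c7604dcecc8fe0dba07.jpg. A minimal sanity check of the copied tree (just a sketch, assuming the data/train and data/valid paths used in the script) could be:

import os

for split in ("data/train", "data/valid"):
    breeds = os.listdir(split)
    n_files = sum(len(os.listdir(os.path.join(split, b))) for b in breeds)
    print(split, ":", len(breeds), "breeds,", n_files, "images")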

It is all almost "by the book", except that the NN does not converge, and I cannot understand why.

I have a feeling it is something simple and stupid. I would appreciate any suggestions.

Thank you.

import datetime as dt
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2
import numpy as np
import os
import sys
import random
import warnings
from sklearn.model_selection import train_test_split
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
import shutil
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator

from keras.models import load_model

warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
np.random.seed(7)
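# Note: np.random.seed only seeds NumPy (and hence sklearn's shuffling);
# TensorFlow initialises the network weights from its own RNG, so runs
# are still not fully reproducible.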

start = dt.datetime.now()

BATCH_SIZE = 32
EPOCHS = 100
TESTING_SPLIT = 0.3   # 70/30 train/validation split

NUM_CLASSES = 120
IM_SIZE = 128

# ---

# 000bec180eb18c7604dcecc8fe0dba07  boston_bull
# 001513dfcb2ffafc82cccf4d8bbaba97  dingo
labels = pd.read_csv('data/labels.csv')
print(labels.head())

# ---

train_ids, valid_ids = train_test_split(labels, 
    test_size = TESTING_SPLIT) # , stratify = balanced_train_df['ships'])

print(len(train_ids), 'train ids', len(valid_ids), 'validation ids')
print('Total', len(labels), 'labelled images')

# ---

def copyFileSet(strDirFrom, strDirTo, arrFileNames):
    arrBreeds = np.asarray(arrFileNames['breed'])
    arrFileNames = np.asarray(arrFileNames['id'])

    if not os.path.exists(strDirTo):
        os.makedirs(strDirTo)

    for i in tqdm(range(len(arrFileNames))):
        strFileNameFrom = strDirFrom + arrFileNames[i] + ".jpg"
        strFileNameTo = strDirTo + arrBreeds[i] + "/" + arrFileNames[i] + ".jpg"

        if not os.path.exists(strDirTo + arrBreeds[i] + "/"):
            os.makedirs(strDirTo + arrBreeds[i] + "/")

            # As a new breed dir is created, copy its 1st file to "test" under the name of that breed
            if not os.path.exists("data/test/"):
                os.makedirs("data/test/")

            shutil.copy(strFileNameFrom, "data/test/" + arrBreeds[i] + ".jpg")

        shutil.copy(strFileNameFrom, strFileNameTo)


# Move the data into subfolders so we can use the Keras ImageDataGenerator.
# This way we can also later use the Keras data augmentation features.

# --- Uncomment once, to copy files ---
#copyFileSet("data/all_images/", "data/train/", train_ids)
#copyFileSet("data/all_images/", "data/valid/", valid_ids)

# ---

def preprocess(img):
    img = cv2.resize(img, (IM_SIZE, IM_SIZE), interpolation = cv2.INTER_AREA)
    input_img = preprocess_input(np.expand_dims(img, axis=0))
    return input_img[0]
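# Note: preprocess_input from keras.applications.vgg16 converts RGB to BGR
# and subtracts the ImageNet per-channel means, i.e. the inputs end up
# zero-centred rather than scaled to [0, 1].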

train_datagen = ImageDataGenerator(preprocessing_function=preprocess)
val_datagen = ImageDataGenerator(preprocessing_function=preprocess)

train_gen = train_datagen.flow_from_directory("data/train/", 
                                              batch_size=BATCH_SIZE, 
                                              target_size=(IM_SIZE, IM_SIZE), 
                                              shuffle=False)

val_gen = val_datagen.flow_from_directory("data/valid/", 
                                          batch_size=BATCH_SIZE, 
                                          target_size=(IM_SIZE, IM_SIZE), 
                                          shuffle=False)
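# Note: with shuffle=False both generators yield images in alphabetical
# folder order, so consecutive training batches tend to contain a single breed.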

# ---

if not os.path.exists("models/model.h5"):
    input_shape = (IM_SIZE, IM_SIZE, 3)
    model = Sequential()
    model.add(Conv2D(8, kernel_size=(3, 3), activation='relu',
        input_shape=input_shape))
    model.add(Conv2D(16, (3, 3), activation='relu'))
    model.add(Conv2D(16, (3, 3), activation='relu'))
    model.add(Dropout(0.1))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(Dropout(0.1))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Dropout(0.1))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.RMSprop(lr=0.0001), #keras.optimizers.Adam(lr=0.002),
        metrics=['accuracy'])
else:
    model = load_model("models/model.h5")

# ---

checkpoint = ModelCheckpoint("models/model.h5", monitor='val_loss', 
    verbose=1, save_best_only=True, mode='min', save_weights_only=False)
#reduceLROnPlat = ReduceLROnPlateau(monitor='loss', factor=0.33,
#   patience=1, verbose=1, mode='min',
#   min_delta=0.0001, cooldown=0, min_lr=1e-8)
earlyStopping = EarlyStopping(
    monitor='loss', patience=20, verbose=0, mode='auto', restore_best_weights=True)
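# Note: EarlyStopping above monitors the training loss, while ModelCheckpoint
# monitors the validation loss, so "best" can refer to different epochs.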

callbacks_list = [ 
    checkpoint, 
    earlyStopping
    #, reduceLROnPlat
    ]

# ---

breeds = np.unique(labels['breed'])
map_characters = {} #{0:'none'}
for i in range(len(breeds)):
    map_characters[i] = breeds[i]
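# flow_from_directory assigns class indices in sorted (alphabetical) folder
# order, matching np.unique's sorted output, so index i lines up with the
# generators. An equivalent mapping taken directly from the generator:
# map_characters = {v: k for k, v in train_gen.class_indices.items()}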

# ---

STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
STEP_SIZE_VALID = val_gen.n // val_gen.batch_size
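# Integer division drops any final partial batch, so at most
# BATCH_SIZE - 1 images per epoch go unused.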

history = model.fit_generator(generator=train_gen,
    steps_per_epoch=STEP_SIZE_TRAIN,
    validation_data=val_gen,
    validation_steps=STEP_SIZE_VALID,
    epochs=EPOCHS,
    callbacks=callbacks_list)

# ---

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show()
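# A companion accuracy plot; older Keras logs the metric as 'acc'/'val_acc',
# newer versions as 'accuracy'/'val_accuracy', so resolve the key first.
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
plt.plot(history.history[acc_key])
plt.plot(history.history['val_' + acc_key])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show()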

# --- Test

j = 0
for file_name in os.listdir("data/test/"):
    img = image.load_img("data/test/" + file_name)

    # Apply the same preprocessing (resize + preprocess_input) as the generators
    img_1 = preprocess(image.img_to_array(img))
    img_1 = np.expand_dims(img_1, axis=0)

    y_pred = model.predict(img_1)[0]

    print(y_pred)

    # argsort sorts ascending, so the last 5 indices are the 5 highest
    # scores; [::-1] puts the most confident class first
    y_pred_ids = y_pred.argsort()[-5:][::-1]

    print(y_pred_ids)

    for i in range(len(y_pred_ids)):
        print(file_name + " >>> " + map_characters[y_pred_ids[i]] + " (" + 
            str(y_pred[y_pred_ids[i]]) + ")")

    print("--------------------\n")

    j = j + 1
    if j > 5:
        break



img = image.load_img('data/test/australian_terrier.jpg')
img_1 = preprocess(image.img_to_array(img))
img_1 = np.expand_dims(img_1, axis=0)

y_pred = model.predict(img_1)
Y_pred_classes = np.argmax(y_pred, axis=1)
print(y_pred)

fig, ax = plt.subplots()
ax.imshow(img) 
ax.axis('off')
ax.set_title(map_characters[Y_pred_classes[0]])
plt.show()
