
[AI] Term Project - Image Classification

이태홍 2022. 6. 25. 02:12

Introduction

 - I carried out this term project based on what I learned in the course 'AI 활용 표현과 문제해결' (Expression and Problem Solving with AI).

 - The goal of the term project was to classify simple 3D images by designing or using an image classification model.

Data Analysis

 - The task was to classify an image dataset spanning 11 categories, including bags, flower pots, beds, shoes, desks, and chairs.

 - The dataset contained little information other than the images themselves, so the task was purely image classification.

 - The dataset was very small, with only 864 images.

Approach

 - Because the dataset was so small, I decided to increase its effective size through data augmentation.

 - I used transfer learning, where a model already trained on a large dataset is fine-tuned for a smaller, more specific problem.

 - Since this is an image classification problem, I used models pretrained on ImageNet, the dataset behind the annual classification challenge whose winning models are published as open-source implementations.

 - I chose to ensemble several models so that their individual weaknesses offset each other; a short sketch of the idea follows right after this list.
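
The training script below fine-tunes a single EfficientNetV2M, and the ensemble step itself does not appear in it, so here is a minimal sketch of how several fine-tuned Keras models could be combined by averaging their softmax outputs (soft voting). The checkpoint file names are placeholders, not files from this project.

import numpy as np
from keras.models import load_model

# Hypothetical checkpoint paths; substitute the .h5 files saved by ModelCheckpoint.
checkpoint_paths = ["model_a.h5", "model_b.h5", "model_c.h5"]
models = [load_model(p) for p in checkpoint_paths]

def ensemble_predict(batch):
    # Average the class probabilities of every model, then take the most likely class.
    probs = np.mean([m.predict(batch) for m in models], axis=0)
    return np.argmax(probs, axis=1)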

https://medium.com/syncedreview/sensetime-trains-imagenet-alexnet-in-record-1-5-minutes-e944ab049b2c

Code

# -*- coding: utf-8 -*-
"""Baseline _keras pretrained model

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1jTdJtTvyfz7FBRVF_eDEL0mk5aP38AI4

#Google Drive
"""

#from google.colab import drive
# drive.mount('/content/drive')

"""#Unzip Data"""

#!unzip -qq "/content/drive/MyDrive/2022/충남대학교/AI/Termproject/작업공간/Term_Dataset.zip"

"""#Pakage"""


from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Flatten, GlobalAveragePooling2D
from keras.models import Sequential
from tensorflow.python.client import device_lib
from keras.applications.efficientnet_v2 import preprocess_input, decode_predictions
from keras.applications.efficientnet_v2 import EfficientNetV2M
from pathlib import Path
import hashlib
from glob import glob
import cv2
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
PATH_PREFIX = "./Term_Dataset/"
#import torch

"""#DataAnalyzing
W
## csv 내용 확인
"""

train_csv = pd.read_csv(PATH_PREFIX+"Term_Dataset/train_data.csv")
train_emp_csv = pd.read_csv(PATH_PREFIX+"Term_Dataset/train_data_emp.csv")
val_csv = pd.read_csv(PATH_PREFIX+"Term_Dataset/val_data.csv")
val_emp_csv = pd.read_csv(PATH_PREFIX+"Term_Dataset/val_data_emp.csv")

train_csv.head()

train_emp_csv.head()

val_csv.head()

val_emp_csv.head()

print(train_csv.shape, train_emp_csv.shape, val_csv.shape, val_emp_csv.shape)

train_csv.info()

train_emp_csv.info()

classes = train_csv.drop_duplicates(["class"])
num_classes = len(classes)
classes = list(classes["class"])


"""## Image 확인"""
"""
#train_image
path = PATH_PREFIX+"Term_Dataset/train"
os.chdir(path)
train_image_files = os.listdir(path)

#val_image
path = PATH_PREFIX+"Term_Dataset/val"
os.chdir(path)
val_image_files = os.listdir(path)

train_images = []
val_images = []
for file in train_image_files:
  if ".png" in file:
    train_images.append(cv2.imread(file))
for file in val_image_files:
  if ".png" in file:
    val_images.append(cv2.imread(file))

fig, ax = plt.subplots(10, 3, figsize=(20, 40))
ax = ax.flatten()

for i in range(30):
  sample_image = cv2.cvtColor(train_images[i], cv2.COLOR_BGR2RGB)
  ax[i].imshow(sample_image)

fig, ax = plt.subplots(10, 3, figsize=(20, 40))
ax = ax.flatten()

for i in range(30):
  sample_image = cv2.cvtColor(val_images[i], cv2.COLOR_BGR2RGB)
  ax[i].imshow(sample_image)





"""  # keras import

"""

from keras.applications.resnet import ResNet152
from keras.preprocessing import image
from keras.applications.resnet import preprocess_input, decode_predictions
from keras import backend as K


model = ResNet152(weights='imagenet')

fig, ax = plt.subplots(20, 2, figsize=(20, 40))
ax = ax.flatten()

for i in range(20):
  img_path = f'E:/data/Term_Dataset/Term_Dataset/train/img00{280+i}.png'

  img = image.load_img(img_path, target_size=(224, 224))

  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)

  preds = model.predict(x)
  # Decode the results into a list of (class, description, probability) tuples
  # (one list per sample in the batch)
  #sample_image = cv2.cvtColor(val_images[i], cv2.COLOR_BGR2RGB)
  ax[i*2].imshow(img)
  
  x = np.arange(5)
  values = []
  classes = []
  for j in np.array(decode_predictions(preds, top=5)[0]):
    values.append(float(j[2]))
    classes.append(j[1])
  #print(values, classes)
  ax[(i*2+1)].bar(x, values)
  ax[(i*2+1)].set_xticks(x)
  ax[(i*2+1)].set_xticklabels(classes)
  #ax.set_xticks(range(6))
  #ax.set_xticklabels([str(x)+"foo" for x in range(6)], rotation=45)
  #ax.plot(range(6), range(6))
  
  #ax[i+1].legend()
  #ax[i+1].show()
print('Predicted:', decode_predictions(preds, top=5)[0])
"""


print(device_lib.list_local_devices())


model = EfficientNetV2M(weights='imagenet')

# num_classes = 2  # num_classes was already computed above from the CSV

# Stack a new softmax classification head for the project's classes on top of
# the ImageNet-pretrained EfficientNetV2M.
my_new_model = Sequential()
my_new_model.add(model)
#my_new_model.add(Dense(units=200,input_dim=1000, activation='relu'))
my_new_model.add(Dense(num_classes, activation='softmax'))
# Freeze the pretrained EfficientNetV2M base; only the new classification head is trained.
my_new_model.layers[0].trainable = False
my_new_model.layers[1].trainable = True
#my_new_model.layers[2].trainable = True
my_new_model.compile(
    optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

"""## Fit the model"""
# Move the validation images into per-class subfolders (for flow_from_directory)
# and record their SHA-256 hashes so duplicates can be detected in the training set.
val_hashes = set()
for i in val_csv.iterrows():
    try:
        if not os.path.exists(PATH_PREFIX+"Term_Dataset/val/" + i[1]["class"]):
            os.makedirs(PATH_PREFIX+"Term_Dataset/val/" + i[1]["class"])
    except OSError:
        pass
    try:
        os.replace(PATH_PREFIX+"Term_Dataset/val/" +
                   i[1]["file_name"], PATH_PREFIX+"Term_Dataset/val/" + i[1]["class"] + "/" + i[1]["file_name"])
        val_hashes.add(hashlib.sha256(open(PATH_PREFIX+"Term_Dataset/val/" +
                       i[1]["class"] + "/" + i[1]["file_name"], 'rb').read()).hexdigest())
    except OSError:
        pass
# Move the training images into per-class subfolders and drop any training image
# whose hash matches a validation image, to avoid leakage between the splits.
for i in train_csv.iterrows():
    try:
        if not os.path.exists(PATH_PREFIX+"Term_Dataset/train/" + i[1]["class"]):
            os.makedirs(PATH_PREFIX+"Term_Dataset/train/" + i[1]["class"])
    except OSError:
        pass
    try:
        os.replace(PATH_PREFIX+"Term_Dataset/train/" +
                   i[1]["file_name"], PATH_PREFIX+"Term_Dataset/train/" + i[1]["class"] + "/" + i[1]["file_name"])
        if hashlib.sha256(open(PATH_PREFIX+"Term_Dataset/train/" + i[1]["class"] + "/" + i[1]["file_name"], 'rb').read()).hexdigest() in val_hashes:
            os.remove(PATH_PREFIX+"Term_Dataset/train/" +
                      i[1]["class"] + "/" + i[1]["file_name"])
    except OSError:
        pass

#from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
image_size = 480
# Data augmentation: random rotations, zooms, shifts, shears, and horizontal flips.
data_generator = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=30,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True)


BATCH_SIZE = 32
EPOCH = 200 * 15
STEP = (len(train_csv) - len(val_csv)) // BATCH_SIZE

train_generator = data_generator.flow_from_directory(
    PATH_PREFIX+"/Term_Dataset/train",
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE,
    shuffle=True,
    seed=0,
    subset="training",
    class_mode='categorical')

validation_generator = data_generator.flow_from_directory(
    PATH_PREFIX+"/Term_Dataset/val",
    target_size=(image_size, image_size),
    shuffle=True,
    seed=0,
    subset="training",
    class_mode='categorical')


checkpoint = ModelCheckpoint('1_checkpoint-epoch-{epoch:04d}-val_loss-{val_loss:.5f}-accuracy-{accuracy:.5f}-val_accuracy-{val_accuracy:.5f}.h5',
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='auto'
                             )

# Resume training from a previously saved checkpoint.
my_new_model.load_weights('0_checkpoint-epoch-0516-val_loss-0.54913-accuracy-0.93991-val_accuracy-0.93750.h5')
# os._exit(0)
# Model.fit() accepts generators directly; fit_generator() is deprecated.
my_new_model.fit(
    train_generator,
    steps_per_epoch=STEP,
    epochs=EPOCH,
    validation_data=validation_generator,
    validation_steps=1,
    callbacks=[checkpoint]
)
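
The script above only trains the model and saves checkpoints, so for completeness here is a minimal inference sketch. It assumes the objects defined above (my_new_model, train_generator, preprocess_input, image_size, PATH_PREFIX) are still in scope, and the sample file path is hypothetical.

from keras.preprocessing import image

# Map the generator's class indices back to the class names taken from the CSV.
idx_to_class = {v: k for k, v in train_generator.class_indices.items()}

# Hypothetical sample path; substitute any image from the dataset.
img = image.load_img(PATH_PREFIX + "Term_Dataset/val/bag/img00280.png",
                     target_size=(image_size, image_size))
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
pred = my_new_model.predict(x)
print("Predicted class:", idx_to_class[int(np.argmax(pred, axis=1)[0])])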