A.I
Data Augmentation 본문
(1) 데이터 불러오기¶
- pip install tensorflow_datasets
In [1]:
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
In [2]:
tf.config.list_physical_devices('GPU')
Out[2]:
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
In [3]:
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices(device_type="GPU")
tf.config.experimental.set_visible_devices(devices=gpus[0], device_type="GPU")
tf.config.experimental.set_memory_growth(device=gpus[0], enable=True)
In [4]:
import tensorflow as tf
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
Out[4]:
[name: "/device:CPU:0" device_type: "CPU" memory_limit: 268435456 locality { } incarnation: 4640797650663064601, name: "/device:GPU:0" device_type: "GPU" memory_limit: 7413837344 locality { bus_id: 1 links { } } incarnation: 4521566842374445615 physical_device_desc: "device: 0, name: GeForce RTX 2070, pci bus id: 0000:01:00.0, compute capability: 7.5"]
In [5]:
import urllib3
urllib3.disable_warnings()
(ds_train, ds_test), ds_info = tfds.load(
'stanford_dogs',
split=['train', 'test'],
shuffle_files=True,
with_info=True,
)
In [6]:
fig = tfds.show_examples(ds_train, ds_info)
(2) Augmentation 적용하기¶
In [7]:
# 이미지 변환 전처리함수의 예
def 전처리_함수(image, label): # 변환할 이미지와
# 이미지 변환 로직 적용
new_image = 이미지_변환(image)
return new_image, label
In [8]:
def normalize_and_resize_img(image, label):
"""Normalizes images: `uint8` -> `float32`."""
image = tf.image.resize(image, [224, 224])
return tf.cast(image, tf.float32) / 255., label
In [9]:
def augment(image,label):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_brightness(image, max_delta=0.2)
return image,label
In [10]:
# 데이터셋(ds)을 가공하는 메인함수
def apply_normalize_on_dataset(ds, is_test=False, batch_size=16, with_aug=False):
ds = ds.map(
normalize_and_resize_img, # 기본적인 전처리 함수 적용
num_parallel_calls=2
)
if not is_test and with_aug:
ds = ds.map(
augment, # augment 함수 적용
num_parallel_calls=2
)
ds = ds.batch(batch_size)
# 테스트셋에는 미적용시키기위함
if not is_test:
ds = ds.repeat()
ds = ds.shuffle(200)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds
In [11]:
def augment2(image,label):
image = tf.image.central_crop(image, np.random.uniform(0.50, 1.00)) # 50%의 확률로 이미지 가운데 부분을 crop합니다.
image = tf.image.resize(image, INPUT_SHAPE) # crop한 이미지를 원본 사이즈로 resize
return image, label
(3) 비교실험 하기¶
In [12]:
# 텐서플로우의 imagenet에 훈련된 모델
num_classes = ds_info.features["label"].num_classes
resnet50 = keras.models.Sequential([
keras.applications.resnet.ResNet50(
# 마지막 fully connected layer를 포함할지 여부
# 해당 레이어를 포함하지 않고 생성하면 특성 추출기(feature extractor) 부분만 불러와
# 필요에 맞게 수정된 fully connected layer를 붙여서 활용 가능
include_top=False,
weights='imagenet',
input_shape=(224, 224,3),
pooling='avg',
),
keras.layers.Dense(num_classes, activation = 'softmax')
])
In [13]:
# Augmentation을 적용한 데이터셋으로 학습시킬 ResNet
aug_resnet50 = keras.models.Sequential([
keras.applications.resnet.ResNet50(
include_top=False,
weights='imagenet',
input_shape=(224, 224,3),
pooling='avg',
),
keras.layers.Dense(num_classes, activation = 'softmax')
])
In [14]:
(ds_train, ds_test), ds_info = tfds.load(
'stanford_dogs',
split=['train', 'test'],
as_supervised=True,
shuffle_files=True,
with_info=True,
)
# 학습셋 나누기
ds_train_no_aug = apply_normalize_on_dataset(ds_train, with_aug=False)
ds_train_aug = apply_normalize_on_dataset(ds_train, with_aug=True)
ds_test = apply_normalize_on_dataset(ds_test, is_test = True)
In [15]:
#EPOCH = 20 # Augentation 적용 효과를 확인하기 위해 필요한 epoch 수
EPOCH = 20
tf.random.set_seed(2020)
resnet50.compile(
loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.SGD(lr=0.01),
metrics=['accuracy'],
)
aug_resnet50.compile(
loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.SGD(lr=0.01),
metrics=['accuracy'],
)
history_resnet50_no_aug = resnet50.fit(
ds_train_no_aug, # augmentation 적용하지 않은 데이터셋 사용
steps_per_epoch=int(ds_info.splits['train'].num_examples/16),
validation_steps=int(ds_info.splits['test'].num_examples/16),
epochs=EPOCH,
validation_data=ds_test,
verbose=1,
use_multiprocessing=True,
)
history_resnet50_aug = aug_resnet50.fit(
ds_train_aug, # augmentation 적용한 데이터셋 사용
steps_per_epoch=int(ds_info.splits['train'].num_examples/16),
validation_steps=int(ds_info.splits['test'].num_examples/16),
epochs=EPOCH,
validation_data=ds_test,
verbose=1,
use_multiprocessing=True,
)
Epoch 1/20 750/750 [==============================] - 137s 145ms/step - loss: 3.1905 - accuracy: 0.2725 - val_loss: 3.5296 - val_accuracy: 0.1692 Epoch 2/20 750/750 [==============================] - 109s 145ms/step - loss: 0.9483 - accuracy: 0.7333 - val_loss: 1.1462 - val_accuracy: 0.6712 Epoch 3/20 750/750 [==============================] - 109s 145ms/step - loss: 0.3754 - accuracy: 0.9058 - val_loss: 1.0590 - val_accuracy: 0.6999 Epoch 4/20 750/750 [==============================] - 109s 145ms/step - loss: 0.1357 - accuracy: 0.9782 - val_loss: 0.9207 - val_accuracy: 0.7373 Epoch 5/20 750/750 [==============================] - 109s 145ms/step - loss: 0.0579 - accuracy: 0.9947 - val_loss: 0.9414 - val_accuracy: 0.7354 Epoch 6/20 750/750 [==============================] - 109s 145ms/step - loss: 0.0348 - accuracy: 0.9970 - val_loss: 0.9449 - val_accuracy: 0.7414 Epoch 7/20 750/750 [==============================] - 109s 145ms/step - loss: 0.0206 - accuracy: 0.9990 - val_loss: 0.9482 - val_accuracy: 0.7366 Epoch 8/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0205 - accuracy: 0.9978 - val_loss: 0.9503 - val_accuracy: 0.7423 Epoch 9/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0119 - accuracy: 0.9990 - val_loss: 0.9584 - val_accuracy: 0.7435 Epoch 10/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0091 - accuracy: 0.9995 - val_loss: 0.9698 - val_accuracy: 0.7438 Epoch 11/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0056 - accuracy: 0.9999 - val_loss: 0.9716 - val_accuracy: 0.7445 Epoch 12/20 750/750 [==============================] - 109s 145ms/step - loss: 0.0039 - accuracy: 0.9999 - val_loss: 0.9758 - val_accuracy: 0.7443 Epoch 13/20 750/750 [==============================] - 109s 145ms/step - loss: 0.0056 - accuracy: 0.9998 - val_loss: 0.9940 - val_accuracy: 0.7411 Epoch 14/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0062 - accuracy: 0.9999 - val_loss: 0.9977 - val_accuracy: 0.7400 Epoch 15/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0040 - accuracy: 0.9999 - val_loss: 1.0016 - val_accuracy: 0.7451 Epoch 16/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0040 - accuracy: 1.0000 - val_loss: 1.0056 - val_accuracy: 0.7441 Epoch 17/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0045 - accuracy: 1.0000 - val_loss: 1.0088 - val_accuracy: 0.7438 Epoch 18/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0039 - accuracy: 0.9995 - val_loss: 1.0136 - val_accuracy: 0.7449 Epoch 19/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.0190 - val_accuracy: 0.7470 Epoch 20/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0030 - accuracy: 1.0000 - val_loss: 1.0297 - val_accuracy: 0.7443 Epoch 1/20 750/750 [==============================] - 114s 146ms/step - loss: 3.1880 - accuracy: 0.2748 - val_loss: 2.6019 - val_accuracy: 0.3291 Epoch 2/20 750/750 [==============================] - 109s 146ms/step - loss: 1.1019 - accuracy: 0.6869 - val_loss: 1.3639 - val_accuracy: 0.6143 Epoch 3/20 750/750 [==============================] - 109s 146ms/step - loss: 0.6110 - accuracy: 0.8318 - val_loss: 1.2002 - val_accuracy: 0.6572 Epoch 4/20 750/750 [==============================] - 109s 146ms/step - loss: 0.3459 - accuracy: 0.9077 - val_loss: 1.0021 - val_accuracy: 0.7093 Epoch 5/20 750/750 [==============================] - 109s 146ms/step - loss: 0.1866 - accuracy: 0.9598 - val_loss: 1.0915 - val_accuracy: 0.7015 Epoch 6/20 750/750 [==============================] - 109s 146ms/step - loss: 0.1313 - accuracy: 0.9725 - val_loss: 0.9971 - val_accuracy: 0.7240 Epoch 7/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0776 - accuracy: 0.9879 - val_loss: 0.9725 - val_accuracy: 0.7355 Epoch 8/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0572 - accuracy: 0.9913 - val_loss: 0.9905 - val_accuracy: 0.7337 Epoch 9/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0431 - accuracy: 0.9939 - val_loss: 0.9529 - val_accuracy: 0.7452 Epoch 10/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0348 - accuracy: 0.9951 - val_loss: 1.0291 - val_accuracy: 0.7287 Epoch 11/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0260 - accuracy: 0.9971 - val_loss: 0.9537 - val_accuracy: 0.7478 Epoch 12/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0180 - accuracy: 0.9979 - val_loss: 0.9408 - val_accuracy: 0.7568 Epoch 13/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0092 - accuracy: 0.9992 - val_loss: 0.9422 - val_accuracy: 0.7570 Epoch 14/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0072 - accuracy: 0.9994 - val_loss: 0.9704 - val_accuracy: 0.7543 Epoch 15/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0060 - accuracy: 0.9995 - val_loss: 0.9562 - val_accuracy: 0.7626 Epoch 16/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0075 - accuracy: 0.9991 - val_loss: 0.9602 - val_accuracy: 0.7601 Epoch 17/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0048 - accuracy: 0.9998 - val_loss: 0.9737 - val_accuracy: 0.7583 Epoch 18/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0087 - accuracy: 0.9993 - val_loss: 0.9724 - val_accuracy: 0.7577 Epoch 19/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0079 - accuracy: 0.9992 - val_loss: 0.9876 - val_accuracy: 0.7549 Epoch 20/20 750/750 [==============================] - 109s 146ms/step - loss: 0.0081 - accuracy: 0.9988 - val_loss: 0.9885 - val_accuracy: 0.7604
In [16]:
plt.plot(history_resnet50_no_aug.history['val_accuracy'], 'r')
plt.plot(history_resnet50_aug.history['val_accuracy'], 'b')
plt.title('Model validation accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['No Augmentation', 'With Augmentation'], loc='upper left')
plt.show()
In [32]:
plt.plot(history_resnet50_no_aug.history['val_accuracy'], 'r')
plt.plot(history_resnet50_aug.history['val_accuracy'], 'b')
plt.title('Model validation accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['No Augmentation', 'With Augmentation'], loc='upper left')
plt.grid(True)
#plt.ylim(0.50, 0.80) # 출력하고자 하는 Accuracy 범위를 지정해 주세요.
plt.ylim(0.72, 0.76) # EPOCH=20으로 진행한다면 이 범위가 적당합니다.
plt.show()
1) 이미지 섞기¶
In [18]:
import matplotlib.pyplot as plt
# 데이터셋에서 이미지 2개를 가져옵니다.
for i, (image, label) in enumerate(ds_train_no_aug.take(1)):
if i == 0:
image_a = image[0]
image_b = image[1]
label_a = label[0]
label_b = label[1]
break
plt.subplot(1,2,1)
plt.imshow(image_a)
plt.subplot(1,2,2)
plt.imshow(image_b)
Out[18]:
<matplotlib.image.AxesImage at 0x7f53d56816d0>
In [19]:
# 리사이즈를 하지않게 임의지정
def get_clip_box(image_a, image_b):
# image.shape = (height, width, channel)
image_size_x = image_a.shape[1]
image_size_y = image_a.shape[0]
# get center of box
x = tf.cast( tf.random.uniform([],0, image_size_x),tf.int32)
y = tf.cast( tf.random.uniform([],0, image_size_y),tf.int32)
# get width, height of box
width = tf.cast(image_size_x * tf.math.sqrt(1-tf.random.uniform([],0,1)),tf.int32)
height = tf.cast(image_size_y * tf.math.sqrt(1-tf.random.uniform([],0,1)),tf.int32)
# clip box in image and get minmax bbox
xa = tf.math.maximum(0, x-width//2)
ya = tf.math.maximum(0, y-height//2)
xb = tf.math.minimum(image_size_x, x+width//2)
yb = tf.math.minimum(image_size_y, y+width//2)
return xa, ya, xb, yb
xa, ya, xb, yb = get_clip_box(image_a, image_b)
print(xa, ya, xb, yb)
tf.Tensor(65, shape=(), dtype=int32) tf.Tensor(88, shape=(), dtype=int32) tf.Tensor(224, shape=(), dtype=int32) tf.Tensor(224, shape=(), dtype=int32)
In [20]:
# mix two images
def mix_2_images(image_a, image_b, xa, ya, xb, yb):
image_size_x = image_a.shape[1]
image_size_y = image_a.shape[0]
one = image_a[ya:yb,0:xa,:]
two = image_b[ya:yb,xa:xb,:]
three = image_a[ya:yb,xb:image_size_x,:]
middle = tf.concat([one,two,three],axis=1)
top = image_a[0:ya,:,:]
bottom = image_a[yb:image_size_y,:,:]
mixed_img = tf.concat([top, middle, bottom],axis=0)
return mixed_img
mixed_img = mix_2_images(image_a, image_b, xa, ya, xb, yb)
plt.imshow(mixed_img.numpy())
Out[20]:
<matplotlib.image.AxesImage at 0x7f53fafebc90>
2) 라벨 섞기¶
In [21]:
# mix two labels
def mix_2_label(label_a, label_b, xa, ya, xb, yb, num_classes=120):
image_size_x = image_a.shape[1]
image_size_y = image_a.shape[0]
mixed_area = (xb-xa)*(yb-ya)
total_area = image_size_x*image_size_y
a = tf.cast(mixed_area/total_area, tf.float32)
if len(label_a.shape)==0:
label_a = tf.one_hot(label_a, num_classes)
if len(label_b.shape)==0:
label_b = tf.one_hot(label_b, num_classes)
mixed_label = (1-a)*label_a + a*label_b
return mixed_label
mixed_label = mix_2_label(label_a, label_b, xa, ya, xb, yb)
mixed_label
Out[21]:
<tf.Tensor: shape=(120,), dtype=float32, numpy= array([0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.56903696, 0. , 0. , 0.430963 , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], dtype=float32)>
In [22]:
# CutMix에서는 면적에 비례해서 라벨을 섞어줍니다
def cutmix(image, label, prob = 1.0, batch_size=16, img_size=224, num_classes=120):
mixed_imgs = []
mixed_labels = []
for i in range(batch_size):
image_a = image[i]
label_a = label[i]
j = tf.cast(tf.random.uniform([],0, batch_size),tf.int32)
image_b = image[j]
label_b = label[j]
xa, ya, xb, yb = get_clip_box(image_a, image_b)
mixed_imgs.append(mix_2_images(image_a, image_b, xa, ya, xb, yb))
mixed_labels.append(mix_2_label(label_a, label_b, xa, ya, xb, yb))
mixed_imgs = tf.reshape(tf.stack(mixed_imgs),(batch_size, img_size, img_size, 3))
mixed_labels = tf.reshape(tf.stack(mixed_labels),(batch_size, num_classes))
return mixed_imgs, mixed_label
심화 기법 (2) Mixup Augmentation¶
- mixup: Beyond Empirical Risk Minimization
- 두 개 이미지의 픽셀별 값을 비율에 따라 섞어주는 방식
In [23]:
# function for mixup
def mixup_2_images(image_a, image_b, label_a, label_b):
a = tf.random.uniform([],0,1)
if len(label_a.shape)==0:
label_a = tf.one_hot(label_a, num_classes)
if len(label_b.shape)==0:
label_b = tf.one_hot(label_b, num_classes)
mixed_image= (1-a)*image_a + a*image_b
mixed_label = (1-a)*label_a + a*label_b
return mixed_image, mixed_label
mixed_img, mixed_label = mixup_2_images(image_a, image_b, label_a, label_b)
plt.imshow(mixed_img.numpy())
print(mixed_label)
tf.Tensor( [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.5529437 0. 0. 0.4470563 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ], shape=(120,), dtype=float32)
In [24]:
def mixup(image, label, prob = 1.0, batch_size=16, img_size=224, num_classes=120):
mixed_imgs = []
mixed_labels = []
for i in range(batch_size):
image_a = image[i]
label_a = label[i]
j = tf.cast(tf.random.uniform([],0, batch_size),tf.int32)
image_b = image[j]
label_b = label[j]
mixed_img, mixed_label = mixup_2_images(image_a, image_b, label_a, label_b)
mixed_imgs.append(mixed_img)
mixed_labels.append(mixed_label)
mixed_imgs = tf.reshape(tf.stack(mixed_imgs),(batch_size, img_size, img_size, 3))
mixed_labels = tf.reshape(tf.stack(mixed_labels),(batch_size, num_classes))
return mixed_imgs, mixed_labels
프로젝트: CutMix 또는 Mixup 비교실험 하기¶
1. Augmentation을 적용한 데이터셋 만들기¶
In [25]:
import urllib3
urllib3.disable_warnings()
(ds_train, ds_test), ds_info = tfds.load(
'stanford_dogs',
split=['train', 'test'],
shuffle_files=True,
with_info=True,
)
In [26]:
# CutMix를 적용할 수 있도록 변경한 apply_normalize_on_dataset()
def normalize_and_resize_img(image, label):
"""Normalizes images: `uint8` -> `float32`."""
image = tf.image.resize(image, [224, 224])
return tf.cast(image, tf.float32) / 255., label
def augment(image,label):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_brightness(image, max_delta=0.2)
return image,label
def apply_normalize_on_dataset(ds, is_test=False, batch_size=16, with_aug=False, with_cutmix=False):
ds = ds.map(
normalize_and_resize_img,
num_parallel_calls=2
)
if not is_test and with_aug:
ds = ds.map(
augment
)
ds = ds.batch(batch_size)
if not is_test and with_cutmix:
ds = ds.map(
cutmix,
num_parallel_calls=2
)
else:
ds = ds.map(
mixup,
num_parallel_calls=2
)
if not is_test:
ds = ds.repeat()
ds = ds.shuffle(200)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds
2. 모델 만들기¶
In [27]:
cutmix_resnet50 = keras.models.Sequential([
keras.applications.resnet.ResNet50(
include_top=False,
weights='imagenet',
input_shape=(224, 224,3),
pooling='avg',
),
keras.layers.Dense(num_classes, activation = 'softmax')
])
In [28]:
mixup_resnet50 = keras.models.Sequential([
keras.applications.resnet.ResNet50(
include_top=False,
weights='imagenet',
input_shape=(224, 224,3),
pooling='avg',
),
keras.layers.Dense(num_classes, activation = 'softmax')
])
3. 모델 훈련하기¶
In [29]:
(ds_train, ds_test), ds_info = tfds.load(
'stanford_dogs',
split=['train', 'test'],
as_supervised=True,
shuffle_files=True,
with_info=True,
)
# 학습셋 나누기
ds_train_cutmix = apply_normalize_on_dataset(ds_train,is_test=True, with_cutmix=True)
ds_train_mixup = apply_normalize_on_dataset(ds_train, is_test=True, with_cutmix=False)
ds_test = apply_normalize_on_dataset(ds_test, is_test = True)
WARNING:tensorflow:AutoGraph could not transform <function mixup at 0x7f549e81e560> and will run it as-is. Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: module 'gast' has no attribute 'Index' To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert
WARNING:tensorflow:AutoGraph could not transform <function mixup at 0x7f549e81e560> and will run it as-is. Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: module 'gast' has no attribute 'Index' To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert
WARNING: AutoGraph could not transform <function mixup at 0x7f549e81e560> and will run it as-is. Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: module 'gast' has no attribute 'Index' To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert
In [30]:
#EPOCH = 20 # Augentation 적용 효과를 확인하기 위해 필요한 epoch 수
EPOCH = 20
tf.random.set_seed(2020)
cutmix_resnet50.compile(
loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.SGD(lr=0.01),
metrics=['accuracy'],
)
mixup_resnet50.compile(
loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.SGD(lr=0.01),
metrics=['accuracy'],
)
history_cutmix_resnet50 = cutmix_resnet50.fit(
ds_train_cutmix,
steps_per_epoch=int(ds_info.splits['train'].num_examples/16),
validation_steps=int(ds_info.splits['test'].num_examples/16),
epochs=EPOCH,
validation_data=ds_test,
verbose=1,
use_multiprocessing=True,
)
history_mixup_resnet50 = mixup_resnet50.fit(
ds_train_mixup,
steps_per_epoch=int(ds_info.splits['train'].num_examples/16),
validation_steps=int(ds_info.splits['test'].num_examples/16),
epochs=EPOCH,
validation_data=ds_test,
verbose=1,
use_multiprocessing=True,
)
Epoch 1/20 750/750 [==============================] - 118s 153ms/step - loss: 4.2783 - accuracy: 0.1263 - val_loss: 4.6710 - val_accuracy: 0.0541 Epoch 2/20 750/750 [==============================] - 114s 153ms/step - loss: 2.9702 - accuracy: 0.4400 - val_loss: 2.9085 - val_accuracy: 0.4588 Epoch 3/20 750/750 [==============================] - 115s 153ms/step - loss: 2.6406 - accuracy: 0.5622 - val_loss: 3.0083 - val_accuracy: 0.4222 Epoch 4/20 750/750 [==============================] - 115s 153ms/step - loss: 2.4447 - accuracy: 0.6347 - val_loss: 2.6702 - val_accuracy: 0.5295 Epoch 5/20 750/750 [==============================] - 115s 153ms/step - loss: 2.3282 - accuracy: 0.6842 - val_loss: 2.6349 - val_accuracy: 0.5415 Epoch 6/20 750/750 [==============================] - 115s 153ms/step - loss: 2.1983 - accuracy: 0.7283 - val_loss: 2.6396 - val_accuracy: 0.5433 Epoch 7/20 750/750 [==============================] - 115s 153ms/step - loss: 2.1331 - accuracy: 0.7530 - val_loss: 2.7183 - val_accuracy: 0.5147 Epoch 8/20 750/750 [==============================] - 115s 153ms/step - loss: 2.0424 - accuracy: 0.7804 - val_loss: 2.7055 - val_accuracy: 0.5239 Epoch 9/20 750/750 [==============================] - 115s 153ms/step - loss: 1.8977 - accuracy: 0.8318 - val_loss: 2.6394 - val_accuracy: 0.5406 Epoch 10/20 750/750 [==============================] - 115s 153ms/step - loss: 1.9538 - accuracy: 0.8100 - val_loss: 2.6763 - val_accuracy: 0.5340 Epoch 11/20 750/750 [==============================] - 115s 153ms/step - loss: 1.9100 - accuracy: 0.8224 - val_loss: 2.6946 - val_accuracy: 0.5297 Epoch 12/20 750/750 [==============================] - 115s 153ms/step - loss: 1.8462 - accuracy: 0.8406 - val_loss: 2.5605 - val_accuracy: 0.5584 Epoch 13/20 750/750 [==============================] - 115s 153ms/step - loss: 1.8587 - accuracy: 0.8425 - val_loss: 2.5909 - val_accuracy: 0.5573 Epoch 14/20 750/750 [==============================] - 115s 153ms/step - loss: 1.8541 - accuracy: 0.8455 - val_loss: 2.5904 - val_accuracy: 0.5638 Epoch 15/20 750/750 [==============================] - 115s 153ms/step - loss: 1.8241 - accuracy: 0.8486 - val_loss: 2.6787 - val_accuracy: 0.5251 Epoch 16/20 750/750 [==============================] - 115s 153ms/step - loss: 1.7114 - accuracy: 0.8691 - val_loss: 2.5305 - val_accuracy: 0.5744 Epoch 17/20 750/750 [==============================] - 115s 153ms/step - loss: 1.7685 - accuracy: 0.8552 - val_loss: 2.7430 - val_accuracy: 0.5209 Epoch 18/20 750/750 [==============================] - 115s 153ms/step - loss: 1.7081 - accuracy: 0.8684 - val_loss: 2.5510 - val_accuracy: 0.5592 Epoch 19/20 750/750 [==============================] - 115s 153ms/step - loss: 1.7015 - accuracy: 0.8645 - val_loss: 2.5992 - val_accuracy: 0.5466 Epoch 20/20 750/750 [==============================] - 115s 153ms/step - loss: 1.7244 - accuracy: 0.8665 - val_loss: 2.5935 - val_accuracy: 0.5465 Epoch 1/20 750/750 [==============================] - 118s 154ms/step - loss: 4.2763 - accuracy: 0.1319 - val_loss: 4.5706 - val_accuracy: 0.0637 Epoch 2/20 750/750 [==============================] - 115s 153ms/step - loss: 2.9653 - accuracy: 0.4442 - val_loss: 3.0655 - val_accuracy: 0.4075 Epoch 3/20 750/750 [==============================] - 115s 153ms/step - loss: 2.6578 - accuracy: 0.5492 - val_loss: 2.8867 - val_accuracy: 0.4614 Epoch 4/20 750/750 [==============================] - 115s 153ms/step - loss: 2.4683 - accuracy: 0.6216 - val_loss: 2.7633 - val_accuracy: 0.5126 Epoch 5/20 750/750 [==============================] - 115s 153ms/step - loss: 2.3068 - accuracy: 0.6859 - val_loss: 2.6278 - val_accuracy: 0.5484 Epoch 6/20 750/750 [==============================] - 115s 153ms/step - loss: 2.1871 - accuracy: 0.7358 - val_loss: 2.6503 - val_accuracy: 0.5441 Epoch 7/20 750/750 [==============================] - 115s 153ms/step - loss: 2.1237 - accuracy: 0.7462 - val_loss: 2.6326 - val_accuracy: 0.5457 Epoch 8/20 750/750 [==============================] - 115s 153ms/step - loss: 2.0426 - accuracy: 0.7839 - val_loss: 2.6521 - val_accuracy: 0.5459 Epoch 9/20 750/750 [==============================] - 115s 153ms/step - loss: 1.6512 - accuracy: 0.8945 - val_loss: 2.6510 - val_accuracy: 0.5433 Epoch 10/20 750/750 [==============================] - 115s 153ms/step - loss: 1.9797 - accuracy: 0.8071 - val_loss: 2.6535 - val_accuracy: 0.5380 Epoch 11/20 750/750 [==============================] - 115s 153ms/step - loss: 1.9122 - accuracy: 0.8184 - val_loss: 2.6226 - val_accuracy: 0.5351 Epoch 12/20 750/750 [==============================] - 115s 153ms/step - loss: 1.8678 - accuracy: 0.8348 - val_loss: 2.6479 - val_accuracy: 0.5368 Epoch 13/20 750/750 [==============================] - 115s 153ms/step - loss: 1.7584 - accuracy: 0.8600 - val_loss: 2.7000 - val_accuracy: 0.5227 Epoch 14/20 750/750 [==============================] - 115s 153ms/step - loss: 1.8587 - accuracy: 0.8294 - val_loss: 2.5869 - val_accuracy: 0.5581 Epoch 15/20 750/750 [==============================] - 115s 153ms/step - loss: 1.8163 - accuracy: 0.8381 - val_loss: 2.5649 - val_accuracy: 0.5576 Epoch 16/20 750/750 [==============================] - 115s 153ms/step - loss: 1.8031 - accuracy: 0.8390 - val_loss: 2.5626 - val_accuracy: 0.5557 Epoch 17/20 750/750 [==============================] - 115s 153ms/step - loss: 1.7699 - accuracy: 0.8584 - val_loss: 2.5385 - val_accuracy: 0.5677 Epoch 18/20 750/750 [==============================] - 115s 153ms/step - loss: 1.7439 - accuracy: 0.8582 - val_loss: 2.5928 - val_accuracy: 0.5524 Epoch 19/20 750/750 [==============================] - 115s 153ms/step - loss: 1.6256 - accuracy: 0.8868 - val_loss: 2.5360 - val_accuracy: 0.5625 Epoch 20/20 750/750 [==============================] - 115s 153ms/step - loss: 1.7101 - accuracy: 0.8648 - val_loss: 2.6807 - val_accuracy: 0.5244
In [31]:
plt.plot(history_resnet50_no_aug.history['val_accuracy'], 'r')
plt.plot(history_resnet50_aug.history['val_accuracy'], 'b')
plt.plot(history_cutmix_resnet50.history['val_accuracy'], 'g')
plt.plot(history_mixup_resnet50.history['val_accuracy'], color='violet')
plt.title('Model validation accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['No Augmentation', 'With Augmentation','cutmix','mixup'], loc='upper left')
plt.show()
정리¶
- Augmentation 적용시 Augmentaion 미적용보다 성능이 향상됨을 확인함
'Going Deeper' 카테고리의 다른 글
Segmentation (0) | 2021.04.15 |
---|---|
RetinaNet으로 자율주행 시스템 만들기 (0) | 2021.04.13 |
Object detection (0) | 2021.04.12 |
GAN을 이용한 augmentation 기법 논문 (0) | 2021.04.08 |
Image Augmentation (0) | 2021.04.08 |