Defining the libraries to use¶
In [ ]:
"""
- 패션MNIST 데이터 읽어들이기(훈련 및 테스트 데이터)
- 정규화하기
- 훈련 및 검증으로 분류하기
"""
In [2]:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
In [10]:
(train_input, train_target), (test_input, test_target) =\
keras.datasets.fashion_mnist.load_data()
print(train_input.shape, train_target.shape)
print(test_input.shape, test_target.shape)
(60000, 28, 28) (60000,)
(10000, 28, 28) (10000,)
In [11]:
train_scaled = train_input / 255.0
test_scaled = test_input / 255.0
In [12]:
train_scaled, val_scaled, train_target, val_target = \
train_test_split(train_scaled, train_target,
test_size=0.2,
random_state=42)
In [13]:
print(train_scaled.shape, train_target.shape)
print(val_scaled.shape, val_target.shape)
print(test_scaled.shape, test_target.shape)
(48000, 28, 28) (48000,)
(12000, 28, 28) (12000,)
(10000, 28, 28) (10000,)
Deep Neural Network (DNN)¶
In [ ]:
"""
* 인공신경망(Artificial Neural Network , ANN)
- 계층이 1개인 경우 또는 은닉계층이 없는 경우
* 심층신경망(Deep Neural Network, DNN)
- 은닉계층을 가지고 있는 경우
"""
Creating a model-builder function¶
In [50]:
"""
- 함수이름 : model_fn
--> 매개변수 : a_layer
-----> 은닉계층이 있는 경우 계층자체를 매개변수로 받아서 아래에서 추가
- 모델생성
- 입력층(1차원 전처리계층) 추가
- 100개의 출력을 담당하는 은닉계층 추가, 활성화 함수 relu
- 추가할 은닉계층이 있으면 추가, 없으면 건너띄기
- 출력층
- 모델 반환
"""
def model_fn(a_layer=None):
    model = keras.Sequential()
    model.add(keras.layers.Flatten(input_shape=(28, 28)))
    model.add(keras.layers.Dense(100, activation="relu"))
    if a_layer:
        model.add(a_layer)
    model.add(keras.layers.Dense(10, activation="softmax"))
    return model
In [51]:
""" 함수 호출하기 """
model5 = model_fn()
model5
Out[51]:
<keras.engine.sequential.Sequential at 0x2969b838f40>
In [27]:
"""
param = (입력크기 * 출력크기) + 출력크기
78500 = (784*100) + 100
"""
model5.summary()
Model: "sequential_5"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_3 (Flatten) (None, 784) 0
dense_2 (Dense) (None, 100) 78500
dense_3 (Dense) (None, 10) 1010
=================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
_________________________________________________________________
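The formula can be checked directly against the summary; a quick sketch:

# (inputs * outputs) + biases, for each Dense layer
hidden_params = 784 * 100 + 100   # 78,500 (dense_2)
output_params = 100 * 10 + 10     # 1,010  (dense_3)
print(hidden_params + output_params)  # 79,510 == Total params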
In [52]:
""" 모델 설정하기(compile) """
model5.compile(loss="sparse_categorical_crossentropy", metrics="accuracy")
In [34]:
model5
Out[34]:
<keras.engine.sequential.Sequential at 0x2969a52c9a0>
In [56]:
model5.fit(train_scaled, train_target, epochs=10)
Epoch 1/10
1500/1500 [==============================] - 1s 829us/step - loss: 0.3079 - accuracy: 0.8905
Epoch 2/10
1500/1500 [==============================] - 1s 765us/step - loss: 0.2979 - accuracy: 0.8949
Epoch 3/10
1500/1500 [==============================] - 1s 779us/step - loss: 0.2896 - accuracy: 0.8971
Epoch 4/10
1500/1500 [==============================] - 1s 768us/step - loss: 0.2828 - accuracy: 0.9004
Epoch 5/10
1500/1500 [==============================] - 1s 797us/step - loss: 0.2750 - accuracy: 0.9018
Epoch 6/10
1500/1500 [==============================] - 1s 808us/step - loss: 0.2718 - accuracy: 0.9049
Epoch 7/10
1500/1500 [==============================] - 1s 804us/step - loss: 0.2643 - accuracy: 0.9070
Epoch 8/10
1500/1500 [==============================] - 1s 769us/step - loss: 0.2610 - accuracy: 0.9095
Epoch 9/10
1500/1500 [==============================] - 1s 824us/step - loss: 0.2567 - accuracy: 0.9118
Epoch 10/10
1500/1500 [==============================] - 1s 814us/step - loss: 0.2500 - accuracy: 0.9133
Out[56]:
<keras.callbacks.History at 0x2969d4f1e50>
In [59]:
"""훈련 출력방법 지정
- verbose : 출력방법 지정
: 0은 아무것도 안나옴
: 1은 프로그래스바와 함께 loss와 accuracy
: 2는 loss이 accuracy만 출력(프로그래스바 안나옴)
: 기본값은 1
"""
history5 = model5.fit(train_scaled, train_target, epochs=5, verbose=1)
print("훈련 끝>>>>>>>>>>>>>>>")
Epoch 1/5
1500/1500 [==============================] - 1s 794us/step - loss: 0.2459 - accuracy: 0.9154
Epoch 2/5
1500/1500 [==============================] - 1s 774us/step - loss: 0.2431 - accuracy: 0.9167
Epoch 3/5
1500/1500 [==============================] - 1s 765us/step - loss: 0.2374 - accuracy: 0.9172
Epoch 4/5
1500/1500 [==============================] - 1s 769us/step - loss: 0.2345 - accuracy: 0.9188
Epoch 5/5
1500/1500 [==============================] - 1s 791us/step - loss: 0.2345 - accuracy: 0.9194
Training done>>>>>>>>>>>>>>>
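As referenced above, a quick sketch of the other two verbose modes (continuing to train model5, so exact numbers will differ):

model5.fit(train_scaled, train_target, epochs=1, verbose=0)  # completely silent
model5.fit(train_scaled, train_target, epochs=1, verbose=2)  # one line per epoch, no progress bar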
In [60]:
history5.epoch
Out[60]:
[0, 1, 2, 3, 4]
In [61]:
history5.history
Out[61]:
{'loss': [0.2459409534931183,
0.24312011897563934,
0.23739585280418396,
0.23448622226715088,
0.2344704419374466],
'accuracy': [0.9153749942779541,
0.9167291522026062,
0.917187511920929,
0.918833315372467,
0.9193958044052124]}
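Since history.history is a plain Python dict of lists, individual values are easy to pull out, e.g. the final epoch's metrics:

print(history5.history["loss"][-1])      # loss after the last epoch
print(history5.history["accuracy"][-1])  # accuracy after the last epoch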
In [62]:
""" 시각화 하기 """
import matplotlib.pyplot as plt
plt.title("Epoch5 - Loss")
plt.plot(history5.epoch, history5.history["loss"])
plt.xlabel("epoch")
plt.ylabel("loss")
plt.grid()
plt.savefig("./saveFig/Epoch5-loss.png")
plt.show()
In [63]:
""" 정확도 시각화 하기 """
import matplotlib.pyplot as plt
plt.title("Epoch5 - Accuracy")
plt.plot(history5.epoch, history5.history["accuracy"])
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.grid()
plt.savefig("./saveFig/Epoch5-accuracy.png")
plt.show()
In [ ]:
"""
- 새로운 모델 생성 : model20 변수명 사용
- epoch 20번 수행
- 프로그래스바가 보이도록 훈련시 출력하기
- 손실 및 정확도 곡선 각각 그리기
"""
In [68]:
(train_input, train_target), (test_input, test_target) =\
keras.datasets.fashion_mnist.load_data()
print(train_input.shape, train_target.shape)
print(test_input.shape, test_target.shape)
(60000, 28, 28) (60000,)
(10000, 28, 28) (10000,)
In [69]:
train_scaled = train_input / 255.0
test_scaled = test_input / 255.0
In [70]:
train_scaled, val_scaled, train_target, val_target = \
train_test_split(train_scaled, train_target,
test_size=0.2,
random_state=42)
In [71]:
print(train_scaled.shape, train_target.shape)
print(val_scaled.shape, val_target.shape)
print(test_scaled.shape, test_target.shape)
(48000, 28, 28) (48000,)
(12000, 28, 28) (12000,)
(10000, 28, 28) (10000,)
In [75]:
model20 = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
# model20
model20.compile(loss="sparse_categorical_crossentropy", metrics="accuracy")
history20 = model20.fit(train_scaled, train_target, epochs=20, validation_data=(val_scaled, val_target), verbose=1)
Epoch 1/20
1500/1500 [==============================] - 2s 958us/step - loss: 0.5306 - accuracy: 0.8138 - val_loss: 0.4586 - val_accuracy: 0.8409
Epoch 2/20
1500/1500 [==============================] - 1s 880us/step - loss: 0.3957 - accuracy: 0.8585 - val_loss: 0.3924 - val_accuracy: 0.8591
Epoch 3/20
1500/1500 [==============================] - 1s 909us/step - loss: 0.3564 - accuracy: 0.8728 - val_loss: 0.3668 - val_accuracy: 0.8716
Epoch 4/20
1500/1500 [==============================] - 1s 886us/step - loss: 0.3325 - accuracy: 0.8811 - val_loss: 0.3410 - val_accuracy: 0.8796
Epoch 5/20
1500/1500 [==============================] - 1s 908us/step - loss: 0.3213 - accuracy: 0.8840 - val_loss: 0.3647 - val_accuracy: 0.8758
Epoch 6/20
1500/1500 [==============================] - 1s 885us/step - loss: 0.3075 - accuracy: 0.8906 - val_loss: 0.3565 - val_accuracy: 0.8814
Epoch 7/20
1500/1500 [==============================] - 1s 922us/step - loss: 0.2993 - accuracy: 0.8934 - val_loss: 0.3710 - val_accuracy: 0.8776
Epoch 8/20
1500/1500 [==============================] - 1s 906us/step - loss: 0.2918 - accuracy: 0.8964 - val_loss: 0.3726 - val_accuracy: 0.8770
Epoch 9/20
1500/1500 [==============================] - 1s 891us/step - loss: 0.2848 - accuracy: 0.8997 - val_loss: 0.3734 - val_accuracy: 0.8813
Epoch 10/20
1500/1500 [==============================] - 1s 891us/step - loss: 0.2778 - accuracy: 0.9021 - val_loss: 0.3642 - val_accuracy: 0.8863
Epoch 11/20
1500/1500 [==============================] - 1s 908us/step - loss: 0.2716 - accuracy: 0.9051 - val_loss: 0.4111 - val_accuracy: 0.8766
Epoch 12/20
1500/1500 [==============================] - 1s 910us/step - loss: 0.2683 - accuracy: 0.9061 - val_loss: 0.4441 - val_accuracy: 0.8728
Epoch 13/20
1500/1500 [==============================] - 1s 997us/step - loss: 0.2604 - accuracy: 0.9094 - val_loss: 0.4008 - val_accuracy: 0.8832
Epoch 14/20
1500/1500 [==============================] - 1s 952us/step - loss: 0.2576 - accuracy: 0.9117 - val_loss: 0.3796 - val_accuracy: 0.8907
Epoch 15/20
1500/1500 [==============================] - 1s 902us/step - loss: 0.2507 - accuracy: 0.9128 - val_loss: 0.4155 - val_accuracy: 0.8816
Epoch 16/20
1500/1500 [==============================] - 1s 926us/step - loss: 0.2475 - accuracy: 0.9137 - val_loss: 0.4325 - val_accuracy: 0.8809
Epoch 17/20
1500/1500 [==============================] - 1s 891us/step - loss: 0.2425 - accuracy: 0.9163 - val_loss: 0.4243 - val_accuracy: 0.8882
Epoch 18/20
1500/1500 [==============================] - 1s 921us/step - loss: 0.2398 - accuracy: 0.9181 - val_loss: 0.4661 - val_accuracy: 0.8783
Epoch 19/20
1500/1500 [==============================] - 1s 886us/step - loss: 0.2345 - accuracy: 0.9176 - val_loss: 0.4214 - val_accuracy: 0.8874
Epoch 20/20
1500/1500 [==============================] - 1s 917us/step - loss: 0.2307 - accuracy: 0.9183 - val_loss: 0.4328 - val_accuracy: 0.8865
In [79]:
import matplotlib.pyplot as plt
plt.title("Epoch5 - Loss")
plt.plot(history5.epoch, history5.history["loss"])
plt.xlabel("epoch")
plt.ylabel("loss")
plt.grid()
plt.savefig("./saveFig/Epoch5-loss.png")
plt.show()
In [78]:
plt.title("Epoch5 - Accuracy")
plt.plot(history5.epoch, history5.history["accuracy"])
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.grid()
plt.savefig("./saveFig/Epoch5-accuracy.png")
plt.show()
In [87]:
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.title("Epoch5 - Loss")
plt.plot(history5.epoch, history5.history["loss"])
plt.xlabel("epoch")
plt.ylabel("loss")
# plt.grid()
plt.subplot(1, 2, 2)
plt.title("Epoch5 - Accuracy")
plt.plot(history5.epoch, history5.history["accuracy"])
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.grid()
# plt.legend(["loss","accuracy"])
plt.show()
In [88]:
model = model_fn()
model
Out[88]:
<keras.engine.sequential.Sequential at 0x296c31ef490>
In [89]:
model.compile(loss="sparse_categorical_crossentropy", metrics="accuracy")
In [90]:
history = model.fit(
train_scaled, train_target, epochs=20, verbose=1,
validation_data=(val_scaled, val_target)
)
Epoch 1/20
1500/1500 [==============================] - 2s 954us/step - loss: 0.5328 - accuracy: 0.8139 - val_loss: 0.4325 - val_accuracy: 0.8443
Epoch 2/20
1500/1500 [==============================] - 1s 923us/step - loss: 0.3928 - accuracy: 0.8588 - val_loss: 0.4704 - val_accuracy: 0.8332
Epoch 3/20
1500/1500 [==============================] - 1s 918us/step - loss: 0.3557 - accuracy: 0.8728 - val_loss: 0.3914 - val_accuracy: 0.8646
Epoch 4/20
1500/1500 [==============================] - 1s 943us/step - loss: 0.3332 - accuracy: 0.8816 - val_loss: 0.3918 - val_accuracy: 0.8658
Epoch 5/20
1500/1500 [==============================] - 1s 897us/step - loss: 0.3184 - accuracy: 0.8862 - val_loss: 0.4095 - val_accuracy: 0.8635
Epoch 6/20
1500/1500 [==============================] - 1s 901us/step - loss: 0.3078 - accuracy: 0.8905 - val_loss: 0.3482 - val_accuracy: 0.8837
Epoch 7/20
1500/1500 [==============================] - 1s 943us/step - loss: 0.2982 - accuracy: 0.8950 - val_loss: 0.3798 - val_accuracy: 0.8740
Epoch 8/20
1500/1500 [==============================] - 1s 901us/step - loss: 0.2914 - accuracy: 0.8979 - val_loss: 0.3577 - val_accuracy: 0.8856
Epoch 9/20
1500/1500 [==============================] - 1s 927us/step - loss: 0.2818 - accuracy: 0.9013 - val_loss: 0.4019 - val_accuracy: 0.8773
Epoch 10/20
1500/1500 [==============================] - 1s 899us/step - loss: 0.2785 - accuracy: 0.9033 - val_loss: 0.4168 - val_accuracy: 0.8777
Epoch 11/20
1500/1500 [==============================] - 1s 935us/step - loss: 0.2694 - accuracy: 0.9050 - val_loss: 0.4011 - val_accuracy: 0.8798
Epoch 12/20
1500/1500 [==============================] - 1s 902us/step - loss: 0.2638 - accuracy: 0.9076 - val_loss: 0.4039 - val_accuracy: 0.8811
Epoch 13/20
1500/1500 [==============================] - 1s 900us/step - loss: 0.2596 - accuracy: 0.9091 - val_loss: 0.4164 - val_accuracy: 0.8774
Epoch 14/20
1500/1500 [==============================] - 1s 903us/step - loss: 0.2564 - accuracy: 0.9115 - val_loss: 0.3921 - val_accuracy: 0.8846
Epoch 15/20
1500/1500 [==============================] - 1s 962us/step - loss: 0.2506 - accuracy: 0.9136 - val_loss: 0.4210 - val_accuracy: 0.8797
Epoch 16/20
1500/1500 [==============================] - 1s 907us/step - loss: 0.2457 - accuracy: 0.9142 - val_loss: 0.4201 - val_accuracy: 0.8835
Epoch 17/20
1500/1500 [==============================] - 1s 957us/step - loss: 0.2422 - accuracy: 0.9164 - val_loss: 0.4208 - val_accuracy: 0.8852
Epoch 18/20
1500/1500 [==============================] - 1s 955us/step - loss: 0.2389 - accuracy: 0.9166 - val_loss: 0.4626 - val_accuracy: 0.8778
Epoch 19/20
1500/1500 [==============================] - 1s 923us/step - loss: 0.2344 - accuracy: 0.9193 - val_loss: 0.4643 - val_accuracy: 0.8826
Epoch 20/20
1500/1500 [==============================] - 1s 911us/step - loss: 0.2313 - accuracy: 0.9206 - val_loss: 0.5061 - val_accuracy: 0.8782
In [91]:
history.history.keys()
Out[91]:
dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
In [ ]:
"""
- 훈련과 검증에 대한 손실(loss)곡선을 그려주세요
"""
In [ ]:
"""
- 훈련과 검증에 대한 정확도(accuracy)곡선을 그려주세요
"""
In [99]:
plt.figure(figsize=(12,4))
plt.subplot(1, 2, 1)
plt.plot(history.epoch, history.history["loss"])
plt.plot(history.epoch, history.history["val_loss"])
plt.xlabel("epoch")
plt.legend(["loss","val_loss"])
plt.show()
In [100]:
plt.title("Epoch5 - Accuracy")
plt.plot(history.epoch, history.history["accuracy"])
plt.plot(history.epoch, history.history["val_accuracy"])
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.grid()
plt.legend(["accuracy","val_accuracy"])
Out[100]:
<matplotlib.legend.Legend at 0x296c3820400>
In [106]:
"""
- model 변수명으로 신규 모델 만들기
--> 옵티마이저 adam 사용
- 훈련 및 검증 동시에 훈련시킨 후 손실 및 검증 곡선 그려서 과적합 여부 확인하기..
"""
model = model_fn()
model
Out[106]:
<keras.engine.sequential.Sequential at 0x296c26a2a00>
In [107]:
model.compile(optimizer="adam", loss='sparse_categorical_crossentropy', metrics=['accuracy'])
In [108]:
history = model.fit(train_scaled, train_target, epochs=200, verbose=1,
validation_data=(val_scaled,val_target))
Epoch 1/200
1500/1500 [==============================] - 2s 911us/step - loss: 0.5238 - accuracy: 0.8159 - val_loss: 0.4110 - val_accuracy: 0.8562
Epoch 2/200
1500/1500 [==============================] - 1s 838us/step - loss: 0.3970 - accuracy: 0.8589 - val_loss: 0.3777 - val_accuracy: 0.8644
Epoch 3/200
1500/1500 [==============================] - 1s 838us/step - loss: 0.3530 - accuracy: 0.8727 - val_loss: 0.3760 - val_accuracy: 0.8616
Epoch 4/200
1500/1500 [==============================] - 1s 816us/step - loss: 0.3253 - accuracy: 0.8830 - val_loss: 0.3537 - val_accuracy: 0.8724
Epoch 5/200
1500/1500 [==============================] - 1s 845us/step - loss: 0.3094 - accuracy: 0.8875 - val_loss: 0.3515 - val_accuracy: 0.8722
... (epochs 6 through 197 omitted; the pattern simply continues: training loss keeps falling toward ~0.02 while val_loss climbs steadily past 1.0, i.e. the model overfits) ...
Epoch 198/200
1500/1500 [==============================] - 1s 850us/step - loss: 0.0277 - accuracy: 0.9904 - val_loss: 1.1566 - val_accuracy: 0.8870
Epoch 199/200
1500/1500 [==============================] - 1s 858us/step - loss: 0.0275 - accuracy: 0.9906 - val_loss: 1.1580 - val_accuracy: 0.8877
Epoch 200/200
1500/1500 [==============================] - 1s 852us/step - loss: 0.0267 - accuracy: 0.9904 - val_loss: 1.1535 - val_accuracy: 0.8808
In [109]:
plt.title("Epoch-train&val loss(Adam)")
plt.plot(history.epoch, history.history["loss"])
plt.plot(history.epoch, history.history["val_loss"])
plt.xlabel("epoch")
plt.legend(["loss","val_loss"])
plt.show()
In [110]:
plt.title("Epoch-train&val - Accuracy")
plt.plot(history.epoch, history.history["accuracy"])
plt.plot(history.epoch, history.history["val_accuracy"])
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.grid()
plt.legend(["accuracy","val_accuracy"])
Out[110]:
<matplotlib.legend.Legend at 0x2969d3cda60>
Performance regularization¶
In [ ]:
"""
<성능 규제>
- 성능(과적합 여부 포함)을 높이기 위한 방법
- 보통 전처리 계층을 사용하게 됩니다.
- 전처리 계층은 훈련에 영향을 미치지 않음
"""
Regularization method - Dropout()¶
In [ ]:
"""
<드롭 아웃(Dropout)>
- 훈련 과정 중 일부 특성들을 랜덤으로 제외 시켜서 과대적합을 해소하는 방법
- 딥러닝에서 자주 사용하는 전처리 계층으로 성능 개선에 효율적으로 사용함
<사용방법>
- 계층의 중간에 은닉층(hidden layer)으로 추가하여 사용됨
- 훈련에 관여하지는 않음 -> 데이터에 대한 전처리라고 보시면 됩니다.
** 조금 멍청하게 만드는 방법
"""
In [118]:
"""모델 생성하기"""
"""Dropout(0.3) : 사용되는 특성 30% 정도를 제외하기"""
dropout_layer = keras.layers.Dropout(0.3)
model = model_fn(dropout_layer)
model.summary()
Model: "sequential_17"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_13 (Flatten) (None, 784) 0
dense_22 (Dense) (None, 100) 78500
dropout_2 (Dropout) (None, 100) 0
dense_23 (Dense) (None, 10) 1010
=================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
_________________________________________________________________
In [119]:
model.compile(optimizer="adam", loss='sparse_categorical_crossentropy', metrics=['accuracy'])
In [120]:
history = model.fit(train_scaled, train_target, epochs=14, verbose=1,
validation_data=(val_scaled,val_target))
Epoch 1/14
1500/1500 [==============================] - 2s 934us/step - loss: 0.5898 - accuracy: 0.7912 - val_loss: 0.4356 - val_accuracy: 0.8452
Epoch 2/14
1500/1500 [==============================] - 1s 851us/step - loss: 0.4397 - accuracy: 0.8402 - val_loss: 0.4070 - val_accuracy: 0.8515
Epoch 3/14
1500/1500 [==============================] - 1s 841us/step - loss: 0.4045 - accuracy: 0.8514 - val_loss: 0.3881 - val_accuracy: 0.8536
Epoch 4/14
1500/1500 [==============================] - 1s 843us/step - loss: 0.3820 - accuracy: 0.8595 - val_loss: 0.3645 - val_accuracy: 0.8662
Epoch 5/14
1500/1500 [==============================] - 1s 887us/step - loss: 0.3665 - accuracy: 0.8658 - val_loss: 0.3485 - val_accuracy: 0.8748
Epoch 6/14
1500/1500 [==============================] - 1s 847us/step - loss: 0.3570 - accuracy: 0.8692 - val_loss: 0.3420 - val_accuracy: 0.8777
Epoch 7/14
1500/1500 [==============================] - 1s 868us/step - loss: 0.3468 - accuracy: 0.8722 - val_loss: 0.3339 - val_accuracy: 0.8799
Epoch 8/14
1500/1500 [==============================] - 1s 854us/step - loss: 0.3323 - accuracy: 0.8770 - val_loss: 0.3297 - val_accuracy: 0.8776
Epoch 9/14
1500/1500 [==============================] - 1s 875us/step - loss: 0.3262 - accuracy: 0.8774 - val_loss: 0.3361 - val_accuracy: 0.8779
Epoch 10/14
1500/1500 [==============================] - 1s 871us/step - loss: 0.3200 - accuracy: 0.8818 - val_loss: 0.3291 - val_accuracy: 0.8785
Epoch 11/14
1500/1500 [==============================] - 1s 849us/step - loss: 0.3149 - accuracy: 0.8830 - val_loss: 0.3234 - val_accuracy: 0.8840
Epoch 12/14
1500/1500 [==============================] - 1s 845us/step - loss: 0.3088 - accuracy: 0.8854 - val_loss: 0.3260 - val_accuracy: 0.8808
Epoch 13/14
1500/1500 [==============================] - 1s 930us/step - loss: 0.3051 - accuracy: 0.8846 - val_loss: 0.3191 - val_accuracy: 0.8841
Epoch 14/14
1500/1500 [==============================] - 1s 853us/step - loss: 0.3017 - accuracy: 0.8873 - val_loss: 0.3205 - val_accuracy: 0.8820
In [121]:
plt.title("Epoch-train&val loss(Adam-Dropout)")
plt.plot(history.epoch, history.history["loss"])
plt.plot(history.epoch, history.history["val_loss"])
plt.xlabel("epoch")
plt.legend(["loss","val_loss"])
plt.show()
In [122]:
plt.title("Epoch-train&val - Accuracy(Adam-Dropout)")
plt.plot(history.epoch, history.history["accuracy"])
plt.plot(history.epoch, history.history["val_accuracy"])
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.grid()
plt.legend(["accuracy","val_accuracy"])
Out[122]:
<matplotlib.legend.Legend at 0x296c2886790>
Saving and restoring a model¶
In [ ]:
"""
<모델 지정하는 방법>
* 가중치만 저장하기
- 모델이 훈련하면서 찾아낸 가중치 값들만 저장하기
- 모델 자체가 저장되지는 않습니다.
- 모델 신규생성 > 저장된 가중치 불러와서 반영 > 예측 진행
- 별도로 훈련(fit)은 하지 않아도 됩니다.
* 모델 자체 저장하기
- 저장된 모델을 불러와서 > 예측 진행
"""
In [123]:
"""가중치 저장하기"""
model.save_weights("./model/model_weights.h5")
In [125]:
model_weight = model_fn(keras.layers.Dropout(0.3))
model_weight.summary()
model_weight.load_weights("./model/model_weights.h5")
Model: "sequential_19"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_15 (Flatten) (None, 784) 0
dense_26 (Dense) (None, 100) 78500
dropout_4 (Dropout) (None, 100) 0
dense_27 (Dense) (None, 10) 1010
=================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
_________________________________________________________________
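Note that load_weights restores weights only: before evaluate() or further fit() can be called, the rebuilt model_weight needs its own compile() (predict() alone works without it). A sketch:

model_weight.compile(loss="sparse_categorical_crossentropy", metrics="accuracy")
model_weight.evaluate(val_scaled, val_target)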
Saving and loading the model itself¶
In [126]:
"""모델 자체 저장하기"""
model.save("./model/model_all.h5")
In [128]:
"""모델 자체 불러들이기"""
model_all = keras.models.load_model("./model/model_all.h5")
model_all.summary()
"""이후부터는 바로 예측으로 사용가능"""
Model: "sequential_17"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_13 (Flatten) (None, 784) 0
dense_22 (Dense) (None, 100) 78500
dropout_2 (Dropout) (None, 100) 0
dense_23 (Dense) (None, 10) 1010
=================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
_________________________________________________________________
In [130]:
"""예측하기"""
pred_data = model_all.predict(val_scaled)
pred_data[0]
Out[130]:
array([2.5053196e-10, 7.1568843e-20, 2.9653634e-13, 7.9270129e-16,
1.2597780e-10, 8.9381620e-06, 3.4937539e-12, 3.9884365e-10,
9.9999106e-01, 4.6605374e-13], dtype=float32)
In [131]:
import numpy as np
np.argmax(pred_data[0]), val_target[0]
Out[131]:
(8, 8)
In [132]:
"""예측 결과의 모든행에 대해서 가장 높은 값을 가지는 열의 인덱스 위치 추출"""
np.argmax(pred_data, axis=1)
Out[132]:
array([8, 8, 7, ..., 8, 8, 6], dtype=int64)
In [133]:
pred_data
Out[133]:
array([[2.5053196e-10, 7.1568843e-20, 2.9653634e-13, ..., 3.9884365e-10,
9.9999106e-01, 4.6605374e-13],
[1.4917783e-08, 1.1555841e-18, 1.4186421e-09, ..., 9.7033564e-16,
1.0000000e+00, 1.4837005e-14],
[4.4840420e-13, 2.4854863e-15, 9.1234241e-15, ..., 9.9836415e-01,
1.0773331e-08, 2.4058189e-05],
...,
[1.5103324e-06, 3.3118898e-16, 9.7406847e-09, ..., 2.0191558e-10,
9.9999452e-01, 2.0948869e-09],
[9.9100728e-09, 1.0316789e-18, 1.3353646e-10, ..., 1.9365596e-12,
1.0000000e+00, 1.1215463e-14],
[4.7344062e-02, 2.4619487e-07, 1.2514123e-03, ..., 1.1174584e-04,
1.4394047e-04, 4.5353267e-03]], dtype=float32)
In [134]:
val_pred = np.argmax(pred_data, axis=1)
val_pred
Out[134]:
array([8, 8, 7, ..., 8, 8, 6], dtype=int64)
In [135]:
val_target
Out[135]:
array([8, 8, 7, ..., 8, 8, 1], dtype=uint8)
In [ ]:
"""
- 정답갯수, 오답갯수, 정답률, 오답률을 출력해주세요
"""
In [ ]:
# number of correct answers
correct_count = np.sum(val_pred == val_target)
# len(val_pred[val_pred == val_target])
# number of wrong answers
incorrect_count = np.sum(val_pred != val_target)
# len(val_pred[val_pred != val_target])
# accuracy
accuracy_rate = correct_count / len(val_target)
# error rate
error_rate = incorrect_count / len(val_target)
print(f"correct answers : {correct_count}")
print(f"wrong answers : {incorrect_count}")
print(f"accuracy : {accuracy_rate}")
print(f"error rate : {error_rate}")
Improving performance - Callback functions¶
In [ ]:
"""
<콜백함수(Callback Function)>
- 모델 훈련 중에 특정 작업(함수)를 호출할 수 있는 기능
- 훈련(fit)시에 지정하는 함수를 호출하는 방식
- 훈련 중에 발생시키는 "이벤트"라고 생각하시면 됩니다.
- 별도의 계층은 아니며, 속성(매개변수)으로 정의 됩니다.
<콜백함수 종류>
- ModelCheckpoint()
: epoch 마다 모델을 저장하는 방식
: 단, 앞에서 실행된 훈련 성능보다 높아진 경우에만 저장됨
- EaralyStopping()
: 훈련이 더 이상 좋아지지 않으면 훈련(fit)을 종료시키는 방식
: 일반적으로 ModelCheckpoint()와 함께 사용
"""
The ModelCheckpoint callback¶
In [139]:
"""1. 모델 생성하기"""
dropout_layer = keras.layers.Dropout(0.3)
model = model_fn(dropout_layer)
model.summary()
Model: "sequential_20"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_16 (Flatten) (None, 784) 0
dense_28 (Dense) (None, 100) 78500
dropout_5 (Dropout) (None, 100) 0
dense_29 (Dense) (None, 10) 1010
=================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
_________________________________________________________________
In [140]:
"""2. 모델 설정하기"""
model.compile(optimizer="adam", loss='sparse_categorical_crossentropy', metrics=['accuracy'])
In [141]:
"""3. 콜백함수 생성하기
- 훈련(fit) 전에 생성합니다.
- save_best_only = True
: 이전에 수행한 검증 손실율보다 좋을 때 마다 훈련모델 자동 저장시키기
: 훈련이 종료되면, 가장 좋은 모델만 저장되어 있습니다.
- save_best_only = False
: epoch마다 훈련모델 자동 저장 시키기
- 저장된 모델은 : 모델 자체가 저장되는 방식으로 추후 불러들인 후 바로 예측 가능
"""
checkpoint_cb = keras.callbacks.ModelCheckpoint(
"./model/best_model.h5",
save_best_only=True
)
checkpoint_cb
Out[141]:
<keras.callbacks.ModelCheckpoint at 0x2969be44f40>
In [143]:
"""
- fit()함수 내에 콜백함수 매개변수에 정의
"""
history = model.fit(
train_scaled, train_target, epochs=10, verbose=1,
validation_data=(val_scaled,val_target),
callbacks=[checkpoint_cb]
)
Epoch 1/10
1500/1500 [==============================] - 2s 996us/step - loss: 0.5827 - accuracy: 0.7956 - val_loss: 0.4202 - val_accuracy: 0.8459
Epoch 2/10
1500/1500 [==============================] - 1s 871us/step - loss: 0.4354 - accuracy: 0.8430 - val_loss: 0.3896 - val_accuracy: 0.8570
Epoch 3/10
1500/1500 [==============================] - 1s 876us/step - loss: 0.4041 - accuracy: 0.8547 - val_loss: 0.3799 - val_accuracy: 0.8550
Epoch 4/10
1500/1500 [==============================] - 1s 849us/step - loss: 0.3818 - accuracy: 0.8623 - val_loss: 0.3459 - val_accuracy: 0.8755
Epoch 5/10
1500/1500 [==============================] - 1s 851us/step - loss: 0.3630 - accuracy: 0.8665 - val_loss: 0.3420 - val_accuracy: 0.8747
Epoch 6/10
1500/1500 [==============================] - 1s 851us/step - loss: 0.3536 - accuracy: 0.8701 - val_loss: 0.3408 - val_accuracy: 0.8769
Epoch 7/10
1500/1500 [==============================] - 1s 860us/step - loss: 0.3421 - accuracy: 0.8734 - val_loss: 0.3423 - val_accuracy: 0.8713
Epoch 8/10
1500/1500 [==============================] - 1s 866us/step - loss: 0.3373 - accuracy: 0.8768 - val_loss: 0.3383 - val_accuracy: 0.8800
Epoch 9/10
1500/1500 [==============================] - 1s 850us/step - loss: 0.3259 - accuracy: 0.8799 - val_loss: 0.3244 - val_accuracy: 0.8811
Epoch 10/10
1500/1500 [==============================] - 1s 839us/step - loss: 0.3193 - accuracy: 0.8806 - val_loss: 0.3340 - val_accuracy: 0.8769
In [144]:
model_cp_cb = keras.models.load_model("./model/best_model.h5")
model_cp_cb.summary()
Model: "sequential_20"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_16 (Flatten) (None, 784) 0
dense_28 (Dense) (None, 100) 78500
dropout_5 (Dropout) (None, 100) 0
dense_29 (Dense) (None, 10) 1010
=================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
_________________________________________________________________
The EarlyStopping() callback¶
In [ ]:
"""
- 훈련 성능이 더 이상 좋아지지 않으면 훈련을 종료시킴
- 과대적합을 해소하는데 좋음(과대 적합이 발생하기 전에 종료시킴)
"""
In [145]:
"""1. 모델생성"""
dropout_layer = keras.layers.Dropout(0.3)
model = model_fn(dropout_layer)
model.summary()
Model: "sequential_21"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_17 (Flatten) (None, 784) 0
dense_30 (Dense) (None, 100) 78500
dropout_6 (Dropout) (None, 100) 0
dense_31 (Dense) (None, 10) 1010
=================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
_________________________________________________________________
In [146]:
model.compile(optimizer="adam", loss='sparse_categorical_crossentropy', metrics=['accuracy'])
In [148]:
"""3. 콜백함수 생성하기"""
""" - ModelCheckPoint() """
checkpoint_cb = keras.callbacks.ModelCheckpoint(
"./model/best_model.h5",
save_best_only=True
)
""" -EarlyStopping()
- patience=2 : 더 이상 좋아지지 않는 epoch의 갯수 지정
: 가장 좋은 시점의 epoch 이후 2번 더 수행 후
그래도 좋아지지 않으면 종료시킨다는 의미
- restore_best_weights=True : 가장 낮은 검증 손실을 나타낸 모델의 하이퍼파라메터로
모델을 업데이트 시킴
"""
early_stopping_cb = keras.callbacks.EarlyStopping(
patience=2,
restore_best_weights=True
)
In [149]:
history = model.fit(
train_scaled, train_target, epochs=100, verbose=1,
validation_data=(val_scaled,val_target),
callbacks=[checkpoint_cb, early_stopping_cb]
)
Epoch 1/100
1500/1500 [==============================] - 2s 902us/step - loss: 0.5884 - accuracy: 0.7913 - val_loss: 0.4214 - val_accuracy: 0.8471
Epoch 2/100
1500/1500 [==============================] - 1s 886us/step - loss: 0.4378 - accuracy: 0.8414 - val_loss: 0.3860 - val_accuracy: 0.8577
Epoch 3/100
1500/1500 [==============================] - 1s 838us/step - loss: 0.4046 - accuracy: 0.8540 - val_loss: 0.3808 - val_accuracy: 0.8622
Epoch 4/100
1500/1500 [==============================] - 1s 883us/step - loss: 0.3807 - accuracy: 0.8598 - val_loss: 0.3555 - val_accuracy: 0.8685
Epoch 5/100
1500/1500 [==============================] - 1s 857us/step - loss: 0.3627 - accuracy: 0.8666 - val_loss: 0.3486 - val_accuracy: 0.8717
Epoch 6/100
1500/1500 [==============================] - 1s 867us/step - loss: 0.3529 - accuracy: 0.8689 - val_loss: 0.3357 - val_accuracy: 0.8791
Epoch 7/100
1500/1500 [==============================] - 1s 851us/step - loss: 0.3420 - accuracy: 0.8750 - val_loss: 0.3407 - val_accuracy: 0.8739
Epoch 8/100
1500/1500 [==============================] - 1s 879us/step - loss: 0.3301 - accuracy: 0.8776 - val_loss: 0.3337 - val_accuracy: 0.8793
Epoch 9/100
1500/1500 [==============================] - 1s 840us/step - loss: 0.3268 - accuracy: 0.8791 - val_loss: 0.3380 - val_accuracy: 0.8758
Epoch 10/100
1500/1500 [==============================] - 1s 859us/step - loss: 0.3173 - accuracy: 0.8842 - val_loss: 0.3266 - val_accuracy: 0.8816
Epoch 11/100
1500/1500 [==============================] - 1s 901us/step - loss: 0.3095 - accuracy: 0.8847 - val_loss: 0.3254 - val_accuracy: 0.8832
Epoch 12/100
1500/1500 [==============================] - 1s 869us/step - loss: 0.3049 - accuracy: 0.8861 - val_loss: 0.3428 - val_accuracy: 0.8762
Epoch 13/100
1500/1500 [==============================] - 1s 846us/step - loss: 0.3033 - accuracy: 0.8867 - val_loss: 0.3335 - val_accuracy: 0.8792
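The callback records where it stopped; with patience=2 the best epoch sits two before the stopping point (a sketch, with 0-based epoch indices):

print(early_stopping_cb.stopped_epoch)      # 12 here (i.e. Epoch 13/100)
print(early_stopping_cb.stopped_epoch - 2)  # 10: the epoch with the lowest val_loss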
In [150]:
model_f = keras.models.load_model("./model/best_model.h5")
model_f.summary()
model_f.evaluate(train_scaled, train_target)
model_f.evaluate(val_scaled, val_target)
Model: "sequential_21"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_17 (Flatten) (None, 784) 0
dense_30 (Dense) (None, 100) 78500
dropout_6 (Dropout) (None, 100) 0
dense_31 (Dense) (None, 10) 1010
=================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
_________________________________________________________________
1500/1500 [==============================] - 1s 553us/step - loss: 0.2613 - accuracy: 0.9036
375/375 [==============================] - 0s 573us/step - loss: 0.3254 - accuracy: 0.8832
Out[150]:
[0.3254171907901764, 0.8831666707992554]
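Since test_scaled/test_target were never used for training or model selection, a final check of the restored best model on them would look like this (a sketch; exact numbers vary run to run):

model_f.evaluate(test_scaled, test_target)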