-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathimage_classification.py
More file actions
104 lines (80 loc) · 3.27 KB
/
image_classification.py
File metadata and controls
104 lines (80 loc) · 3.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
from typing import cast
import tensorflow as tf
import tensorflow_datasets as tfds
import keras_tuner as kt
import numpy as np
from utils import (
Sequential, layers, losses, optimizers, callbacks, activations)
# Maximum pixel intensity for 8-bit images.
# NOTE(review): defined but never used in this file — normalization
# appears to be handled by the BatchNormalization layers instead;
# confirm before deleting.
MAX_VALUE = 255.0
# Human-readable class names for the 10 Fashion-MNIST labels,
# indexed by the integer label value.
LABEL_NAMES = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress',
'Coat', 'Sandal','Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Shape of the value returned by tfds.load for a three-way split.
Load_Response = tuple[
tf.data.Dataset, tf.data.Dataset, tf.data.Dataset]
# Load Fashion-MNIST as supervised (image, label) pairs, batched by 128:
# 80% of the train split for training, the last 20% for validation,
# and the full test split held out for final evaluation.
train_data, validation_data, test_data = cast(
Load_Response,
tfds.load(
'fashion_mnist',
split=('train[:80%]', 'train[80%:]', 'test'),
batch_size=128, as_supervised=True))
def hypermodel_builder(hp):
    """Build a compiled Fashion-MNIST classifier for KerasTuner.

    Tunable hyperparameters:
      * ``units`` — width of the hidden Dense layer (32..512, step 32).
      * ``learning_rate`` — one of 1e-2, 1e-3, 1e-4.
      * ``optimizer`` — ``'sgd'`` or ``'adam'``.

    Args:
        hp: ``keras_tuner.HyperParameters`` sampler for this trial.

    Returns:
        A compiled ``Sequential`` model emitting 10 logits per image.
    """
    # Tune the number of units in the first Dense layer
    hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
    # Tune the learning rate for the optimizer
    hp_learning_rate = hp.Choice(
        'learning_rate', values=[1e-2, 1e-3, 1e-4])
    # BUG FIX: hp.Choice only accepts int/float/str/bool values, so
    # passing the optimizer classes themselves raises at search time.
    # Choose by name, then map the name back to the optimizer class.
    optimizer_name = hp.Choice('optimizer', values=['sgd', 'adam'])
    optimizer_cls = {
        'sgd': optimizers.SGD,
        'adam': optimizers.Adam,
    }[optimizer_name]
    model = Sequential([
        # 28x28 grayscale images flattened to a 784-vector.
        layers.Flatten(input_shape=(28, 28)),
        layers.BatchNormalization(),
        layers.Dense(units=hp_units, activation=activations.relu),
        layers.BatchNormalization(),
        # 10 output logits, one per Fashion-MNIST class.
        layers.Dense(10)
    ])
    model.compile(
        optimizer=optimizer_cls(learning_rate=hp_learning_rate),
        # from_logits=True because the final layer has no softmax.
        loss=losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model
# Hyperband tuner: successive-halving search over the hypermodel,
# keeping trial state under ./tuner_files so searches can resume.
tuner = kt.Hyperband(
hypermodel_builder, objective='val_accuracy', max_epochs=10,
factor=3, project_name='tuner_files')
# Run the search; early stopping cuts off trials whose validation
# loss stops improving for 5 consecutive epochs.
tuner.search(
train_data, validation_data=validation_data, epochs=50,
callbacks=[callbacks.EarlyStopping(
monitor='val_loss', patience=5)]
)
# Best hyperparameters found by the search (list is sorted best-first).
best_hps = tuner.get_best_hyperparameters()[0]
# Train a fresh model with the winning hyperparameters for the full
# 50 epochs to discover the epoch with peak validation accuracy.
base_model = tuner.hypermodel.build(best_hps)
history = base_model.fit(
train_data, validation_data=validation_data, epochs=50)
# Find the optimal number of epochs
val_acc_per_epoch = history.history['val_accuracy']
best_epoch = val_acc_per_epoch.index(max(val_acc_per_epoch)) + 1
print('Best epoch: ', best_epoch)
# Rebuild once more and retrain only up to the best epoch, so the
# final model is not over-fitted past its validation peak.
model = tuner.hypermodel.build(best_hps)
# Retrain model with hyperparameters and optimal epochs
model.fit(
train_data, validation_data=validation_data, epochs=best_epoch)
# Held-out evaluation: [loss, accuracy] on the test split.
evaluation = model.evaluate(test_data)
print("[test loss, test accuracy]:", evaluation)
# Wrap the trained model with a Softmax layer so its logit outputs
# become per-class probabilities.
probability_model = Sequential([model, layers.Softmax()])
# Strip the labels: predict() only needs the images.
images_only = test_data.map(lambda image, label: image)
# Predict class probabilities for every image in the test set.
predictions = probability_model.predict(images_only)
# Pull the first (images, labels) batch for spot-checking results.
batch_images, batch_labels = next(iter(test_data.take(1)))
# Each prediction is a length-10 probability vector; argmax picks
# the most likely class index.
print('Prediction: ', np.argmax(predictions[0]))
print('Label: ', batch_labels[0])
# Demonstrate a single-image prediction.
sample = batch_images[1]
# tf.keras models predict on batches, so give the lone image a
# leading batch axis of size 1.
sample = np.expand_dims(sample, 0)
sample_prediction = probability_model.predict(sample)
print('Prediction: ', np.argmax(sample_prediction[0]))
print('Label: ', batch_labels[1])