Dogs vs. Cats: Using the Pretrained VGG16 Model (①)
- Method ①: run the convolutional base over the dataset once, save its output as a NumPy array, and use that array as the input to a standalone densely connected classifier. This is fast and cheap, but it cannot use data augmentation.
- Method ②: extend the existing model (i.e. conv_base) by adding Dense layers on top and run the whole model end to end on the input data. This makes data augmentation possible.
- This post covers method ①. Both methods rely on the same VGG16 convolutional base; a sketch of how to load it follows.
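The listings below call `conv_base` without ever defining it, so here is a minimal sketch of how the VGG16 base is typically loaded with the standard Keras applications API; the `input_shape=(150, 150, 3)` matches the `target_size` used later and yields the 4 × 4 × 512 feature map the classifier expects.

```python
from keras.applications import VGG16

# Load the VGG16 convolutional base, pretrained on ImageNet,
# without the densely connected classifier on top
conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(150, 150, 3))  # final feature map: (4, 4, 512)
```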
### Data import
Copying images into training, validation, and test directories
import os
import shutil

TRAIN_DIR = 'D:\\Jupyter\\dogs-vs-cats\\'              # root of the new, smaller dataset
BASE_DIR = 'E:\\BaiduNetdiskDownload\\kaggle\\train\\' # original Kaggle data

# Directory layout, using the train folder as an example
# (uncomment the mkdir calls on a first run)
train_dir = os.path.join(TRAIN_DIR, 'train')
#os.mkdir(train_dir)
train_cats_dir = os.path.join(train_dir, 'cats')
#os.mkdir(train_cats_dir)
train_dogs_dir = os.path.join(train_dir, 'dogs')
#os.mkdir(train_dogs_dir)
# The validation and test folders follow the same layout;
# these two are needed by the copy loops below
validation_cats_dir = os.path.join(TRAIN_DIR, 'validation', 'cats')
test_cats_dir = os.path.join(TRAIN_DIR, 'test', 'cats')

# Copying images, using the cat pictures as an example:
# 1,000 for training, 500 for validation, 500 for testing
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(BASE_DIR, fname)
    dst = os.path.join(train_cats_dir, fname)
    shutil.copyfile(src, dst)

fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(BASE_DIR, fname)
    dst = os.path.join(validation_cats_dir, fname)
    shutil.copyfile(src, dst)

fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(BASE_DIR, fname)
    dst = os.path.join(test_cats_dir, fname)
    shutil.copyfile(src, dst)
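As a quick sanity check (not part of the original listing), you can count the copied files; once the dog pictures have been copied the same way, each class directory should hold 1,000 training and 500 validation/test images:

```python
# Count the cat images in each split as a sanity check
print('training cat images:', len(os.listdir(train_cats_dir)))
print('validation cat images:', len(os.listdir(validation_cats_dir)))
print('test cat images:', len(os.listdir(test_cats_dir)))
```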

### Building the model
# Define the densely connected classifier
from keras import models
from keras import layers
from keras import optimizers

model = models.Sequential()
# Input is the flattened VGG16 feature map: 4 * 4 * 512 = 8192 values
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='binary_crossentropy',
              metrics=['acc'])
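As a quick check on the architecture, `model.summary()` (a standard Keras call, not in the original listing) shows the expected shapes and parameter counts:

```python
model.summary()  # first Dense layer: 8192 * 256 + 256 = 2,097,408 parameters
```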
### Data preprocessing
Without data augmentation
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

base_dir = 'D:\\Jupyter\\dogs-vs-cats\\'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

datagen = ImageDataGenerator(rescale=1./255)
Reading images from the directories with ImageDataGenerator
batch_size = 20

def extract_features(directory, sample_count):
    # VGG16's final feature map for 150x150 inputs is (4, 4, 512)
    features = np.zeros(shape=(sample_count, 4, 4, 512))
    labels = np.zeros(shape=(sample_count))
    generator = datagen.flow_from_directory(
        directory,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size : (i + 1) * batch_size] = features_batch
        labels[i * batch_size : (i + 1) * batch_size] = labels_batch
        i += 1
        # The generator yields batches endlessly, so break once
        # every sample has been seen
        if i * batch_size >= sample_count:
            break
    return features, labels
# Reshape: flatten the extracted features so they can be fed
# to the densely connected classifier
train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)
train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))
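Method ① is literally about saving the conv base's output as NumPy arrays, so it can be worth caching the extracted features on disk and skipping the expensive conv_base.predict pass on later runs. A minimal sketch with np.save/np.load (the file names are illustrative, not from the original):

```python
# Cache the extracted features so conv_base only has to run once
# (the .npy file names here are arbitrary examples)
np.save('train_features.npy', train_features)
np.save('train_labels.npy', train_labels)

# On a later run, reload them without recomputing
train_features = np.load('train_features.npy')
train_labels = np.load('train_labels.npy')
```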
Fitting the model on the extracted features (plain NumPy arrays, so no generator is needed). Training is very fast because only the two Dense layers have to be computed.
history = model.fit(train_features, train_labels,
                    epochs=30,
                    batch_size=20,
                    validation_data=(validation_features, validation_labels))
Plotting the loss and accuracy curves from training
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc)+1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and Validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()
plt.show()
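The original listing stops at the plots, but since test features were extracted above, the classifier's generalization can be checked with Keras's standard evaluate call:

```python
# Evaluate the trained classifier on the flattened test-set features
test_loss, test_acc = model.evaluate(test_features, test_labels)
print('test acc:', test_acc)
```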