Dogs vs. Cats: Using the Pretrained VGG16 Model ①
- Method ①: run the convolutional base over the data, save its output as NumPy arrays, and use those arrays as the input to a standalone densely connected classifier. This is fast and cheap, but it cannot use data augmentation (a sketch of this approach follows below).
- Method ②: extend the existing model (i.e. conv_base) by adding Dense layers on top and run the whole model end to end on the input data. This allows data augmentation.

This post implements Method ②.
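For contrast, here is a minimal sketch of Method ① (not from the original post): the pretrained convolutional base is run once over the data and its outputs are cached as NumPy arrays that a small Dense classifier can then train on quickly. It assumes the `train_dir` folder and 150×150 images that are set up below.

```python
import numpy as np
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator

conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20

def extract_features(directory, sample_count):
    # VGG16 with include_top=False maps a 150x150x3 image to a 4x4x512 feature map
    features = np.zeros((sample_count, 4, 4, 512))
    labels = np.zeros((sample_count,))
    generator = datagen.flow_from_directory(
        directory, target_size=(150, 150), batch_size=batch_size, class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        # Run the pretrained base once per batch and cache the result
        features[i * batch_size:(i + 1) * batch_size] = conv_base.predict(inputs_batch)
        labels[i * batch_size:(i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break
    return features, labels

train_features, train_labels = extract_features(train_dir, 2000)
# The cached features would then be flattened and fed to a small Dense classifier.
```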
### Importing the data
Copy the images into training, validation, and test directories
```python
import os
import shutil

# Destination of the smaller dataset used in this post
TRAIN_DIR = 'D:\\Jupyter\\dogs-vs-cats\\'
# Folder holding the original Kaggle training images
BASE_DIR = 'E:\\BaiduNetdiskDownload\\kaggle\\train\\'

# Example for the train folder (uncomment the mkdir calls on the first run)
train_dir = os.path.join(TRAIN_DIR, 'train')
#os.mkdir(train_dir)
train_cats_dir = os.path.join(train_dir, 'cats')
#os.mkdir(train_cats_dir)
train_dogs_dir = os.path.join(train_dir, 'dogs')
#os.mkdir(train_dogs_dir)

# The validation and test folders are built the same way
validation_dir = os.path.join(TRAIN_DIR, 'validation')
validation_cats_dir = os.path.join(validation_dir, 'cats')
test_dir = os.path.join(TRAIN_DIR, 'test')
test_cats_dir = os.path.join(test_dir, 'cats')

# Example of copying the cat images (the dog images are copied in the same way)
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(BASE_DIR, fname)
    dst = os.path.join(train_cats_dir, fname)
    shutil.copyfile(src, dst)

fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(BASE_DIR, fname)
    dst = os.path.join(validation_cats_dir, fname)
    shutil.copyfile(src, dst)

fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(BASE_DIR, fname)
    dst = os.path.join(test_cats_dir, fname)
    shutil.copyfile(src, dst)

# If the steps above were done before, just set the paths:
#import os
#base_dir = 'D:\\Jupyter\\dogs-vs-cats\\'
#train_dir = os.path.join(base_dir, 'train')
#validation_dir = os.path.join(base_dir, 'validation')
#test_dir = os.path.join(base_dir, 'test')
```

![Directory structure](https://upload-images.jianshu.io/upload_images/2145769-86964833bc33a46a.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/245)
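A quick sanity check (not part of the original post) confirms how many images ended up in each split, using the directory variables defined above:

```python
# Expected counts: 1000 training cats, 500 validation cats, 500 test cats
print('training cat images:', len(os.listdir(train_cats_dir)))
print('validation cat images:', len(os.listdir(validation_cats_dir)))
print('test cat images:', len(os.listdir(test_cats_dir)))
print('training dog images:', len(os.listdir(train_dogs_dir)))
```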
### Building the model
```python
from keras.applications import VGG16   # the pretrained VGG16 convolutional base
from keras import models
from keras import layers
from keras import optimizers

conv_base = VGG16(weights='imagenet',         # weight checkpoint used to initialize the model
                  include_top=False,          # do not include the densely connected classifier on top
                  input_shape=(150, 150, 3))  # shape of the image tensors fed to the network

# Freeze the convolutional base before compiling
conv_base.trainable = False

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

# Compile
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='binary_crossentropy',
              metrics=['acc'])
```
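Freezing conv_base matters: if it stayed trainable, the pretrained convolutional weights would be updated and their representations destroyed while the randomly initialized Dense layers train. A quick check (not in the original post) that the freeze took effect:

```python
# With conv_base frozen, only the two Dense layers train:
# 2 kernels + 2 biases = 4 trainable weight tensors
print('trainable weight tensors:', len(model.trainable_weights))
model.summary()
```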
### Data preprocessing
Set up data augmentation for the training data (only the training images are augmented; validation and test images are just rescaled)
```python
from keras.preprocessing.image import ImageDataGenerator

# Augment only the training images
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

# Validation and test data must not be augmented
test_datagen = ImageDataGenerator(rescale=1./255)
```
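To see what these augmentation settings do, one training image can be displayed after a few random transformations (an illustrative sketch, not in the original post; it assumes `train_cats_dir` from the data-import step):

```python
import os
import matplotlib.pyplot as plt
from keras.preprocessing import image

# Pick one training cat image and show four random augmentations of it
fnames = [os.path.join(train_cats_dir, f) for f in sorted(os.listdir(train_cats_dir))]
img = image.load_img(fnames[0], target_size=(150, 150))
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)   # flow() expects a batch dimension

i = 0
for batch in train_datagen.flow(x, batch_size=1):
    plt.figure(i)
    plt.imshow(image.array_to_img(batch[0]))
    i += 1
    if i % 4 == 0:
        break
plt.show()
```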
Use ImageDataGenerator to read images from the directories
```python
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')   # binary labels, because the loss is binary_crossentropy

validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
```
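It is worth inspecting one batch to confirm the shapes and the binary label encoding (a quick check, not in the original post):

```python
for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)      # expected: (20, 150, 150, 3)
    print('labels batch shape:', labels_batch.shape)  # expected: (20,)
    break   # the generator loops forever, so stop after one batch
```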
Fit the model with the batch generators. With batch_size=20, steps_per_epoch=100 draws 2,000 training images per epoch, and validation_steps=50 covers the 1,000 validation images.
```python
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50)
```
Plot the loss and accuracy curves from training
```python
import matplotlib.pyplot as plt

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
```
### Saving the model
```python
model.save('cats_and_dogs_VGG16_method2.h5')
```
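To reuse the saved model later, it can be reloaded and evaluated on the held-out test set (a sketch, not in the original post, assuming the `test_dir` and `test_datagen` defined above and the Keras 2.x generator API used throughout this post):

```python
from keras.models import load_model

model = load_model('cats_and_dogs_VGG16_method2.h5')

test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

# 50 steps x 20 images per batch = the 1,000 test images
test_loss, test_acc = model.evaluate_generator(test_generator, steps=50)
print('test acc:', test_acc)
```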