项目实践--处理高光谱数据

一维上的

模型构建

from keras import models
from keras import layers
from keras import regularizers

# 1-D CNN for 200-band hyperspectral pixel spectra: five Conv1D + MaxPool1D
# stages, then a dense layer with L2 regularization, dropout, and a 16-way
# softmax classifier.
# Use the public `layers.Conv1D` alias rather than the internal
# `layers.convolutional.Conv1D` module path, which is not part of the stable
# Keras API and is absent from modern releases.
model = models.Sequential()

# Input: one spectrum of shape (200, 1).
model.add(layers.Conv1D(6, 5, strides=1, padding='valid',
                        input_shape=(200, 1), activation="relu",
                        name="convolution_1d_layer1"))
model.add(layers.MaxPool1D(pool_size=2, strides=2, padding="valid",
                           name="max_pooling_layer1"))

model.add(layers.Conv1D(12, 5, strides=1, padding='valid',
                        activation="relu", name="convolution_1d_layer2"))
model.add(layers.MaxPool1D(pool_size=2, strides=2, padding="valid",
                           name="max_pooling_layer2"))

model.add(layers.Conv1D(24, 4, strides=1, padding='valid',
                        activation="relu", name="convolution_1d_layer3"))
model.add(layers.MaxPool1D(pool_size=2, strides=2, padding="valid",
                           name="max_pooling_layer3"))

model.add(layers.Conv1D(48, 5, strides=1, padding='valid',
                        activation="relu", name="convolution_1d_layer4"))
model.add(layers.MaxPool1D(pool_size=2, strides=2, padding="valid",
                           name="max_pooling_layer4"))

model.add(layers.Conv1D(96, 4, strides=1, padding='valid',
                        activation="relu", name="convolution_1d_layer5"))
# pool_size=1, strides=1 leaves the (6, 96) feature map unchanged; kept for
# structural symmetry with the earlier stages.
model.add(layers.MaxPool1D(pool_size=1, strides=1, padding="valid",
                           name="max_pooling_layer5"))

model.add(layers.Flatten())
model.add(layers.Dense(256, kernel_regularizer=regularizers.l2(0.001),
                       activation='relu', name="fc1"))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(16, activation='softmax'))
- 五层卷积层+池化层,一层全连接层、dropout层和softmax层。

### 数据维度变化预览
#model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
convolution_1d_layer1 (Conv1 (None, 196, 6)            36        
_________________________________________________________________
max_pooling_layer1 (MaxPooli (None, 98, 6)             0         
_________________________________________________________________
convolution_1d_layer2 (Conv1 (None, 94, 12)            372       
_________________________________________________________________
max_pooling_layer2 (MaxPooli (None, 47, 12)            0         
_________________________________________________________________
convolution_1d_layer3 (Conv1 (None, 44, 24)            1176      
_________________________________________________________________
max_pooling_layer3 (MaxPooli (None, 22, 24)            0         
_________________________________________________________________
convolution_1d_layer4 (Conv1 (None, 18, 48)            5808      
_________________________________________________________________
max_pooling_layer4 (MaxPooli (None, 9, 48)             0         
_________________________________________________________________
convolution_1d_layer5 (Conv1 (None, 6, 96)             18528     
_________________________________________________________________
max_pooling_layer5 (MaxPooli (None, 6, 96)             0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 576)               0         
_________________________________________________________________
fc1 (Dense)                  (None, 256)               147712    
_________________________________________________________________
dropout_1 (Dropout)          (None, 256)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 16)                4112      
=================================================================
Total params: 177,744
Trainable params: 177,744
Non-trainable params: 0
_________________________________________________________________

制作数据集和标签集(训练,验证)

import numpy as np
import scipy.io as sio

def make_one_hot(data, num_label):
    """One-hot encode integer class labels.

    ``data`` is an array of integer class ids (here loaded with shape
    (N, 1)); broadcasting against ``np.arange(num_label)`` produces an
    (N, num_label) 0/1 matrix. Uses the builtin ``int`` as the target dtype
    instead of the abstract ``np.integer`` scalar class, which is not a
    concrete dtype and is discouraged as an ``astype`` argument.
    """
    return (np.arange(num_label) == data).astype(int)

def MaxMinNormalization(matrix):
    """Min-max normalize ``matrix`` into the range [0, 1].

    Guards against a constant input (max == min), which would otherwise
    divide by zero and fill the result with NaN/inf; in that case an
    all-zeros array of the same shape is returned.
    """
    mat_min = np.min(matrix)
    mat_max = np.max(matrix)
    value_range = mat_max - mat_min
    if value_range == 0:
        # Constant matrix: no spread to normalize over.
        return np.zeros_like(matrix, dtype=float)
    return (matrix - mat_min) / value_range

def get_files():
    """Load train/test splits from ``data_sample_new.mat``.

    Normalizes the feature matrices to [0, 1] and one-hot encodes the
    labels into 16 classes.

    Returns:
        (x_train, y_train_onehot, x_test, y_test_onehot)
    """
    dataset_path = 'data_sample_new.mat'
    # sio.loadmat is the documented public scipy entry point;
    # sio.matlab.loadmat reaches the same function via an internal path.
    data_mat = sio.loadmat(dataset_path)
    # NOTE(review): the original also copied data_mat['data'] into an unused
    # local; dropped here.
    x_train = np.array(data_mat['x_train'])
    x_test = np.array(data_mat['x_test'])
    y_train = np.array(data_mat['y_train'])
    y_test = np.array(data_mat['y_test'])

    # Min-max normalization of the spectra.
    x_train = MaxMinNormalization(x_train)
    x_test = MaxMinNormalization(x_test)
    # One-hot encode the 16-class labels.
    y_train_onehot = make_one_hot(y_train, 16)
    y_test_onehot = make_one_hot(y_test, 16)

    print('x_train.shape', x_train.shape)
    print(x_train[0])
    print('y_train_onehot.shape', y_train_onehot.shape)
    print(y_train_onehot[0])

    return x_train, y_train_onehot, x_test, y_test_onehot

x_train, y_train_onehot, x_test, y_test_onehot = get_files()
- 数据集 x_train.shape (9224, 200)
- 标签集 y_train_onehot.shape (9224, 16)

### 训练与验证
from keras import optimizers

# Compile with RMSprop and categorical cross-entropy; track accuracy.
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Conv1D expects a trailing channel axis: (samples, 200) -> (samples, 200, 1).
x_train = x_train.reshape(-1, 200, 1)
x_test = x_test.reshape(-1, 200, 1)

# Train, validating against the held-out test split every epoch.
history = model.fit(
    x_train,
    y_train_onehot,
    epochs=200,
    batch_size=128,
    validation_data=(x_test, y_test_onehot),
)
- 采用RMSprop优化器。因为使用SGD时损失不下降,原因待查。

### 评估模型
import matplotlib.pyplot as plt

# Keras renamed the metric history key 'acc' -> 'accuracy' in 2.3; support
# both so this works across versions.
hist = history.history
acc = hist.get('acc', hist.get('accuracy'))
val_acc = hist.get('val_acc', hist.get('val_accuracy'))
loss = hist['loss']
val_loss = hist['val_loss']

epochs = range(1, len(acc) + 1)

# Accuracy: training as dots, validation as a line.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and Validation accuracy')
plt.legend()

plt.figure()

# Loss: same convention.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()

plt.show()
- 发现曲线波动过大,采用指数滑动平均得到平滑曲线继续观测。
def smooth_curve(points, factor=0.9):
    """Exponentially smooth a sequence of values.

    The first value is kept as-is; each subsequent output is
    ``previous * factor + value * (1 - factor)``. Returns a new list;
    an empty input yields an empty list.
    """
    smoothed = []
    for value in points:
        if not smoothed:
            smoothed.append(value)
        else:
            smoothed.append(smoothed[-1] * factor + value * (1 - factor))
    return smoothed

# Plot the smoothed loss curves first.
smooth_loss = smooth_curve(loss)
smooth_val_loss = smooth_curve(val_loss)

loss_epochs = range(1, len(smooth_loss) + 1)
val_loss_epochs = range(1, len(smooth_val_loss) + 1)
plt.plot(loss_epochs, smooth_loss, 'bo', label='Training_loss')
plt.plot(val_loss_epochs, smooth_val_loss, 'b', label='Validation_loss')
plt.xlabel('Epochs')
plt.ylabel('Categorical_Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.figure()

# Then the smoothed accuracy curves on a second figure.
smooth_acc = smooth_curve(acc)
smooth_val_acc = smooth_curve(val_acc)

acc_epochs = range(1, len(smooth_acc) + 1)
val_acc_epochs = range(1, len(smooth_val_acc) + 1)
plt.plot(acc_epochs, smooth_acc, 'bo', label='Training_acc')
plt.plot(val_acc_epochs, smooth_val_acc, 'b', label='Validation_acc')
plt.xlabel('Epochs')
plt.ylabel('Categorical_Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()

plt.show()

优化

  • 在75轮左右过拟合,更改训练轮次
  • 更改优化器,其他优化器试试
  • 更改正则,L1,L2,或者一起用
  • 重回训练与绘图观测

注:数据制作见博客mat处理系列

本文代码