跳到主要内容

卷积神经网络

卷积神经网络(CNN)是处理图像数据的核心技术。本章介绍如何使用 TensorFlow 构建和训练 CNN。

CNN 基础概念

卷积操作

卷积操作通过滑动窗口在图像上提取特征:

import tensorflow as tf
from tensorflow.keras import layers

# One random grayscale image, NHWC layout: (batch_size, height, width, channels).
image = tf.random.normal([1, 28, 28, 1])

# A 2-D convolution layer that slides 3x3 kernels over the image.
conv_layer = layers.Conv2D(
    filters=32,        # number of convolution kernels (output channels)
    kernel_size=3,     # 3x3 kernel
    strides=1,         # step size of the sliding window
    padding='same',    # pad so the spatial size is preserved
    activation='relu'
)

output = conv_layer(image)
print(output.shape)  # (1, 28, 28, 32)

填充方式

# padding='valid': no padding, so the output shrinks (28 - 3 + 1 = 26).
conv_valid = layers.Conv2D(32, 3, padding='valid')
output_valid = conv_valid(tf.random.normal([1, 28, 28, 1]))
print(output_valid.shape)  # (1, 26, 26, 32)

# padding='same': zero-pad the borders so output size matches the input.
conv_same = layers.Conv2D(32, 3, padding='same')
output_same = conv_same(tf.random.normal([1, 28, 28, 1]))
print(output_same.shape)  # (1, 28, 28, 32)

池化层

池化层用于降低特征图的空间维度:

# Max pooling: keep the largest value in each 2x2 window, halving H and W.
max_pool = layers.MaxPooling2D(pool_size=2, strides=2)
pooled = max_pool(tf.random.normal([1, 28, 28, 32]))
print(pooled.shape)  # (1, 14, 14, 32)

# Average pooling: same window, but average instead of max.
avg_pool = layers.AveragePooling2D(pool_size=2, strides=2)
pooled = avg_pool(tf.random.normal([1, 28, 28, 32]))
print(pooled.shape)  # (1, 14, 14, 32)

# Global average pooling: collapse each feature map to a single number.
global_pool = layers.GlobalAveragePooling2D()
global_pooled = global_pool(tf.random.normal([1, 28, 28, 32]))
print(global_pooled.shape)  # (1, 32)

构建 CNN 模型

基础 CNN 架构

from tensorflow import keras

# A plain three-block CNN for 28x28 grayscale images.
model = keras.Sequential([
    # Explicit Input layer: passing `input_shape=` to the first layer is
    # deprecated in Keras 3, and this matches the `layers.Input` style
    # used later in this document.
    keras.Input(shape=(28, 28, 1)),

    # Conv block 1: 28x28 -> 14x14, 32 channels
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(2),

    # Conv block 2: 14x14 -> 7x7, 64 channels
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(2),

    # Conv block 3: 7x7 -> 3x3, 128 channels
    layers.Conv2D(128, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(2),

    # Classifier head: flatten, one hidden layer, dropout, 10-way softmax
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax')
])

model.summary()

输出:

Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 28, 28, 32) 320

max_pooling2d (MaxPooling2D (None, 14, 14, 32) 0
)

conv2d_1 (Conv2D) (None, 14, 14, 64) 18496

max_pooling2d_1 (MaxPooling (None, 7, 7, 64) 0
2D)

conv2d_2 (Conv2D) (None, 7, 7, 128) 73856

max_pooling2d_2 (MaxPooling (None, 3, 3, 128) 0
2D)

flatten (Flatten) (None, 1152) 0

dense (Dense) (None, 128) 147584

dropout (Dropout) (None, 128) 0

dense_1 (Dense) (None, 10) 1290

=================================================================
Total params: 241,546
Trainable params: 241,546
Non-trainable params: 0
_________________________________________________________________

使用 BatchNormalization

批归一化可以加速训练并提高稳定性:

# Same three-block CNN, but with BatchNormalization inserted between each
# convolution and its ReLU: BN normalizes the raw pre-activations, which
# speeds up and stabilizes training.
model = keras.Sequential()

model.add(layers.Conv2D(32, 3, padding='same', input_shape=(28, 28, 1)))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(2))

model.add(layers.Conv2D(64, 3, padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(2))

model.add(layers.Conv2D(128, 3, padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.GlobalAveragePooling2D())

model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(10, activation='softmax'))

MNIST 手写数字识别

数据准备

import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt

# Fetch MNIST: 60k training / 10k test grayscale digit images.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Scale pixel values from [0, 255] into [0, 1].
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

# Conv2D expects a channel axis: (N, 28, 28) -> (N, 28, 28, 1).
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)

print(f"训练集形状: {x_train.shape}")  # (60000, 28, 28, 1)
print(f"测试集形状: {x_test.shape}")  # (10000, 28, 28, 1)

# Preview the first ten digits with their labels.
fig, axes = plt.subplots(2, 5, figsize=(10, 4))
for idx, ax in enumerate(axes.flat):
    ax.imshow(x_train[idx, :, :, 0], cmap='gray')
    ax.set_title(f"Label: {y_train[idx]}")
    ax.axis('off')
plt.tight_layout()
plt.show()

构建和训练模型

# Compact CNN: GlobalAveragePooling replaces Flatten to cut parameters.
model = keras.Sequential([
    layers.Conv2D(32, 3, padding='same', activation='relu', input_shape=(28, 28, 1)),
    layers.MaxPooling2D(2),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(2),
    layers.Conv2D(128, 3, padding='same', activation='relu'),
    layers.GlobalAveragePooling2D(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax'),
])

# Sparse categorical cross-entropy: labels are plain integers, not one-hot.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

# Hold out 10% of the training data for validation; stop early once the
# validation metric has not improved for three epochs, keeping the best weights.
early_stop = keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True)
history = model.fit(
    x_train, y_train,
    epochs=10,
    batch_size=64,
    validation_split=0.1,
    callbacks=[early_stop],
)

# Final accuracy on the untouched test set.
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f"\n测试准确率: {test_acc:.4f}")

绘制训练曲线

def plot_history(history):
    """Plot training/validation loss and accuracy curves side by side."""
    fig, axes = plt.subplots(1, 2, figsize=(12, 4))
    # Left panel: loss; right panel: accuracy. Each shows train vs. val.
    for ax, metric in zip(axes, ('loss', 'accuracy')):
        ax.plot(history.history[metric], label='train')
        ax.plot(history.history['val_' + metric], label='val')
        ax.set_title(metric.capitalize())
        ax.legend()
    plt.show()

plot_history(history)

CIFAR-10 图像分类

CIFAR-10 是更复杂的彩色图像数据集:

# CIFAR-10: 50k train / 10k test color images, 32x32x3, 10 classes.
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()

# Normalize pixels into [0, 1].
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

# Human-readable names for the ten integer labels (index = label).
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

# VGG-style stack: two Conv-BN-ReLU layers per block, 2x2 pooling, and a
# dropout rate that rises with depth (0.3 -> 0.4 -> 0.5).
model = keras.Sequential()
for block_idx, (filters, drop_rate) in enumerate([(64, 0.3), (128, 0.4), (256, 0.5)]):
    # Only the very first layer needs the input shape.
    first_kwargs = {'input_shape': (32, 32, 3)} if block_idx == 0 else {}
    model.add(layers.Conv2D(filters, 3, padding='same', **first_kwargs))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.Conv2D(filters, 3, padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D(2))
    model.add(layers.Dropout(drop_rate))

# Classifier head.
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(10, activation='softmax'))

# Compile with Adam and integer-label cross-entropy.
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

# On-the-fly augmentation. NOTE: ImageDataGenerator is deprecated in favor
# of the Keras preprocessing layers, but is kept here because this example
# streams augmented batches via .flow().
datagen = keras.preprocessing.image.ImageDataGenerator(
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True
)
# No `datagen.fit(x_train)` call: fit() is only required when featurewise
# statistics (featurewise_center / featurewise_std_normalization /
# zca_whitening) are enabled, and none are here — the original call was a no-op.

# Train on augmented batches; stop early and halve the learning rate when
# the validation metric plateaus.
history = model.fit(
    datagen.flow(x_train, y_train, batch_size=64),
    epochs=50,
    validation_data=(x_test, y_test),
    callbacks=[
        keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True),
        keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)
    ]
)

# Final held-out evaluation.
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f"测试准确率: {test_acc:.4f}")

数据增强

数据增强可以扩充训练数据,提高模型泛化能力:

# Classic ImageDataGenerator-based augmentation (legacy API).
datagen = keras.preprocessing.image.ImageDataGenerator(
    rotation_range=20,        # random rotation, in degrees
    width_shift_range=0.2,    # horizontal translation
    height_shift_range=0.2,   # vertical translation
    horizontal_flip=True,     # random horizontal flips
    zoom_range=0.2,           # random zoom
    shear_range=0.2,          # shear transform
    fill_mode='nearest'       # how newly exposed pixels are filled
)

# Equivalent augmentation expressed as Keras preprocessing layers; these
# run inside the model graph and are active only during training.
data_augmentation = keras.Sequential([
    layers.RandomFlip("horizontal"),
    layers.RandomRotation(0.1),
    layers.RandomZoom(0.1),
    layers.RandomContrast(0.1),
])

# Drop the augmentation pipeline straight into a model.
model = keras.Sequential([
    layers.Input(shape=(32, 32, 3)),
    data_augmentation,
    layers.Conv2D(32, 3, activation='relu'),
    # ... remaining layers
])

迁移学习

使用预训练模型可以快速获得高性能:

# Load ImageNet-pretrained ResNet50V2 without its classification head.
base_model = keras.applications.ResNet50V2(
    weights='imagenet',
    include_top=False,
    input_shape=(224, 224, 3)
)

# Freeze the backbone so only the new head trains at first.
base_model.trainable = False

# Build the new model on top of the frozen backbone.
model = keras.Sequential([
    layers.Input(shape=(224, 224, 3)),
    # BUGFIX: ResNet50V2 expects inputs scaled to [-1, 1] (its
    # `keras.applications.resnet_v2.preprocess_input` computes x/127.5 - 1).
    # The original Rescaling(1./255) produced [0, 1], which mismatches the
    # pretrained weights and degrades accuracy.
    layers.Rescaling(1. / 127.5, offset=-1.0),
    base_model,
    layers.GlobalAveragePooling2D(),
    layers.Dense(256, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax')
])

model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

# Phase 1: train only the new head.
history = model.fit(train_dataset, epochs=10, validation_data=val_dataset)

# Phase 2: unfreeze the top 20 layers of the backbone for fine-tuning.
base_model.trainable = True
for layer in base_model.layers[:-20]:
    layer.trainable = False

# Recompile with a much smaller learning rate so fine-tuning does not wreck
# the pretrained weights (compile() must be called again after changing
# `trainable` flags for them to take effect).
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=1e-5),
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

history_fine = model.fit(train_dataset, epochs=10, validation_data=val_dataset)

可视化卷积特征

def visualize_feature_maps(model, image, layer_names):
    """Visualize the feature maps of the named layers for a single image.

    Args:
        model: a trained Keras model.
        image: one input image of shape (H, W, C), without the batch axis.
        layer_names: names of the layers whose outputs should be plotted.
    """
    # Build ONE multi-output model and run a single forward pass, instead
    # of constructing a new model and calling predict() once per layer.
    outputs = [model.get_layer(name).output for name in layer_names]
    feature_model = keras.Model(inputs=model.input, outputs=outputs)
    feature_maps = feature_model.predict(image[np.newaxis, ...])
    if len(layer_names) == 1:
        # predict() returns a bare array (not a list) for a single output.
        feature_maps = [feature_maps]

    # Plot up to 16 channels per layer in a 4x4 grid.
    for name, fm in zip(layer_names, feature_maps):
        fm = fm[0]  # drop the batch axis
        n_features = min(16, fm.shape[-1])
        fig, axes = plt.subplots(4, 4, figsize=(8, 8))
        fig.suptitle(f'Feature Maps: {name}')

        for j, ax in enumerate(axes.flat):
            if j < n_features:
                ax.imshow(fm[:, :, j], cmap='viridis')
            ax.axis('off')  # hide ticks on empty cells as well
        plt.show()

# Example: visualize the first three convolution layers on one test image.
layer_names = ['conv2d', 'conv2d_1', 'conv2d_2']
visualize_feature_maps(model, x_test[0], layer_names)

小结

本章介绍了卷积神经网络的核心概念和实践:

  1. 卷积层:提取图像特征
  2. 池化层:降低空间维度
  3. BatchNormalization:加速训练
  4. 数据增强:提高泛化能力
  5. 迁移学习:利用预训练模型

CNN 是图像处理的基础架构,理解其原理对于构建视觉模型非常重要。下一章我们将学习模型保存和加载。