VGG16 transfer learning with Imagenette dataset accuracy is 0

I am trying to insert SENet modules at several positions in the middle of a VGG16 network and then train the model on the Imagenette dataset, but my accuracy is 0. I am only a beginner; could someone help me figure out what is going wrong?

I defined the SE block and embedded it at several positions, created the dataset (taking only a portion of it to reduce training time), and finally fit the model with the training and test data.

import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16 as Model
from tensorflow.keras.applications.vgg16 import preprocess_input

import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.preprocessing import image
from tensorflow.keras import backend as K
from vis.utils import utils
from tensorflow.keras.applications.vgg16 import decode_predictions
import json
import io
from tensorflow.keras import layers
import os

from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import GlobalAveragePooling2D, Reshape, Dense, Permute, Multiply

def reshape(img,label):
  img = tf.cast(img, tf.float32)
  img = tf.image.resize(img, (224,224))
  img = img/255.0
  return img, label
import tensorflow_datasets as tfds
imgn_dataset = tfds.builder('imagenette')

assert imgn_dataset.info.features['label'].num_classes == 10
imgn_dataset.download_and_prepare() 

datasets = imgn_dataset.as_dataset(as_supervised = True)

train_data, test_data = datasets['train'], datasets['validation']

train_data = train_data.map(reshape)
train_data = train_data.batch(128)
train_data = train_data.prefetch(tf.data.experimental.AUTOTUNE)

test_data = test_data.map(reshape)
test_data = test_data.batch(128)
test_data = test_data.prefetch(tf.data.experimental.AUTOTUNE)

# Load the pretrained VGG16 model (ImageNet weights); its layers are reused below
model = Model(weights='imagenet', include_top=True)
model.summary()

# Squeeze and Excitation
def se_block(input, channels, r=8):
    # Squeeze
    x = GlobalAveragePooling2D()(input)
    # Excitation
    x = Dense(channels//r, activation="relu")(x)
    x = Dense(channels, activation="sigmoid")(x)
    return Multiply()([input, x])

# Rebuild the VGG16 graph, inserting an SE block ahead of each convolution block
input_dim = (224,224,3)
img_input = layers.Input(shape = input_dim)

sen = se_block(img_input, 3)

vgg_model = model.get_layer("block1_conv1")(sen)
vgg_model = model.get_layer("block1_conv2")(vgg_model)
vgg_model = model.get_layer("block1_pool")(vgg_model)

sen = se_block(vgg_model, 64)

vgg_model = model.get_layer("block2_conv1")(sen)
vgg_model = model.get_layer("block2_conv2")(vgg_model)
vgg_model = model.get_layer("block2_pool")(vgg_model)

sen = se_block(vgg_model, 128)

vgg_model = model.get_layer("block3_conv1")(sen)
vgg_model = model.get_layer("block3_conv2")(vgg_model)
vgg_model = model.get_layer("block3_conv3")(vgg_model)
vgg_model = model.get_layer("block3_pool")(vgg_model)

sen = se_block(vgg_model, 256)

vgg_model = model.get_layer("block4_conv1")(sen)
vgg_model = model.get_layer("block4_conv2")(vgg_model)
vgg_model = model.get_layer("block4_conv3")(vgg_model)
vgg_model = model.get_layer("block4_pool")(vgg_model)

sen = se_block(vgg_model, 512)

vgg_model = model.get_layer("block5_conv1")(sen)
vgg_model = model.get_layer("block5_conv2")(vgg_model)
vgg_model = model.get_layer("block5_conv3")(vgg_model)
vgg_model = model.get_layer("block5_pool")(vgg_model)

sen = se_block(vgg_model, 512)

vgg_model = model.get_layer("flatten")(sen)
vgg_model = model.get_layer("fc1")(vgg_model)
vgg_model = model.get_layer("fc2")(vgg_model)


vgg_model = tf.keras.models.Model(img_input, vgg_model)

vgg_model.summary()

# Freeze all reused VGG16 layers (conv blocks, pooling, flatten, fc1 and fc2);
# only the SE-block Dense layers remain trainable
for layer in vgg_model.layers:
    if layer.name.startswith("block") or layer.name in ("flatten", "fc1", "fc2"):
        layer.trainable = False

vgg_model.compile(optimizer = 'adam', loss ='mse', metrics=['accuracy'] )
vgg_model.summary()

# Take only a single batch of train/test data to keep training time short
train_data = train_data.take(1)
test_data = test_data.take(1)

vgg_model.fit(train_data, epochs=10, validation_data=test_data)

Change the loss function to 'CategoricalCrossentropy'. Technically you can use a mean-squared-error function to minimize the loss, but there is no guarantee that it will actually be minimized for a classification task. Also, I see you are only using 10 classes, so I suggest retraining the dense layers and replacing the last layer with one that has 10 units.
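A minimal sketch of what that could look like, building on the vgg_model, train_data and test_data defined in the question (the layer name imagenette_head and the variable clf_model are made up for illustration). Because the tfds pipeline above yields integer class ids rather than one-hot vectors, the sketch uses the sparse variant of categorical cross-entropy:

# Sketch only: assumes vgg_model, train_data and test_data from the question above
from tensorflow.keras.layers import Dense

# New trainable 10-way softmax head on top of the fc2 features
outputs = Dense(10, activation="softmax", name="imagenette_head")(vgg_model.output)
clf_model = tf.keras.models.Model(vgg_model.input, outputs)

# Let the fully connected layers adapt to the new 10 classes
for name in ("fc1", "fc2"):
    clf_model.get_layer(name).trainable = True

# Labels from tfds as_supervised are integer class ids, so use the sparse loss
clf_model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=["accuracy"],
)

clf_model.fit(train_data, epochs=10, validation_data=test_data)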
