[英]VGG16 Network for Multiple Inputs Images
我正在尝试将 VGG16 网络用于多个输入图像。 使用带有 2 个输入的简单 CNN 训练这个 model 给了我一个 acc。 大约 50 %,这就是为什么我想使用已建立的 model (如 VGG16)来尝试它的原因。
这是我尝试过的:
# imports
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
def def_model():
model = VGG16(include_top=False, input_shape=(224, 224, 3))
# mark loaded layers as not trainable
for layer in model.layers:
layer.trainable = False
# return last pooling layer
pool_layer = model.layers[-1].output
return pool_layer
m1 = def_model()
m2 = def_model()
m3 = def_model()
# add classifier layers
merge = concatenate([m1, m2, m3])
# optinal_conv = Conv2D(64, (3, 3), activation='relu', padding='same')(merge)
# optinal_pool = MaxPooling2D(pool_size=(2, 2))(optinal_conv)
# flatten = Flatten()(optinal_pool)
flatten = Flatten()(merge)
dense1 = Dense(512, activation='relu')(flatten)
dense2 = Dropout(0.5)(dense1)
output = Dense(1, activation='sigmoid')(dense2)
inshape1 = Input(shape=(224, 224, 3))
inshape2 = Input(shape=(224, 224, 3))
inshape3 = Input(shape=(224, 224, 3))
model = Model(inputs=[inshape1, inshape2, inshape3], outputs=output)
Model
function 时收到此错误。ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_21:0", shape=(?, 224, 224, 3), dtype=float32) at layer "input_21". The following previous layers were accessed without issue: []`
我知道该图是断开连接的,但我找不到在哪里。
这是compile
和fit
函数。
# compile model
model.compile(optimizer="Adam", loss='binary_crossentropy', metrics=['accuracy'])
model.fit([train1, train2, train3], train,
validation_data=([test1, test2, test3], ytest))
optinal_conv
和optinal_pool
。 在concatenate
function 之后应用Conv2D
和MaxPooling2D
会产生什么影响?我建议看到这个答案Multi-input Multi-output Model with Keras Functional API 。 这是实现此目的的一种方法:
# 3 inputs
input0 = tf.keras.Input(shape=(224, 224, 3), name="img0")
input1 = tf.keras.Input(shape=(224, 224, 3), name="img1")
input2 = tf.keras.Input(shape=(224, 224, 3), name="img2")
concate_input = tf.keras.layers.Concatenate()([input0, input1, input2])
# get 3 feature maps with same size (224, 224)
# pretrained models needs that
input = tf.keras.layers.Conv2D(3, (3, 3),
padding='same', activation="relu")(concate_input)
# pass that to imagenet model
vg = tf.keras.applications.VGG16(weights=None,
include_top = False,
input_tensor = input)
# do whatever
gap = tf.keras.layers.GlobalAveragePooling2D()(vg.output)
den = tf.keras.layers.Dense(1, activation='sigmoid')(gap)
# build the complete model
model = tf.keras.Model(inputs=[input0, input1, input2], outputs=den)
def model_1_VGG16():
vgg16 = VGG16(include_top=False, input_shape=(32, 32, 3))
vgg16._name = 'VGG16_1
for layer in vgg16.layers:
layer.trainable = False
model = Sequential(name='Sequential_1')
model.add(vgg16)
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(units=50, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=10, activation='relu'))
return model
def model_2_VGG16():
vgg16 = VGG16(include_top=False, input_shape=(32, 32, 3))
vgg16._name = 'VGG16_2'
for layer in vgg16.layers:
layer.trainable = False
model = Sequential(name='Sequential_2')
model.add(vgg16)
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=10, activation='relu'))
return model
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.