[英]How to concatenate two models in keras?
我想使用下面這個模型,但 Merge 層在新版 Keras 中已被移除,無法再使用了。
# Question's original code, written for Keras 1.x.
# Image branch: project a 2048-d image feature vector to embedding_size,
# then repeat it max_len times -> output shape (max_len, embedding_size).
image_model = Sequential([
Dense(embedding_size, input_shape=(2048,), activation='relu'),
RepeatVector(max_len)
])
# Caption branch: embed integer word ids, run an LSTM over the sequence,
# and project each timestep to 300 dims -> output shape (max_len, 300).
caption_model = Sequential([
Embedding(vocab_size, embedding_size, input_length=max_len),
LSTM(256, return_sequences=True),
TimeDistributed(Dense(300))
])
# NOTE(review): the Merge layer was removed in Keras 2, so this block no
# longer runs as written — the functional API (concatenate) replaces it,
# which is exactly what the question is asking how to do.
final_model = Sequential([
Merge([image_model, caption_model], mode='concat', concat_axis=1),
Bidirectional(LSTM(256, return_sequences=False)),
Dense(vocab_size),
Activation('softmax')
])
我用以下方式重寫了這個,不包括final_model:
image_in = Input(shape=(2048,))
# BUG (per the answer below): caption_in should be 2D — Input(shape=(max_len,)).
# The Embedding layer expects integer word ids, not one-hot vectors; this 3D
# input is what triggers the ValueError quoted below:
#   "input_length" is 40, but received input has shape (None, 40, 8256)
caption_in = Input(shape=(max_len, vocab_size))
# BUG (per the answer below): axis=0 concatenates along the batch axis;
# the intended join here is along the feature axis, axis=-1.
merged = concatenate([image_model(image_in), caption_model(caption_in)],axis=0)
latent = Bidirectional(LSTM(256, return_sequences=False))(merged)
out = Dense(vocab_size, activation='softmax')(latent)
final_model = Model([image_in, caption_in], out)
final_model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
final_model.summary()
這也給了我:
ValueError: "input_length" is 40, but received input has shape (None, 40, 8256).
任何人都可以幫助修復它嗎? 來源: https://github.com/yashk2810/Image-Captioning/blob/master/Image%20Captioning%20InceptionV3.ipynb
您應該將 caption_in 定義為 2D 張量:Input(shape=(max_len,))。此外,在您的情況下,連接必須沿最后一個軸進行:axis=-1。其余部分看起來沒有問題。
# Hyperparameters from the question.
embedding_size = 300
max_len = 40
vocab_size = 8256

# Image branch: 2048-d feature -> embedding, tiled once per timestep.
image_model = Sequential()
image_model.add(Dense(embedding_size, input_shape=(2048,), activation='relu'))
image_model.add(RepeatVector(max_len))

# Caption branch: word ids -> embeddings -> LSTM -> 300-d per timestep.
caption_model = Sequential()
caption_model.add(Embedding(vocab_size, embedding_size, input_length=max_len))
caption_model.add(LSTM(256, return_sequences=True))
caption_model.add(TimeDistributed(Dense(300)))

# Join the two branches on the feature (last) axis, summarize with a
# bidirectional LSTM, and classify over the vocabulary.
image_in = Input(shape=(2048,))
caption_in = Input(shape=(max_len,))  # 2D: integer word ids
branch_outputs = [image_model(image_in), caption_model(caption_in)]
merged = concatenate(branch_outputs, axis=-1)
latent = Bidirectional(LSTM(256, return_sequences=False))(merged)
out = Dense(vocab_size, activation='softmax')(latent)

final_model = Model([image_in, caption_in], out)
final_model.compile(loss='categorical_crossentropy',
                    optimizer=RMSprop(),
                    metrics=['accuracy'])
final_model.summary()
正如 Marco 所指出的,這個問題與input_length
參數有關。 您可以像這樣加入這兩個模型:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
import tensorflow as tf
from numpy.random import randint
# Hyperparameters matching the question's setup.
embedding_size = 300
max_len = 40
vocab_size = 8256
# Image branch: Dense projects the 2048-d feature to embedding_size,
# RepeatVector tiles it to (max_len, embedding_size).
image_model = Sequential([
Dense(embedding_size, input_shape=(2048,), activation='relu'),
RepeatVector(max_len)
])
# Caption branch: Embedding -> LSTM -> per-timestep Dense(300),
# output shape (max_len, 300).
caption_model = Sequential([
Embedding(vocab_size, embedding_size, input_length=max_len),
LSTM(256, return_sequences=True),
TimeDistributed(Dense(300))
])
class MyModel(tf.keras.Model):
    """Joins an image branch and a caption branch into one classifier.

    Both sub-models emit (batch, max_len, features); their outputs are
    concatenated on the last axis, summarized by a bidirectional LSTM,
    and mapped to a softmax over the vocabulary.
    """

    def __init__(self, image, caption):
        super(MyModel, self).__init__()
        # The two pre-built branch models supplied by the caller.
        self.image = image
        self.caption = caption
        # Fusion and classification head.
        self.concatenate = Concatenate()
        self.lstm = Bidirectional(LSTM(256, return_sequences=False))
        self.dense = Dense(vocab_size, activation='softmax')

    def call(self, inputs, training=None, **kwargs):
        # `inputs` is a dict with an 'image' and a 'caption' entry.
        image_features = self.image(inputs['image'])
        caption_features = self.caption(inputs['caption'])
        joined = self.concatenate([image_features, caption_features])
        summary = self.lstm(joined)
        return self.dense(summary)
# Build the combined model and run a smoke test on one random sample:
# an image feature vector of length 2048 plus a caption of 40 word ids.
model = MyModel(image_model, caption_model)
model({'image': randint(0, 10, (1, 2048)),
'caption': randint(0, 100, (1, 40))})
<tf.Tensor: shape=(1, 8256), dtype=float32, numpy=
array([[0.00011554, 0.00014183, 0.00011184, ..., 0.0001064 , 0.00014344,
0.00012491]], dtype=float32)>
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.