
Tensor conversion requested dtype float64 for Tensor with dtype float32

I have this error:

ValueError: Tensor conversion requested dtype float64 for Tensor with dtype float32: 'Tensor("pearson_r/covariance/Cast:0", shape=(), dtype=float32)'

My code is as follows:

# Imports assumed by the code below (not shown in the original question;
# Keras 2.x on a TensorFlow 1.x backend, since tf.contrib is used):
import keras
import tensorflow as tf
import pandas as pd
from pickle import load
from numpy import asarray, zeros, array
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from keras import backend as K, optimizers
from keras.models import Model
from keras.layers import (Input, Embedding, Conv1D, Dropout, MaxPooling1D,
                          LSTM, Dense, concatenate)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import plot_model


def correlation_coefficient(y_true, y_pred):
    # streaming_pearson_correlation returns (pearson_r, update_op)
    return tf.contrib.metrics.streaming_pearson_correlation(y_pred, y_true)[1]


def construct_embedding():
    # load the whole embedding into memory
    embeddings_index = dict()
    with open('../input/embedding/word_embedding50d.txt') as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    print('Loaded %s word vectors.' % len(embeddings_index))
    return embeddings_index




# define the model
def define_model(length, vocab_size, embedding_matrix):
    # channel 1
    inputs1 = Input(shape=(length,))
    embedding1 = Embedding(vocab_size, 50, weights=[embedding_matrix], trainable=False)(inputs1)
    conv1 = Conv1D(filters=350, kernel_size=3, activation='relu')(embedding1)
    drop1 = Dropout(0.2)(conv1)
    nor11 = keras.layers.BatchNormalization()(drop1)

    pool1 = MaxPooling1D(pool_size=5)(nor11)
    pdrop1 = Dropout(0.2)(pool1)
    nor12 = keras.layers.BatchNormalization()(pdrop1)

    ls1 = LSTM(200)(nor12)
    ldrop1 = Dropout(0.2)(ls1)
    lnor1 = keras.layers.BatchNormalization()(ldrop1)

    # channel 2
    inputs2 = Input(shape=(length,))
    embedding2 = Embedding(vocab_size, 50, weights=[embedding_matrix], trainable=False)(inputs2)
    conv2 = Conv1D(filters=350, kernel_size=4, activation='relu')(embedding2)
    drop2 = Dropout(0.2)(conv2)
    nor21 = keras.layers.BatchNormalization()(drop2)

    pool2 = MaxPooling1D(pool_size=5)(nor21)
    pdrop2 = Dropout(0.2)(pool2)
    nor22 = keras.layers.BatchNormalization()(pdrop2)

    ls2 = LSTM(200)(nor22)
    ldrop2 = Dropout(0.2)(ls2)
    lnor2 = keras.layers.BatchNormalization()(ldrop2)

    # channel 3
    inputs3 = Input(shape=(length,))
    embedding3 = Embedding(vocab_size, 50, weights=[embedding_matrix], trainable=False)(inputs3)
    conv3 = Conv1D(filters=350, kernel_size=5, activation='relu')(embedding3)
    drop3 = Dropout(0.2)(conv3)
    nor31 = keras.layers.BatchNormalization()(drop3)

    pool3 = MaxPooling1D(pool_size=5)(nor31)
    pdrop3 = Dropout(0.2)(pool3)
    nor32 = keras.layers.BatchNormalization()(pdrop3)

    ls3 = LSTM(250)(nor32)
    ldrop3 = Dropout(0.2)(ls3)
    lnor3 = keras.layers.BatchNormalization()(ldrop3)

    # merge the three channels
    merged = concatenate([lnor1, lnor2, lnor3])
    # interpretation
    dense1 = Dense(100, activation='elu')(merged)
    nor4 = keras.layers.BatchNormalization()(dense1)

    outputs = Dense(1, activation='elu')(nor4)
    noroutputs = keras.layers.BatchNormalization()(outputs)
    model = Model(inputs=[inputs1, inputs2, inputs3], outputs=noroutputs)
    model.load_weights("../input/bestweight/bestweights.hdf5")
    # compile
    model.compile(loss='mse', optimizer=optimizers.Adam(lr=0.003),
                  metrics=[correlation_coefficient, 'accuracy'])
    # initialize the local variables created by the streaming metric
    K.get_session().run(tf.local_variables_initializer())

    # summarize
    print(model.summary())
    plot_model(model, show_shapes=True, to_file='multichannel.png')

    return model

# load a clean dataset
def load_dataset(filename):
    return load(open(filename, 'rb'))

# preprocessing text
def preprocess(lines):
    ps = PorterStemmer()
    stop_words = set(stopwords.words('english'))
    for i in range(len(lines)):
        tokens = lines[i].split()
        # filter out stop words, then stem the remaining words
        tokens = [ps.stem(w) for w in tokens if w not in stop_words]
        lines[i] = ' '.join(tokens)
    return lines



# encode a list of lines
def encode_text(tokenizer, lines, length):  
    # integer encode
    encoded = tokenizer.texts_to_sequences(lines)
    # pad encoded sequences
    padded = pad_sequences(encoded, maxlen=length, padding='post')
    return padded


# fit a tokenizer
def create_tokenizer(lines):
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer

# calculate the maximum document length
def max_length(lines):
    return max([len(s.split()) for s in lines])


def embed(vocab_size, embeddings_index, t):
    embedding_matrix = zeros((vocab_size, 50))
    for word, i in t.word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix


# define training data
f = pd.read_csv('../input/satexasdataset/texasDatasetForHAN.csv', encoding='ISO-8859-1')
ftest = pd.read_csv('../input/satexasdataset/testtexasDatasetForHAN.csv', encoding='ISO-8859-1')

train = []
test = []

data_train = pd.DataFrame(data=f)

for i in range(data_train.shape[0]):
    train.append(data_train.manswer[i] + ' ' + data_train.sanswer[i])

trainLabels = data_train.score
Lines = pd.DataFrame(train, columns=['train'])
trainLines = Lines.train
trainLines = preprocess(trainLines)

data_test = pd.DataFrame(data=ftest)
for i in range(data_test.shape[0]):
    test.append(data_test.manswer[i] + ' ' + data_test.sanswer[i])

testLabels = data_test.score
tLines = pd.DataFrame(test, columns=['test'])
testLines = tLines.test
testLines = preprocess(testLines)

mergedLines = [trainLines, testLines]
allmerged = pd.concat(mergedLines)

# create tokenizer
tokenizer = create_tokenizer(allmerged.str.lower())


# calculate max document length
length = max_length(allmerged)

# calculate vocabulary size
vocab_size = len(tokenizer.word_index) + 1


print('Max answerlength: %d' % length)
print('Vocabulary size: %d' % vocab_size)

# encode data
alldataX = encode_text(tokenizer, allmerged, length)


s = trainLines.size
trainX = alldataX[0:s]
testX = alldataX[s:]



print(trainX.shape,  testX.shape)

embeddings_index = construct_embedding()
embedding_matrix = embed(vocab_size, embeddings_index, tokenizer)


# define model
model = define_model(length, vocab_size, embedding_matrix)

ynew = model.predict([testX, testX, testX])
corr = correlation_coefficient(ynew, array(testLabels))
print('Test Correlation: %f' % corr)

The complete error message is:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-4-3ce58b1f4f26> in <module>
    274 #testLabels = tf.convert_to_tensor(array(testLabels), dtype=tf.float64)
    275 
--> 276 corr=correlation_coefficient(ynew, array(testLabels))
    277 print('Test Correlation: %f' % (cor*100))
    278 

<ipython-input-4-3ce58b1f4f26> in correlation_coefficient(y_true, y_pred)
     45 
     46 def correlation_coefficient(y_true, y_pred):
---> 47     return tf.contrib.metrics.streaming_pearson_correlation(y_pred, y_true)[1]
     48 
     49 

/opt/conda/lib/python3.6/site-packages/tensorflow/contrib/metrics/python/ops/metric_ops.py in streaming_pearson_correlation(predictions, labels, weights, metrics_collections, updates_collections, name)
   3334       weights = weights_broadcast_ops.broadcast_weights(weights, labels)
   3335     cov, update_cov = streaming_covariance(
-> 3336         predictions, labels, weights=weights, name='covariance')
   3337     var_predictions, update_var_predictions = streaming_covariance(
   3338         predictions, predictions, weights=weights, name='variance_predictions')

/opt/conda/lib/python3.6/site-packages/tensorflow/contrib/metrics/python/ops/metric_ops.py in streaming_covariance(predictions, labels, weights, metrics_collections, updates_collections, name)
   3218     # batch_mean_prediction is E[x_B] in the update equation
   3219     batch_mean_prediction = math_ops.div_no_nan(
-> 3220         math_ops.reduce_sum(weighted_predictions), batch_count)
   3221     delta_mean_prediction = math_ops.div_no_nan(
   3222         (batch_mean_prediction - mean_prediction) * batch_count, update_count)

/opt/conda/lib/python3.6/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
    178     """Call target, and fall back on dispatchers if there is a TypeError."""
    179     try:
--> 180       return target(*args, **kwargs)
    181     except (TypeError, ValueError):
    182       # Note: convert_to_eager_tensor currently raises a ValueError, not a

/opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py in div_no_nan(x, y, name)
   1095   with ops.name_scope(name, "div_no_nan", [x, y]) as name:
   1096     x = ops.convert_to_tensor(x, name="x")
-> 1097     y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
   1098     x_dtype = x.dtype.base_dtype
   1099     y_dtype = y.dtype.base_dtype

/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, preferred_dtype, dtype_hint)
   1085   preferred_dtype = deprecation.deprecated_argument_lookup(
   1086       "dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
-> 1087   return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
   1088 
   1089 

/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor_v2(value, dtype, dtype_hint, name)
   1143       name=name,
   1144       preferred_dtype=dtype_hint,
-> 1145       as_ref=False)
   1146 
   1147 

/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, ctx, accept_symbolic_tensors, accept_composite_tensors)
   1222 
   1223     if ret is None:
-> 1224       ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
   1225 
   1226     if ret is NotImplemented:

/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in _TensorTensorConversionFunction(t, dtype, name, as_ref)
   1016     raise ValueError(
   1017         "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
-> 1018         (dtype.name, t.dtype.name, str(t)))
   1019   return t
   1020 

ValueError: Tensor conversion requested dtype float64 for Tensor with dtype float32: 'Tensor("pearson_r/covariance/Cast:0", shape=(), dtype=float32)'

Replace this line:

testLabels = data_test.score

With:

testLabels = data_test.score.astype(np.float32)

That way, both parameters to correlation_coefficient will be float32.
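The traceback shows why: model.predict returns float32 values, a pandas score column defaults to float64, and math_ops.div_no_nan inside streaming_covariance converts its second operand to the dtype of the first, which fails when the two sides disagree. Below is a minimal sketch of the fixed call site, assuming TensorFlow 1.x (where tf.contrib still exists) and the variables from the question; casting inside the metric as well keeps the arguments aligned even if a caller forgets the conversion:

import numpy as np
import tensorflow as tf

def correlation_coefficient(y_true, y_pred):
    # cast both sides so streaming_pearson_correlation never sees mixed dtypes
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)
    # the streaming metric returns (pearson_r, update_op); [1] is the update op
    return tf.contrib.metrics.streaming_pearson_correlation(y_pred, y_true)[1]

testLabels = data_test.score.astype(np.float32)  # now matches the float32 predictions

Either change alone is enough to silence this particular ValueError; doing both is simply defensive.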

