
How to get the value of a tensor in TensorFlow

I am trying to run a CNN on medical data (images), and I need to recover the value of the tensor of the last layer to perform other calculations.

def _create_conv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1):
    inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel])  # shape=(?, image_z, image_width, image_height, image_channel)
    # inputX = tf.keras.backend.reshape(X, [-1, image_z, image_width, image_height, image_channel])
    # print('inputs', inputX.shape)

    # Vnet model, encoder path
    # layer1 -> convolution
    layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop,
                               scope='layer0')
    layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop,
                               scope='layer1')
    layer1 = resnet_Add(x1=layer0, x2=layer1)
    # down sampling1
    down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1')
    # layer2 -> convolution
    layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                               scope='layer2_1')
    layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                               scope='layer2_2')
    layer2 = resnet_Add(x1=down1, x2=layer2)
    # down sampling2
    down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2')
    # layer3 -> convolution
    layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_1')
    layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_2')
    layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_3')
    layer3 = resnet_Add(x1=down2, x2=layer3)
    # down sampling3
    down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3')
    # layer4 -> convolution
    layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_1')
    layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_2')
    layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_3')
    layer4 = resnet_Add(x1=down3, x2=layer4)
    # down sampling4
    down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4')
    # layer5 -> convolution
    layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_1')
    layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_2')
    layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_3')
    layer5 = resnet_Add(x1=down4, x2=layer5)

    # decoder path
    # deconvolution1
    deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 128, 256), scope='deconv1')
    # layer6 -> convolution
    layer6 = crop_and_concat(layer4, deconv1)
    _, Z, H, W, _ = layer4.get_shape().as_list()
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 256, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_1')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_2')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_3')
    layer6 = resnet_Add(x1=deconv1, x2=layer6)
    # deconvolution2
    deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 64, 128), scope='deconv2')
    # layer7 -> convolution
    layer7 = crop_and_concat(layer3, deconv2)
    _, Z, H, W, _ = layer3.get_shape().as_list()
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 128, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_1')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_2')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_3')
    layer7 = resnet_Add(x1=deconv2, x2=layer7)
    # deconvolution3
    deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 32, 64), scope='deconv3')
    # layer8 -> convolution
    layer8 = crop_and_concat(layer2, deconv3)
    _, Z, H, W, _ = layer2.get_shape().as_list()
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 64, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_1')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_2')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_3')
    layer8 = resnet_Add(x1=deconv3, x2=layer8)
    # deconvolution4
    deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 16, 32), scope='deconv4')
    # layer9 -> convolution
    layer9 = crop_and_concat(layer1, deconv4)
    _, Z, H, W, _ = layer1.get_shape().as_list()
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_1')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_2')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_3')
    layer9 = resnet_Add(x1=deconv4, x2=layer9)
    # output layer
    output_map = conv_sigmod(x=layer9, kernal=(1, 1, 1, 32, n_class), scope='output')
    y = tf.shape(output_map)
    # print('output map shape of output', y)

    sess = tf.InteractiveSession()
    print(output_map.eval())

    '''with tf.Session() as s:
        tf.initialize_all_variables().run()
        xx = tf.rank(output_map)
        print('rank_output_map is ', s.run(xx))'''

    return output_map

I used two methods to get the value of the tensor:

  1. tensor.eval()
  2. session.run(tensor)

But I get the same error with both. Can you help me? [error screenshot]
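For context, both calls only work inside an active session, after any variables have been initialized, and with every placeholder fed. Here is a minimal sketch of the two methods side by side (the placeholder and input values are illustrative, not part of the model above; TensorFlow 1.x API):

import numpy as np
import tensorflow as tf

p = tf.placeholder(tf.float32, shape=[2])  # illustrative placeholder
t = p * 2.0                                # illustrative tensor to evaluate

with tf.Session() as sess:
    feed = {p: np.array([1.0, 2.0], dtype=np.float32)}
    print(sess.run(t, feed_dict=feed))   # method 2: session.run(tensor)
    print(t.eval(feed_dict=feed))        # method 1: tensor.eval() uses the default session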

You can just perform sess.run on the tensor to get the values. First you need the tensor. You can give it a name inside build_model by adding a name argument (which you can do for any tensor), e.g.:

Layer_name = tf.add(tf.multiply(Flat, W1), b1, name="Layer_name")

Later, you can get the tensor for the layer and evaluate it:

with tf.Session() as sess:
    Layer_name = tf.get_default_graph().get_tensor_by_name('Layer_name:0')
    FC1_values = sess.run(Layer_name, feed_dict={x: input_img_arr})
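Putting it together, here is a self-contained sketch of the whole flow (the placeholder x, the weights, and the dummy input are assumptions for illustration; TensorFlow 1.x API). Note the ':0' suffix: an op named 'Layer_name' exposes its first output tensor as 'Layer_name:0'.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4], name='x')  # hypothetical input placeholder
W1 = tf.Variable(tf.random_normal([4, 2]))                 # hypothetical weights
b1 = tf.Variable(tf.zeros([2]))
layer = tf.add(tf.matmul(x, W1), b1, name='Layer_name')    # named tensor, as above

input_img_arr = np.random.rand(1, 4).astype(np.float32)    # dummy input

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())            # variables must be initialized before evaluation
    layer_t = tf.get_default_graph().get_tensor_by_name('Layer_name:0')
    print(sess.run(layer_t, feed_dict={x: input_img_arr}))

This is most likely also why the output_map.eval() call inside _create_conv_net fails: at that point the placeholder X has no value fed and the network's variables have not been initialized yet.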
