簡體   English   中英

Tensorflow:從頭開始實現實例規范

[英]Tensorflow: Implementing instance norm from scratch

  1. 是否可以從tf.contrib.layers.instance_norm獲得 mean 和 var ?
  2. 似乎這些實現給了我關於批量大小 1 的相同答案,但例如,對於批量大小 32 最大 abs diff 是2.1885605296772486 ,我是否錯過了與批量維度相關的內容?

代碼:

import tensorflow as tf
import numpy as np
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

np.random.seed(2019)


def get_data_batch():
    bs = 1
    h = 3
    w = 3
    c = 4

    x_np = np.random.rand(bs, h, w, c)
    print('x_np.shape', x_np.shape)
    return x_np


def my_instance_norm(x_np):
    print('='*60)
    with tf.Session() as sess:
        print('np.sum(x_np)', np.sum(x_np))

        x_tf = tf.convert_to_tensor(x_np)

        x_m, x_v = tf.nn.moments(x_tf, [0, 1, 2])

        x_std = tf.sqrt(x_v + 1e-8)
        x_normalized = (x_tf - x_m) / x_std

        x_normalized_np, x_m_np, x_v_np = sess.run(fetches=[x_normalized, x_m, x_v], feed_dict={x_tf: x_np})
        print('x_m_np.shape', x_m_np.shape)
        print('x_m_np:', x_m_np)
        print('x_v_np.shape', x_v_np.shape)
        print('x_v_np:', x_v_np)
        print('x_normalized_np.shape:', x_normalized_np.shape)
        print('x_normalized_np:', x_normalized_np)

        return x_normalized_np


def tensorflow_instance_norm(x_np):
    print('=' * 60)

    # https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/contrib/layers/instance_norm?hl=ca

    with tf.Session() as sess:
        print('np.sum(x_np)', np.sum(x_np))

        x_tf = tf.convert_to_tensor(x_np)

        z_tf = tf.contrib.layers.instance_norm(x_tf,
                                               center=True,
                                               scale=True,
                                               epsilon=1e-06,
                                               activation_fn=None,
                                               param_initializers=None,
                                               reuse=None,
                                               variables_collections=None,
                                               outputs_collections=None,
                                               trainable=True,
                                               data_format="NHWC",
                                               scope="instance_norm_scope")

        sess.run(tf.global_variables_initializer())
        z_np = sess.run(fetches=[z_tf], feed_dict={x_tf: x_np})[0]
        print('z_np.shape', z_np.shape)
        print('z_np', z_np)

        for item in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='instance_norm_scope'):
            print('-'*60)
            print(item.name)
            print(type(item))
            var = sess.run(item)
            print(var.shape)
            print(var)

        return z_np


x_np = get_data_batch()
out_1 = my_instance_norm(x_np)
out_2 = tensorflow_instance_norm(x_np)

print('abs diff:', np.max(np.fabs(out_1-out_2)))

輸出示例:

x_np.shape (1, 3, 3, 4)
============================================================
np.sum(x_np) 20.481414243908816
x_m_np.shape (4,)
x_m_np: [0.56814751 0.50049309 0.50423491 0.70283719]
x_v_np.shape (4,)
x_v_np: [0.08732506 0.05503668 0.07861654 0.09638579]
x_normalized_np.shape: (1, 3, 3, 4)
x_normalized_np: [[[[ 1.13477312 -0.45785613  0.42703584 -0.20923679]
   [ 1.05699812 -0.85814979  0.70603761  0.64539253]
   [ 1.05998571 -0.4038521  -0.18470326 -1.40361391]]

  [[-1.37147699  1.65696195 -1.26881563  0.90796148]
   [-1.81310153  0.06333216 -1.08102901  0.59000616]
   [-0.18452143  0.33166593 -0.73096354  0.43277698]]

  [[-0.14042819 -1.68649001 -0.80640672 -2.11376961]
   [-0.19989755 -0.11963737  1.42750286  0.77515475]
   [ 0.45766874  1.47402535  1.51134183  0.37532841]]]]
============================================================
np.sum(x_np) 20.481414243908816
z_np.shape (1, 3, 3, 4)
z_np [[[[ 1.13476668 -0.45785201  0.42703315 -0.20923572]
   [ 1.05699213 -0.85814207  0.70603317  0.64538921]
   [ 1.0599797  -0.40384847 -0.18470209 -1.40360671]]

  [[-1.37146922  1.65694705 -1.26880764  0.90795682]
   [-1.81309125  0.06333159 -1.0810222   0.59000313]
   [-0.18452038  0.33166295 -0.73095893  0.43277476]]

  [[-0.14042739 -1.68647484 -0.80640164 -2.11375875]
   [-0.19989642 -0.11963629  1.42749387  0.77515077]
   [ 0.45766614  1.47401209  1.51133232  0.37532648]]]]
------------------------------------------------------------
instance_norm_scope/beta:0
<class 'tensorflow.python.ops.variables.RefVariable'>
(4,)
[0. 0. 0. 0.]
------------------------------------------------------------
instance_norm_scope/gamma:0
<class 'tensorflow.python.ops.variables.RefVariable'>
(4,)
[1. 1. 1. 1.]
abs diff: 1.5168087757144733e-05

固定版本: max abs diff: 4.440892098500626e-16

import tensorflow as tf
import numpy as np
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

np.random.seed(2019)

EPS = 1e-3

print('tf.__version__', tf.__version__)

def get_data_batch():
    bs = 2
    h = 3
    w = 3
    c = 4

    x_np = np.random.rand(bs, h, w, c)
    print('x_np.shape', x_np.shape)
    return x_np


def my_instance_norm(x_np):
    print('='*60)
    with tf.Session() as sess:
        print('np.sum(x_np)', np.sum(x_np))

        x_tf = tf.convert_to_tensor(x_np)

        x_m, x_v = tf.nn.moments(x_tf, [1, 2], keep_dims=True)
        x_std = tf.sqrt(x_v + EPS)
        x_normalized = (x_tf - x_m) / x_std

        x_normalized_np, x_m_np, x_v_np = sess.run(fetches=[x_normalized, x_m, x_v], feed_dict={x_tf: x_np})
        print('x_m_np.shape', x_m_np.shape)
        print('x_m_np:', x_m_np)
        print('x_v_np.shape', x_v_np.shape)
        print('x_v_np:', x_v_np)
        print('x_normalized_np.shape:', x_normalized_np.shape)
        print('x_normalized_np:', x_normalized_np)

        return x_normalized_np


def tensorflow_instance_norm(x_np):
    print('=' * 60)

    # https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/contrib/layers/instance_norm?hl=ca

    with tf.Session() as sess:
        print('np.sum(x_np)', np.sum(x_np))

        x_tf = tf.convert_to_tensor(x_np)

        z_tf = tf.contrib.layers.instance_norm(x_tf,
                                               center=True,
                                               scale=True,
                                               epsilon=EPS,
                                               activation_fn=None,
                                               param_initializers=None,
                                               reuse=None,
                                               variables_collections=None,
                                               outputs_collections=None,
                                               trainable=True,
                                               data_format="NHWC",
                                               scope="instance_norm_scope")

        sess.run(tf.global_variables_initializer())
        z_np = sess.run(fetches=[z_tf], feed_dict={x_tf: x_np})[0]
        print('z_np.shape', z_np.shape)
        print('z_np', z_np)

        for item in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='instance_norm_scope'):
            print('-'*60)
            print(item.name)
            print(type(item))
            var = sess.run(item)
            print(var.shape)
            print(var)

        return z_np


def run_test():
    x_np = get_data_batch()
    out_1 = my_instance_norm(x_np)
    out_2 = tensorflow_instance_norm(x_np)
    print('max abs diff:', np.max(np.fabs(out_1-out_2)))


run_test()

暫無
暫無

聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.

 
粵ICP備18138465號  © 2020-2024 STACKOOM.COM