簡體   English   中英

如何使用 Tensorflow tf.nn.conv2d 制作卷積層?

[英]How do I use Tensorflow tf.nn.conv2d to make a convolutional layer?

使用tf.nn.conv2d ,您可以對張量執行卷積運算。 例如

# Standalone demo: apply a fixed (non-learned) convolution with tf.nn.conv2d.
import tensorflow as tf

# NHWC input: batch of 1, a 224x224 "image" with 3 channels.
x = tf.random.uniform(shape=(1, 224, 224, 3), dtype=tf.float32)

# Filter layout is (filter_height, filter_width, in_channels, out_channels).
# Height is 1 here, so with VALID padding only the width shrinks (224 -> 222).
filters = tf.random.uniform(shape=(1, 3, 3, 10))

# Stride 1, VALID padding -> output shape (1, 224, 222, 10), shown below.
tf.nn.conv2d(input=x, filters=filters, strides=1, padding='VALID')
<tf.Tensor: shape=(1, 224, 222, 10), dtype=float32, numpy=
array([[[[2.1705112, 1.2065555, 1.7674012, ..., 1.705754 , 1.3659815,
          1.7028458],
         [2.0048866, 1.4835871, 1.2038497, ..., 1.8981357, 1.4605963,
          2.148876 ],
         [2.4999123, 1.856892 , 1.0806457, ..., 2.270382 , 1.5633923,
          1.5280294],
         ...,
         [3.2492838, 1.9597337, 2.3294296, ..., 2.8038855, 2.1928523,
          3.065394 ],
         [2.5742679, 1.4919059, 1.4522426, ..., 2.158071 , 1.9074411,
          2.2769275],
         [2.8084617, 2.315342 , 1.554437 , ..., 2.2483544, 2.0936842,
          1.997768 ]]]], dtype=float32)>

但是過濾器不是學習的,也不是自動調整的。 它們需要提前指定。 如何在具有學習權重的自定義 Keras 層中使用此操作?

當您將tf.keras.layers.Layer子類化時，模型將跟蹤其內部的所有tf.Variable作為可訓練變量。 然後您需要做的是創建一個具有卷積濾波器形狀的tf.Variable，這些變量將在訓練期間適應任務（即學習）。 濾波器需要這個形狀：

(filter_height, filter_width, in_channels, out_channels)

這個tf.keras.layers.Layer 物件的行為與 CNN 中的 Keras 卷積層完全相同：

class CustomLayer(tf.keras.layers.Layer):
    """2-D convolution layer built directly on ``tf.nn.conv2d``.

    The filter bank is a trainable ``tf.Variable`` created lazily in
    ``build``; Keras tracks every ``tf.Variable`` held by a ``Layer``
    subclass, so these weights are updated by model training.

    Args (mirroring ``tf.keras.layers.Conv2D``):
        filters: int, number of output channels.
        kernel_size: (height, width) of each filter.
        padding: 'VALID' or 'SAME', forwarded to ``tf.nn.conv2d``.
        strides: int or sequence, forwarded to ``tf.nn.conv2d``.
        activation: callable, activation name (e.g. 'relu'), or None
            (identity). The original version crashed on None.
        kernel_initializer: initializer instance or name.
        bias_initializer: initializer instance or name.
        use_bias: if True, add a trainable bias of shape (filters,).
    """

    def __init__(self, filters, kernel_size, padding, strides, activation,
                 kernel_initializer, bias_initializer, use_bias):
        super().__init__()
        self.filters = filters
        self.kernel_size = kernel_size
        # ``get`` accepts callables (returned unchanged), string names, and
        # None (mapped to the identity), matching Conv2D's contract.
        self.activation = tf.keras.activations.get(activation)
        self.padding = padding
        self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
        self.bias_initializer = tf.keras.initializers.get(bias_initializer)
        self.strides = strides
        self.use_bias = use_bias
        self.w = None  # filter bank, created in build()
        self.b = None  # bias vector, created in build() when use_bias

    def build(self, input_shape):
        # Input channel count comes from the last axis (NHWC layout).
        *_, n_channels = input_shape
        # Filter shape: (filter_height, filter_width, in_channels, out_channels).
        self.w = tf.Variable(
            initial_value=self.kernel_initializer(
                shape=(*self.kernel_size, n_channels, self.filters),
                dtype='float32'),
            trainable=True)
        if self.use_bias:
            self.b = tf.Variable(
                initial_value=self.bias_initializer(shape=(self.filters,),
                                                    dtype='float32'),
                trainable=True)
        # Mark the layer as built (the original skipped this).
        super().build(input_shape)

    def call(self, inputs, training=None):
        x = tf.nn.conv2d(inputs, filters=self.w, strides=self.strides,
                         padding=self.padding)
        if self.use_bias:
            x = x + self.b
        return self.activation(x)

您可以看到權重就是tf.nn.conv2d操作的濾波器，即tf.Variable，因此它們是將通過模型訓練更新的權重。

如果您運行整個腳本,您將看到它執行與 Keras 卷積層完全相同的任務。

import tensorflow as tf
tf.random.set_seed(42)

# Fashion-MNIST: 28x28 grayscale images with integer labels 0-9.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()

AUTOTUNE = tf.data.experimental.AUTOTUNE


def _prepare(dataset):
    """Add a channel axis, scale pixels to [0, 1], then shuffle/batch/prefetch."""
    scaled = dataset.map(
        lambda image, label: (tf.divide(tf.expand_dims(image, axis=-1), 255),
                              label))
    return (scaled
            .shuffle(128, reshuffle_each_iteration=False, seed=11)
            .batch(8)
            .prefetch(AUTOTUNE))


train_ds = _prepare(tf.data.Dataset.from_tensor_slices((x_train, y_train)))
test_ds = _prepare(tf.data.Dataset.from_tensor_slices((x_test, y_test)))


class CustomLayer(tf.keras.layers.Layer):
    """Convolution layer implemented with ``tf.nn.conv2d`` and explicit
    ``tf.Variable`` weights.

    Because Keras tracks every ``tf.Variable`` owned by a ``Layer``
    subclass, the filter bank (and optional bias) created in ``build``
    are trained like the weights of a stock ``Conv2D`` layer.
    """

    def __init__(self, filters, kernel_size, padding, strides, activation,
                 kernel_initializer, bias_initializer, use_bias):
        super().__init__()
        # Store the configuration verbatim; weights are created in build().
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.padding = padding
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.strides = strides
        self.use_bias = use_bias
        self.w = None
        self.b = None

    def build(self, input_shape):
        # The input channel count is the trailing axis (NHWC layout).
        in_channels = input_shape[-1]
        # (filter_height, filter_width, in_channels, out_channels)
        kernel_shape = (*self.kernel_size, in_channels, self.filters)
        self.w = tf.Variable(
            initial_value=self.kernel_initializer(shape=kernel_shape,
                                                  dtype='float32'),
            trainable=True)
        if self.use_bias:
            bias_init = self.bias_initializer(shape=(self.filters,),
                                              dtype='float32')
            self.b = tf.Variable(initial_value=bias_init, trainable=True)

    def call(self, inputs, training=None):
        out = tf.nn.conv2d(inputs, filters=self.w,
                           strides=self.strides, padding=self.padding)
        if self.use_bias:
            out = out + self.b
        return self.activation(out)


class ModelWithCustomConvLayer(tf.keras.Model):
    """Small CNN whose convolution blocks come from an injected layer class.

    Passing ``CustomLayer`` or ``tf.keras.layers.Conv2D`` as ``conv_layer``
    yields identically configured models, so the two implementations can be
    compared head to head (all initializers are seeded).
    """

    def __init__(self, conv_layer):
        super().__init__()

        def conv_config():
            # Fresh kwargs (and fresh seeded initializers) per conv block.
            return dict(
                kernel_size=(3, 3),
                strides=(1, 1),
                activation=tf.nn.relu,
                padding='VALID',
                kernel_initializer=tf.initializers.GlorotUniform(seed=42),
                bias_initializer=tf.initializers.Zeros(),
                use_bias=True)

        self.conv1 = conv_layer(filters=16, **conv_config())
        self.maxp = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv2 = conv_layer(filters=32, **conv_config())
        self.flat = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(
            32, activation='relu',
            kernel_initializer=tf.initializers.GlorotUniform(seed=42))
        self.dense2 = tf.keras.layers.Dense(
            10, activation='softmax',
            kernel_initializer=tf.initializers.GlorotUniform(seed=42))

    def call(self, inputs, training=None, mask=None):
        # conv -> pool -> conv -> pool -> flatten -> dense -> dense
        x = inputs
        for layer in (self.conv1, self.maxp, self.conv2, self.maxp,
                      self.flat, self.dense1, self.dense2):
            x = layer(x)
        return x


# Build, compile, and inspect both models: one with the custom tf.nn.conv2d
# layer, one with the stock Keras Conv2D, using identically seeded initializers.
custom = ModelWithCustomConvLayer(CustomLayer)
custom.compile(loss=tf.losses.SparseCategoricalCrossentropy(), optimizer='adam',
               metrics=tf.metrics.SparseCategoricalAccuracy())
# Build with the concrete batch shape taken from the first training batch.
custom.build(input_shape=next(iter(train_ds))[0].shape)
custom.summary()

normal = ModelWithCustomConvLayer(tf.keras.layers.Conv2D)
normal.compile(loss=tf.losses.SparseCategoricalCrossentropy(), optimizer='adam',
               metrics=tf.metrics.SparseCategoricalAccuracy())
normal.build(input_shape=next(iter(train_ds))[0].shape)
normal.summary()

# Short runs (25 epochs x 10 steps) — enough to compare the learning curves.
history_custom = custom.fit(train_ds, validation_data=test_ds, epochs=25,
                            steps_per_epoch=10, verbose=0)
history_normal = normal.fit(train_ds, validation_data=test_ds, epochs=25,
                            steps_per_epoch=10, verbose=0)

import matplotlib.pyplot as plt

# Metrics recorded by fit(), each with a fixed color so the two models'
# curves are directly comparable.
_metric_colors = [('loss', 'red'),
                  ('sparse_categorical_accuracy', 'blue'),
                  ('val_loss', 'green'),
                  ('val_sparse_categorical_accuracy', 'orange')]

# Thick translucent solid lines: the custom tf.nn.conv2d layer.
for metric, color in _metric_colors:
    plt.plot(history_custom.history[metric], color=color, alpha=.5, lw=4)

# Thin dotted lines: the stock Keras Conv2D layer.
for metric, color in _metric_colors:
    plt.plot(history_normal.history[metric], ls=':', color=color)

plt.legend(['custom_' + key for key in history_custom.history.keys()] +
           ['keras_' + key for key in history_normal.history.keys()])
plt.title('Custom Conv Layer vs Keras Conv Layer')
plt.show()

虛線表示使用 Keras 層時的模型性能，實線是使用基於tf.nn.conv2d的自定義層時的性能。 當設置了隨機種子時，兩者完全相同。

在此處輸入圖像描述

暫無
暫無

聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.

 
粵ICP備18138465號  © 2020-2024 STACKOOM.COM