How can I implement a Gaussian noise attack in Keras?

I am trying to implement the code from https://arxiv.org/abs/1810.07248, which is about watermarking with deep learning, and I need to apply some attacks, such as Gaussian noise, during learning. I used the GaussianNoise layer, but the documentation says it is only active during training. Does this mean the noise layer does nothing when I test my network? What should I do if I want a Gaussian noise layer to work at test time, and how can I implement it? I also need other attacks, such as cropping, and I do not know how to implement them as layers.
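One way around this (a minimal sketch of my own, not taken from the paper's code): wrap the attack in a Lambda layer, which is not gated by the Keras learning phase, so the noise is also applied during predict()/evaluate(). The same pattern can emulate a crude cropping attack by masking out a region. Depending on the Keras version, calling GaussianNoise with training=True may also force the noise on at inference. The stddev of 0.5, the 8x8 crop size, and the layer names below are arbitrary choices for illustration.

import numpy as np
from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

def gaussian_attack(x, stddev=0.5):
    # Noise is drawn on every forward pass, including predict(),
    # because this Lambda is not tied to the learning phase.
    return x + K.random_normal(shape=K.shape(x), mean=0.0, stddev=stddev)

def crop_attack(x, size=8):
    # Crude cropping-style attack: zero out the top-left size x size patch.
    mask = np.ones((28, 28, 1), dtype='float32')
    mask[:size, :size, :] = 0.0
    return x * K.constant(mask)

inp = Input((28, 28, 1))
attacked = Lambda(gaussian_attack, name='gaussian_attack')(inp)
# attacked = Lambda(crop_attack, name='crop_attack')(inp)
demo_attack = Model(inp, attacked)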

from keras.layers import Input, Concatenate, GaussianNoise,Dropout,BatchNormalization
from keras.layers import Conv2D  # AtrousConv2D removed: not available in Keras 2 and unused
from keras.models import Model
from keras.datasets import mnist
from keras.callbacks import TensorBoard
from keras import backend as K
from keras import layers
import matplotlib.pyplot as plt
import tensorflow as tf
import keras as Kr
from keras.optimizers import SGD,RMSprop,Adam
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import numpy as np
import pylab as pl
import matplotlib.cm as cm
import keract
from matplotlib import pyplot
from keras import optimizers
from keras import regularizers

#-----------------building w train---------------------------------------------
w_expand=np.zeros((49999,28,28),dtype='float32')
wv_expand=np.zeros((9999,28,28),dtype='float32')
wt_random=np.random.randint(2, size=(49999,4,4))
wt_random=wt_random.astype(np.float32)
wv_random=np.random.randint(2, size=(9999,4,4))
wv_random=wv_random.astype(np.float32)
w_expand[:,:4,:4]=wt_random
wv_expand[:,:4,:4]=wv_random
x,y,z=w_expand.shape
w_expand=w_expand.reshape((x,y,z,1))
x,y,z=wv_expand.shape
wv_expand=wv_expand.reshape((x,y,z,1))

#-----------------building w test---------------------------------------------
w_test = np.random.randint(2,size=(1,4,4))
w_test=w_test.astype(np.float32)
wt_expand=np.zeros((1,28,28),dtype='float32')
wt_expand[:,0:4,0:4]=w_test
wt_expand=wt_expand.reshape((1,28,28,1))

#-----------------------encoder------------------------------------------------
#------------------------------------------------------------------------------
wtm=Input((28,28,1))
image = Input((28, 28, 1))
conv1 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl1e')(image)
conv2 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl2e')(conv1)
conv3 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl3e')(conv2)
#conv3 = Conv2D(8, (3, 3), activation='relu', padding='same', name='convl3e', kernel_initializer='Orthogonal',bias_initializer='glorot_uniform')(conv2)
BN=BatchNormalization()(conv3)
#DrO1=Dropout(0.25,name='Dro1')(BN)
encoded =  Conv2D(1, (5, 5), activation='relu', padding='same',name='encoded_I')(BN)

#-----------------------adding watermark---------------------------------------
#add_const = Kr.layers.Lambda(lambda x: x + Kr.backend.constant(w_expand))
#encoded_merged=keras.layers.Add()([encoded,wtm])
#add_const = Kr.layers.Lambda(lambda x: x + wtm)
#encoded_merged = add_const(encoded)
#encoder=Model(inputs=image, outputs= encoded_merged)
#encoded_merged = Concatenate(axis=3)([encoded, wtm])
add_const = Kr.layers.Lambda(lambda x: x[0] + x[1])
encoded_merged = add_const([encoded,wtm])
#encoder=Model(inputs=[image,wtm], outputs= encoded_merged ,name='encoder')
#encoder.summary()

#-----------------------decoder------------------------------------------------
#------------------------------------------------------------------------------
#deconv_input=Input((28,28,1),name='inputTodeconv')
#encoded_merged = Input((28, 28, 2))
deconv1 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl1d')(encoded_merged)
deconv2 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl2d')(deconv1)
deconv3 = Conv2D(64, (5, 5), activation='relu',padding='same', name='convl3d')(deconv2)
deconv4 = Conv2D(64, (5, 5), activation='relu',padding='same', name='convl4d')(deconv3)
BNd=BatchNormalization()(deconv4)  # was deconv3, which left deconv4 unused
#DrO2=Dropout(0.25,name='DrO2')(BNd)

decoded = Conv2D(1, (5, 5), activation='sigmoid', padding='same', name='decoder_output')(BNd) 
#model=Model(inputs=image,outputs=decoded)

model=Model(inputs=[image,wtm],outputs=decoded)

decoded_noise = GaussianNoise(0.5)(decoded)
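# NOTE: GaussianNoise only injects noise while the Keras learning phase is
# 'training' (i.e. inside fit()); during predict()/evaluate() it acts as an
# identity op, so this attack is not applied at test time.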

#----------------------w extraction------------------------------------
convw1 = Conv2D(16, (3,3), activation='relu', padding='same', name='conl1w')(decoded_noise)
convw2 = Conv2D(16, (3, 3), activation='relu', padding='same', name='convl2w')(convw1)
convw3 = Conv2D(16, (3, 3), activation='relu', padding='same', name='conl3w')(convw2)
convw4 = Conv2D(8, (3, 3), activation='relu', padding='same', name='conl4w')(convw3)
convw5 = Conv2D(8, (3, 3), activation='relu', padding='same', name='conl5w')(convw4)
convw6 = Conv2D(4, (3, 3), activation='relu', padding='same', name='conl6w')(convw5)
#BNed=BatchNormalization()(convw6)
#DrO3=Dropout(0.25, name='DrO3')(BNed)
pred_w = Conv2D(1, (1, 1), activation='sigmoid', padding='same', name='reconstructed_W')(convw6)  
# reconsider activation (is W positive?)
# should be filter=1 to match W
watermark_extraction=Model(inputs=[image,wtm],outputs=[decoded,pred_w])

watermark_extraction.summary()
#----------------------training the model--------------------------------------
#------------------------------------------------------------------------------
#----------------------Data preparation----------------------------------------

(x_train, _), (x_test, _) = mnist.load_data()
x_validation=x_train[1:10000,:,:]
x_train=x_train[10001:60000,:,:]
#
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_validation = x_validation.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))  # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))  # adapt this if using `channels_first` image data format
x_validation = np.reshape(x_validation, (len(x_validation), 28, 28, 1))

#---------------------compile and train the model------------------------------
#opt=SGD(momentum=0.99)
watermark_extraction.compile(optimizer='adam',
                             loss={'decoder_output': 'mse',
                                   'reconstructed_W': 'binary_crossentropy'},
                             loss_weights={'decoder_output': 0.1,
                                           'reconstructed_W': 1.0},
                             metrics=['mae'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
#rlrp = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=20, min_delta=1E-4, verbose=1)
mc = ModelCheckpoint('best_model_5x5F_dp_gn_add_adam.h5', monitor='val_loss', mode='min', verbose=1, save_best_only=True)
history=watermark_extraction.fit([x_train,w_expand], [x_train,w_expand],
          epochs=200,
          batch_size=32, 
          validation_data=([x_validation,wv_expand], [x_validation,wv_expand]),
          callbacks=[TensorBoard(log_dir='E:/concatnatenetwork', histogram_freq=0, write_graph=False),es,mc])
watermark_extraction.summary()
WEIGHTS_FNAME = 'v1_adam_model_5x5F_add_dp_gn.hdf'
watermark_extraction.save_weights(WEIGHTS_FNAME, overwrite=True)

My mistake was that I used the same network for testing as well. Instead, the network should be divided into two parts, and an image that has already undergone the Gaussian attack should be fed as input to the second part, rather than reusing the single end-to-end network: two new networks with the same structure and the learned weights.
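A sketch of how that split could look, reusing the layer names and tensors from the code above (my reconstruction of the idea, not the exact code): build an embedding model that maps [image, wtm] to the watermarked image, build an extraction model that reuses the trained extraction layers, and apply the Gaussian attack in NumPy between the two at test time.

from keras.layers import Input
from keras.models import Model

# Part 1: embedding network, [image, wtm] -> watermarked image ('decoder_output').
embedder = Model(inputs=[image, wtm],
                 outputs=watermark_extraction.get_layer('decoder_output').output)

# Part 2: extraction network, attacked image -> reconstructed watermark.
# Calling the trained layers on a new input reuses their learned weights.
attacked_input = Input((28, 28, 1))
feat = attacked_input
for name in ['conl1w', 'convl2w', 'conl3w', 'conl4w', 'conl5w', 'conl6w',
             'reconstructed_W']:
    feat = watermark_extraction.get_layer(name)(feat)
extractor = Model(attacked_input, feat)

# Test-time Gaussian attack applied outside the graph.
watermarked = embedder.predict([x_test[:1], wt_expand])
attacked = watermarked + np.random.normal(0.0, 0.5, watermarked.shape).astype('float32')
pred_w = extractor.predict(attacked)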
