Denoising Autoencoder Network

Introduction

Reconstructing the input alone is not enough to learn good features. In practice, the features should also be resistant to interference: when the input is perturbed to some degree, the extracted features should remain essentially unchanged. To achieve this, noise is added to make the learning task harder; a model trained under these conditions is more robust. This is the idea behind the denoising autoencoder.

A denoising autoencoder (DA) builds on the ordinary autoencoder: noise is added to the training inputs, while the reconstruction target remains the original, uncorrupted sample. The autoencoder must therefore learn to remove the noise and recover the true, uncontaminated input. This forces the encoder to learn a more robust feature representation of the input signal, i.e. one with stronger generalization ability.
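Written as a formula (a minimal statement of the objective; $\tilde{x}$ denotes the corrupted input and $f$, $g$ denote the encoder and decoder, notation introduced here only for illustration), the network is trained to minimize the reconstruction error against the clean sample:

$$\min_{f,\,g}\ \mathbb{E}\left[\,\lVert x - g(f(\tilde{x})) \rVert^{2}\,\right]$$

This is the same squared-error cost used in the code below.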

In practice, there are two ways to inject noise artificially:

(1) When building the training set, additionally select some samples from outside the original dataset.

(2) Modify the data already in the existing training set (the corruption sketch below illustrates this approach).
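The full example later in this post uses the second approach, adding Gaussian noise to each mini-batch. As a minimal sketch (the helper name corrupt and the masking variant are illustrative assumptions, not part of the original code), corrupting a batch might look like this:

import numpy as np

def corrupt(batch, noise_std=0.3, mask_rate=0.0):
    # noise_std > 0: add Gaussian noise, the scheme used in the full code below
    noisy = batch + noise_std * np.random.randn(*batch.shape)
    # mask_rate > 0: randomly zero out a fraction of the inputs
    # (a common alternative corruption, shown here only for illustration)
    if mask_rate > 0:
        keep = np.random.rand(*batch.shape) >= mask_rate
        noisy = noisy * keep
    return noisy

# Example: corrupt a batch of MNIST-sized inputs
batch = np.random.rand(256, 784).astype(np.float32)
noisy_batch = corrupt(batch, noise_std=0.3)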

Code

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/data/", one_hot=True)

train_X = mnist.train.images
train_Y = mnist.train.labels
test_X = mnist.test.images
test_Y = mnist.test.labels

tf.reset_default_graph()

n_input = 784
n_hidden_1 = 256

x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_input])
dropout_keep_prob = tf.placeholder("float")

weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_1])),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_input]))
}
biases = {
    'b1': tf.Variable(tf.zeros([n_hidden_1])),
    'b2': tf.Variable(tf.zeros([n_hidden_1])),
    'out': tf.Variable(tf.zeros([n_input]))
}

def denoise_auto_encoder(_X, _weights, _biases, _keep_prob):
    # Two sigmoid hidden layers with dropout, then a sigmoid reconstruction layer
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
    layer_1out = tf.nn.dropout(layer_1, _keep_prob)
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1out, _weights['h2']), _biases['b2']))
    layer_2out = tf.nn.dropout(layer_2, _keep_prob)
    return tf.nn.sigmoid(tf.matmul(layer_2out, _weights['out']) + _biases['out'])

reconstruction = denoise_auto_encoder(x, weights, biases, dropout_keep_prob)
cost = tf.reduce_mean(tf.pow(reconstruction - y, 2))
optm = tf.train.AdamOptimizer(0.01).minimize(cost)

epochs = 20
batch_size = 256
disp_step = 2

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    print("Start training")
    for epoch in range(epochs):
        num_batch = int(mnist.train.num_examples / batch_size)
        total_cost = 0.
        for i in range(num_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Corrupt the inputs with additive Gaussian noise
            batch_xs_noisy = batch_xs + 0.3 * np.random.randn(batch_size, 784)
            feeds = {x: batch_xs_noisy, y: batch_xs, dropout_keep_prob: 1.}
            sess.run(optm, feed_dict=feeds)
            total_cost += sess.run(cost, feed_dict=feeds)
        if epoch % disp_step == 0:
            print("Epoch %02d/%02d average cost: %.6f"
                  % (epoch, epochs, total_cost / num_batch))
    print("Training finished")

    # Visualize: noisy test inputs (row 1), clean originals (row 2), reconstructions (row 3)
    show_num = 10
    test_noisy = mnist.test.images[:show_num] + 0.3 * np.random.randn(show_num, 784)
    encode_decode = sess.run(
        reconstruction, feed_dict={x: test_noisy, dropout_keep_prob: 1.})
    f, a = plt.subplots(3, 10, figsize=(10, 3))
    for i in range(show_num):
        a[0][i].imshow(np.reshape(test_noisy[i], (28, 28)))
        a[1][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[2][i].matshow(np.reshape(encode_decode[i], (28, 28)), cmap=plt.get_cmap('gray'))
    plt.show()