TensorFlow Learning Notes -- Basics Summary

Over the past few days I finally finished working through the TensorFlow basics. Some of it is still a bit hazy, but getting started is always the hard part. Below is the code I wrote during these sessions, tidied up.

The generateds.py file:

``` python
import numpy as np
import matplotlib.pyplot as plt

seed = 2

def generateds():
    # Seed the random-number generator so the dataset is reproducible
    rdm = np.random.RandomState(seed)
    # Draw a 300x2 matrix: 300 coordinate points (x0, x1)
    X = rdm.randn(300, 2)

    # Label a point 1 if it falls inside the circle x0^2 + x1^2 < 2, otherwise 0
    Y_ = [int(x0 * x0 + x1 * x1 < 2) for (x0, x1) in X]

    # Map each label to a color: 1 -> 'red', everything else -> 'blue'
    Y_c = [['red' if y else 'blue' for y in Y_]]

    # Reshape the data and labels: -1 lets numpy infer the row count,
    # so X ends up with 2 columns and Y_ with 1 column
    X = np.vstack(X).reshape(-1, 2)
    Y_ = np.vstack(Y_).reshape(-1, 1)

    return X, Y_, Y_c

if __name__ == "__main__":
    X, Y_, Y_c = generateds()
    print("X:\n", X)      # 300x2
    print("Y_:\n", Y_)    # 300x1
    print("Y_c:\n", Y_c)  # 1x300
```
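
As a quick sanity check, the generated points can be plotted before any training; points inside the circle x0^2 + x1^2 < 2 should show up red. A minimal sketch (assuming the file above is saved as generateds.py in the working directory):

``` python
import numpy as np
import matplotlib.pyplot as plt
import generateds  # the file above

X, Y_, Y_c = generateds.generateds()
# Color each point by its label to see the roughly circular class boundary
plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
plt.show()
```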

The `forward.py` file:
``` python
# coding:utf-8

import tensorflow as tf

def get_weight(shape, regularizer):
    # Weight variable drawn from a normal distribution; its L2 penalty
    # is registered in the 'losses' collection for the total loss later
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    tf.add_to_collection("losses", tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    # Bias variable initialized to a small constant
    b = tf.Variable(tf.constant(0.01, shape=shape))
    return b


def forward(x, regularizer):
    # Hidden layer: 2 inputs -> 11 ReLU units
    w1 = get_weight([2, 11], regularizer)
    b1 = get_bias([11])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    # Output layer: 11 -> 1, no activation (raw score)
    w2 = get_weight([11, 1], regularizer)
    b2 = get_bias([1])
    y = tf.matmul(y1, w2) + b2
    return y

```
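
Each call to get_weight() registers one L2 penalty in the 'losses' collection, which the training script later sums with tf.add_n. A minimal sketch of inspecting that collection (assuming TensorFlow 1.x, where tf.contrib is available):

``` python
import tensorflow as tf
import forward  # the file above

x = tf.placeholder(tf.float32, shape=(None, 2))
y = forward.forward(x, 0.01)

# Two entries are expected: the L2 penalties for w1 and w2
print(tf.get_collection('losses'))
```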

Finally, the code that uses all of the above to fit the model:


``` python
# coding:utf-8

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import generateds
import forward

STEPS = 40000
BATCH_SIZE = 30
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.999
REGULARIZER = 0.01


def backward():
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))

    X, Y_, Y_c = generateds.generateds()
    y = forward.forward(x, REGULARIZER)

    # global_step counts training steps and drives the exponential learning-rate decay
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, 300 / BATCH_SIZE,
                                               LEARNING_RATE_DECAY, staircase=True)

    # Define the loss: mean squared error plus the collected L2 regularization terms
    loss_mse = tf.reduce_mean(tf.square(y - y_))
    loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))

    # Define the training step; pass global_step so the learning rate actually decays
    train_step = tf.train.AdadeltaOptimizer(learning_rate).minimize(loss_total, global_step=global_step)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        for i in range(STEPS):
            # Cycle through the 300 samples in mini-batches of BATCH_SIZE
            start = (i * BATCH_SIZE) % 300
            end = start + BATCH_SIZE
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})

            if i % 2000 == 0:
                loss_v = sess.run(loss_total, feed_dict={x: X, y_: Y_})
                print("After %d training steps, loss on all data is %f" % (i, loss_v))

        # Evaluate the trained network on a dense grid to get the decision boundary
        xx, yy = np.mgrid[-3:3:0.01, -3:3:0.01]
        grid = np.c_[xx.ravel(), yy.ravel()]
        probs = sess.run(y, feed_dict={x: grid})
        probs = probs.reshape(xx.shape)

    # Plot the data points and the contour where the network output equals 0.5
    plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
    plt.contour(xx, yy, probs, levels=[.5])
    plt.show()


if __name__ == "__main__":
    backward()
```
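
Because the labels come from x0^2 + x1^2 < 2, the learned contour at level 0.5 should roughly trace a circle of radius sqrt(2) ≈ 1.414. As a hedged sketch (not part of the original script), the true boundary could be overlaid for comparison by adding something like this right before plt.show():

``` python
import numpy as np               # already imported in the script above
import matplotlib.pyplot as plt  # already imported in the script above

# Overlay the true class boundary x0^2 + x1^2 = 2: a dashed circle of radius sqrt(2)
theta = np.linspace(0, 2 * np.pi, 200)
plt.plot(np.sqrt(2) * np.cos(theta), np.sqrt(2) * np.sin(theta), linestyle='--')
```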