TensorFlow Study Notes -- Basics Summary

付威 · 2019-04-08 · 8 min read

Over the past few days I finally worked through the TensorFlow basics. I am still a little foggy on parts of it, but getting started is always the hard part. Below is the code from these sessions, cleaned up and organized.

The generateds.py file:

import numpy as np
import matplotlib.pyplot as plt

seed=2  

def generateds():
    # Random number generator seeded for reproducibility
    rdm=np.random.RandomState(seed)
    # Draw a 300x2 matrix of random numbers: 300 (x0, x1) coordinate pairs
    X=rdm.randn(300,2)

    # Label each point 1 if it lies inside the circle x0^2 + x1^2 < 2, otherwise 0
    Y_=[int(x0*x0+x1*x1<2) for (x0,x1) in X]

    # Map each label to a color: 1 -> 'red', everything else -> 'blue' (used for plotting)
    Y_c=[['red' if y else 'blue' for y in Y_]]

    # Reshape the dataset X and the labels Y_: the -1 tells NumPy to infer the number of rows
    # from the second dimension, so X ends up with 2 columns and Y_ with 1 column
    X=np.vstack(X).reshape(-1,2)
    Y_=np.vstack(Y_).reshape(-1,1)

    return X,Y_,Y_c

if __name__=="__main__":
    X,Y_,Y_c=generateds()
    print("X:\n",X) # 300*2
    print("Y_:\n",Y_)# 300*1 
    print("Y_c:\n",Y_c) # 1*300

The forward.py file:

# coding:utf-8

import tensorflow as tf

def get_weight(shape,regularizer):
    # Create a weight variable and register its L2 regularization loss in the "losses" collection
    w=tf.Variable(tf.random_normal(shape),dtype=tf.float32)
    tf.add_to_collection("losses",tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    # Create a bias variable initialized to a small constant
    b=tf.Variable(tf.constant(0.01,shape=shape))
    return b


def forward(x,regularizer):
    # Hidden layer: 2 inputs -> 11 units with ReLU activation
    # (pass the regularizer argument through instead of hard-coding 0.01)
    w1=get_weight([2,11],regularizer)
    b1=get_bias([11])
    y1=tf.nn.relu(tf.matmul(x,w1)+b1)

    # Output layer: 11 units -> 1 raw output score, no activation
    w2=get_weight([11,1],regularizer)
    b2=get_bias([1])
    y=tf.matmul(y1,w2)+b2
    return y
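
As a quick check that the graph wires up as expected, here is a minimal sketch of my own (not part of the original files; it assumes forward.py is importable) that builds the forward pass, feeds a dummy batch, and prints the output shape along with the number of regularization terms collected in "losses":

import numpy as np
import tensorflow as tf
import forward

x = tf.placeholder(tf.float32, shape=(None, 2))
y = forward.forward(x, 0.01)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(y, feed_dict={x: np.random.randn(5, 2)})
    print(out.shape)                          # (5, 1): one score per sample
    print(len(tf.get_collection("losses")))   # 2: one L2 term per weight matrix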

Finally, the training code (backward.py) that ties everything together and fits the model:

# coding:utf-8

import tensorflow as tf
import numpy as np
import matplotlib.pyplot  as plt
import generateds
import forward

STEPS = 40000
BATCH_SIZE = 30
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.999
REGULARIZER = 0.01


def backward():
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))

    X, Y_, Y_c = generateds.generateds()
    y = forward.forward(x, REGULARIZER)

    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, 300 / BATCH_SIZE, LEARNING_RATE_DECAY,
                                               staircase=True)

    # Loss function: mean squared error on the data term
    loss_mse = tf.reduce_mean(tf.square(y - y_))

    # Total loss = MSE + the L2 regularization terms collected in forward.py
    loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))
    # Backpropagation with regularization; pass global_step so the learning rate actually decays
    train_step = tf.train.AdadeltaOptimizer(learning_rate).minimize(loss_total, global_step=global_step)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        for i in range(STEPS):
            start = (i * BATCH_SIZE) % 300
            end = start + BATCH_SIZE
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})

            if i % 2000 == 0:
                loss_v = sess.run(loss_total, feed_dict={x: X, y_: Y_})
                print("After %d training steps,loss on all data is %f" % (i, loss_v))

        # Evaluate the trained network on a dense grid covering [-3, 3] x [-3, 3]
        xx, yy = np.mgrid[-3:3:0.01, -3:3:0.01]
        grid = np.c_[xx.ravel(), yy.ravel()]
        probs = sess.run(y, feed_dict={x: grid})
        probs = probs.reshape(xx.shape)

    # Plot the data points and the y = 0.5 contour, i.e. the learned decision boundary
    plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
    plt.contour(xx, yy, probs, levels=[.5])
    plt.show()


if __name__ == "__main__":
    backward()
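
Because the labels are 0/1 and the loss is plain MSE, the network output y is a real-valued score, and the contour at 0.5 is what separates the two classes. If you also want a numeric accuracy after training, here is a minimal sketch of my own (not in the original code; it assumes you paste it inside the same with tf.Session() block, right after the training loop):

        # Threshold the raw scores at 0.5 to get hard 0/1 predictions, then compare with labels
        preds = sess.run(y, feed_dict={x: X})
        pred_labels = (preds > 0.5).astype(int)
        accuracy = np.mean(pred_labels == Y_)
        print("accuracy on the training set: %.3f" % accuracy)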


(End of post)

Author: 付威

Blog: http://blog.laofu.online


If there are any intellectual property or copyright issues, or factual errors, please point them out.

This post is original content from 付威's blog. Feel free to repost under attribution, non-commercial, no-derivatives terms, following the Creative Commons 3.0 license.

For discussion, join QQ group 113249828 or email me at laofu_online@163.com
