

TensorFlow Learning Journey (1): Notes on the Basics

 小生凡一 2021-11-30

Session: Controlling Execution

A Session is what TensorFlow uses to control execution and produce outputs; calling Session.run() evaluates the operations you are interested in and returns their results.

import tensorflow as tf
# Session: controlling execution

tf.compat.v1.disable_eager_execution()   # make sure sess.run() works (disables TF2 eager execution)
matrix1 = tf.constant([[3, 3]])   # create two constant matrices
matrix2 = tf.constant([[2], [2]])
product = tf.matmul(matrix1, matrix2)    # matrix multiplication --> np.dot(m1, m2)
# Method 1
sess = tf.compat.v1.Session()
result = sess.run(product)
print(result)   # [[12]]  the result of the matrix product
sess.close()

# Method 2
# with tf.compat.v1.Session() as session:  # the session is closed automatically
#     result = session.run(product)
#     print(result)

Variable: Defining Variables

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
state = tf.Variable(0, name='counter')  # a TensorFlow Variable
# print(state.name)   # prints counter:0
one = tf.constant(1)  # a constant to add

new_value = tf.add(state, one)
update = tf.compat.v1.assign(state, new_value)   # op that assigns the new value to the variable

init = tf.compat.v1.global_variables_initializer()    # all variables must be initialized before they can be used
with tf.compat.v1.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)  # run the update op on every iteration
        print(sess.run(state))  # state has to be fetched through sess.run(); printing the tensor itself shows no value
        # 1
        # 2
        # 3

Placeholder: Controlling Inputs

import tensorflow as tf

# input1 = tf.compat.v1.placeholder(tf.float32, [2, 2])  # [2, 2] would require a 2x2 input
tf.compat.v1.disable_eager_execution()
input1 = tf.compat.v1.placeholder(tf.float32)
input2 = tf.compat.v1.placeholder(tf.float32)

output = tf.compat.v1.multiply(input1, input2)  # element-wise multiplication
with tf.compat.v1.Session() as sess:
    print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))  # [14.]

Activation Functions

An activation function injects non-linearity into the network, which lets it solve problems that a purely linear model cannot.
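
As a tiny illustration of my own (not part of the original notes), this is what activations such as ReLU and sigmoid do to raw pre-activation values before the full example below:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
x = tf.constant([-2.0, -0.5, 0.0, 1.5])
with tf.compat.v1.Session() as sess:
    print(sess.run(tf.nn.relu(x)))     # negative inputs are clipped to zero: [0. 0. 0. 1.5]
    print(sess.run(tf.nn.sigmoid(x)))  # every value is squashed into (0, 1)

Stacking such non-linear functions between layers is what lets a network fit curves rather than only straight lines.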

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

tf.compat.v1.disable_eager_execution()

def add_layer(inputs, in_size, out_size, n_layer, activation_function):
    # add a hidden layer, i.e. one more layer of neurons, so the network can keep being refined
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')  # weight matrix with random initial values
            tf.summary.histogram(layer_name + '/weights', Weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # biases are best not initialized to zero, so add 0.1
            tf.summary.histogram(layer_name + '/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.compat.v1.matmul(inputs, Weights, name='wb') + biases  # inputs * Weights + biases: the pre-activation prediction
        if activation_function is None:
            outputs = Wx_plus_b  # the layer stays linear, so no activation function is applied
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs


x_data = np.linspace(-1, 1, 300)[:, np.newaxis].astype(np.float32)  # 300 evenly spaced points in [-1, 1]; np.newaxis makes it a column vector
# x_data now has shape [300, 1]: 300 rows, 1 column (the input)
# x_data = tf.cast(tf.float32, x_data)
noise = np.random.normal(0, 0.05, x_data.shape)  # hand-made noise with standard deviation 0.05
# noise = tf.cast(tf.float32, noise)
y_data = np.square(x_data) - 0.5 + noise
# y_data = tf.cast(tf.float32, y_data)

with tf.name_scope('input'):  # input layer
    xs = tf.compat.v1.placeholder(tf.float32, [None, 1], name='x_input')  # placeholder for the input data; None allows any number of examples
    ys = tf.compat.v1.placeholder(tf.float32, [None, 1], name='y_input')  # shape [None, 1]: unknown number of rows, one column

l1 = add_layer(xs, 1, 10, 1, tf.nn.relu)
# first hidden layer: weight matrix of shape [1, 10]
predition = add_layer(l1, 10, 1, 2, None)
# output layer: weight matrix of shape [10, 1]

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - predition), axis=1))  # mean squared error
    tf.summary.scalar('loss', loss)


with tf.name_scope('train'):
    train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)
# learning rate 0.1: the optimizer corrects the error with this step size

init = tf.compat.v1.global_variables_initializer()

# the gradient-descent training op is defined above; now set up a live plot of the fit
fig = plt.figure()  # create a figure
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)  # scatter plot of the raw data
plt.ion()  # interactive mode so plt.show() does not block
plt.show()


with tf.compat.v1.Session() as sess:
    writer = tf.compat.v1.summary.FileWriter("logs/", sess.graph)
    sess.run(init)
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  # one training step on the whole dataset
        if i % 50 == 0:
            # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
            try:
                ax.lines.remove(lines[0])  # remove the previously drawn fit before drawing a new one
            except Exception:  # there is no line yet on the first iteration
                pass
            predition_value = sess.run(predition, feed_dict={xs: x_data, ys: y_data})
            lines = ax.plot(x_data, predition_value, 'r-', lw=5)  # draw the current predictions as a red line
            plt.pause(0.1)

# the loss keeps shrinking, so the network keeps learning and the error keeps going down
# 1.9184123
# 0.053955305
# 0.03053456
# 0.017190851
# 0.010993273
# 0.008209449
# 0.0067526144
# 0.0058726957
# 0.005269445
# 0.00477808
# 0.0044394922
# 0.0041766805
# 0.0039696493
# 0.003815
# 0.0036952242
# 0.0036034652
# 0.0035240129
# 0.0034543637
# 0.0033897285
# 0.0033306282

# Tip: this may fail with an error if memory runs low

TensorBoard

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


tf.compat.v1.disable_eager_execution()


def add_layer(inputs, in_size, out_size, n_layer, activation_function):  # add a hidden layer, i.e. one more layer of neurons
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')  # weight matrix with random initial values
            tf.summary.histogram(layer_name + '/weights', Weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # biases are best not initialized to zero, so add 0.1
            tf.summary.histogram(layer_name + '/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.compat.v1.matmul(inputs, Weights, name='wb') + biases  # inputs * Weights + biases: the pre-activation prediction
        if activation_function is None:
            outputs = Wx_plus_b  # the layer stays linear, so no activation function is applied
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs



x_data = np.linspace(-1, 1, 300)[:, np.newaxis].astype(np.float32)  # 300 evenly spaced points in [-1, 1]; np.newaxis makes it a column vector
# x_data now has shape [300, 1]: 300 rows, 1 column (the input)
# x_data = tf.cast(tf.float32, x_data)
noise = np.random.normal(0, 0.05, x_data.shape)  # hand-made noise with standard deviation 0.05
# noise = tf.cast(tf.float32, noise)
y_data = np.square(x_data) - 0.5 + noise
# y_data = tf.cast(tf.float32, y_data)

with tf.name_scope('input'):  # input layer
    xs = tf.compat.v1.placeholder(tf.float32, [None, 1], name='x_input')  # placeholder for the input data; None allows any number of examples
    ys = tf.compat.v1.placeholder(tf.float32, [None, 1], name='y_input')  # shape [None, 1]: unknown number of rows, one column

l1 = add_layer(xs, 1, 10, 1, tf.nn.relu)
# first hidden layer: weight matrix of shape [1, 10]
predition = add_layer(l1, 10, 1, 2, None)
# output layer: weight matrix of shape [10, 1]

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - predition), axis=1))  # mean squared error
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)
# learning rate 0.1: the optimizer corrects the error with this step size

init = tf.compat.v1.global_variables_initializer()

with tf.compat.v1.Session() as sess:
    writer = tf.compat.v1.summary.FileWriter("logs/", sess.graph)
    sess.run(init)
    merged = tf.compat.v1.summary.merge_all()
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  # one training step on the whole dataset
        if i % 50 == 0:
            result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})  # evaluate only the merged summaries here; add_summary expects a single serialized summary
            writer.add_summary(result, i)

# Tip: this may fail with an error if memory runs low
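
Once the script has written its event files into logs/, the graph and the recorded summaries can usually be viewed by running tensorboard --logdir logs from the same directory and opening http://localhost:6006 in a browser (assuming TensorBoard is installed alongside TensorFlow).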

Classification

Using handwritten-digit recognition (MNIST) as the example.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)


def add_layer(inputs, in_size, out_size, activation_function):  # add a layer (weights, biases, optional activation)
    Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')  # weight matrix with random initial values
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # biases are best not initialized to zero, so add 0.1
    Wx_plus_b = tf.compat.v1.matmul(inputs, Weights, name='wb') + biases
    # inputs * Weights + biases: the pre-activation prediction
    if activation_function is None:
        outputs = Wx_plus_b  # the layer stays linear, so no activation function is applied
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs


def compute_accracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    corrent_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))  # compare predicted and true labels
    accuracy = tf.reduce_mean(tf.cast(corrent_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result


tf.compat.v1.disable_eager_execution()
xs = tf.compat.v1.placeholder(tf.float32, [None, 784])  # 28*28
ys = tf.compat.v1.placeholder(tf.float32, [None, 10])

prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)

cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.math.log(prediction), axis=1))  # cross-entropy loss

train_step = tf.compat.v1.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.initialize_all_variables())
for i in range(1000):
    batch_xs, batch_ys = mnist_data.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})  # one training step on a batch of 100 examples
    if i % 50 == 0:
        print(compute_accracy(mnist_data.test.images, mnist_data.test.labels))

sess.close()

OverFitting

Overfitting means the model performs very well on the training set but poorly on the test set. An analogy: someone who looks strong inside their own small circle but is out of their depth anywhere else.

How to reduce overfitting

1. Increase the amount of data: the bigger the "circle" is, the less overfitting there will be.

2. Use regularization. For a model y = W*x + b:

L1 regularization:
cost = (W*x - Real_y)^2 + abs(W)
L2 regularization:
cost = (W*x - Real_y)^2 + W^2
L3 and beyond raise the weight term to the third power, and so on.

Dropout regularization: randomly drop neurons during training so the network never becomes dependent on any particular neurons (see the sketch below).
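
As a rough sketch of my own (not code from the original post), this is one way an L2 penalty and dropout could be wired into the same tf.compat.v1 style used above; the penalty weight 0.01, the layer sizes, and the variable names are arbitrary choices for illustration:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

xs = tf.compat.v1.placeholder(tf.float32, [None, 1])
ys = tf.compat.v1.placeholder(tf.float32, [None, 1])
keep_prob = tf.compat.v1.placeholder(tf.float32)  # probability of keeping each neuron

W1 = tf.Variable(tf.random.normal([1, 10]))
b1 = tf.Variable(tf.zeros([1, 10]) + 0.1)
hidden = tf.nn.relu(tf.matmul(xs, W1) + b1)
hidden = tf.compat.v1.nn.dropout(hidden, keep_prob)  # dropout: randomly zero out hidden neurons

W2 = tf.Variable(tf.random.normal([10, 1]))
b2 = tf.Variable(tf.zeros([1, 1]) + 0.1)
prediction = tf.matmul(hidden, W2) + b2

mse = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=1))
l2_penalty = 0.01 * (tf.reduce_sum(tf.square(W1)) + tf.reduce_sum(tf.square(W2)))  # the W^2 term from the L2 cost above
loss = mse + l2_penalty
train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)

During training keep_prob would be fed as something like 0.5; at evaluation time it is fed as 1.0 so that no neurons are dropped.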

CNN: Convolutional Neural Networks

How a CNN works

An RGB image is compressed step by step: the width and height shrink while the depth (number of channels) grows.

Finally the deep, flattened feature stack is fed into a classifier.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os

mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)

graph = tf.compat.v1.get_default_graph()

tf.compat.v1.disable_eager_execution()
xs = tf.compat.v1.placeholder(tf.float32, [None, 784])  # 28*28
ys = tf.compat.v1.placeholder(tf.float32, [None, 10])
keep_prob = tf.compat.v1.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 28, 28, 1])  # 28*28 pixels; grayscale images, so 1 channel
# print(x_image.shape)  # [n_samples, 28, 28, 1]



def add_layer(inputs, in_size, out_size, activation_function):  # add a layer (weights, biases, optional activation)
    Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')  # weight matrix with random initial values
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # biases are best not initialized to zero, so add 0.1
    Wx_plus_b = tf.compat.v1.matmul(inputs, Weights, name='wb') + biases  # inputs * Weights + biases: the pre-activation prediction
    if activation_function is None:
        outputs = Wx_plus_b  # the layer stays linear, so no activation function is applied
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs


def compute_accracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    corrent_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))  # compare predicted and true labels
    accuracy = tf.reduce_mean(tf.cast(corrent_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result


# helpers to initialize weights and biases
def weight_variable(shape):
    initial = tf.compat.v1.truncated_normal(shape, stddev=0.1)  # truncated normal with standard deviation 0.1
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.compat.v1.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):  # strides = [batch, height, width, channels]
    # [1, x_movement, y_movement, 1]
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')  # 2-D convolution with stride 1 in every direction


def max_pool_2x2(x):
    # [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 2x2 max pooling halves width and height


## conv1 layer
W_conv1 = weight_variable([5, 5, 1, 32])  # 5*5 patch, 1 input channel, 32 output channels
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # like y = W*x + b with ReLU for non-linearity; output size 28*28*32
h_pool1 = max_pool_2x2(h_conv1)  # output size 14*14*32; with SAME padding the output size is input size / stride

## conv2 layer
W_conv2 = weight_variable([5, 5, 32, 64])  # the feature map keeps getting smaller but deeper
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # like y = W*x + b with ReLU for non-linearity; output size 14*14*64
h_pool2 = max_pool_2x2(h_conv2)  # output size 7*7*64

## func1 layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])  # fully connected layer with 1024 neurons (1024 is an arbitrary choice)
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# flatten h_pool2 from [n_samples, 7, 7, 64] to [n_samples, 7*7*64]
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
f_fc1_drop = tf.compat.v1.nn.dropout(h_fc1, keep_prob)  # dropout with keep probability keep_prob

## func2 layer
W_fc2 = weight_variable([1024, 10])  # output layer: 1024 neurons down to 10 classes
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(f_fc1_drop, W_fc2) + b_fc2)  # softmax is applied directly to the raw logits
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.math.log(prediction), axis=1))  # cross-entropy loss

train_step = tf.compat.v1.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# configure the GPU so it does not run out of memory
gpu_no = '0'  # or '1'
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_no
# TensorFlow session configuration
config = tf.compat.v1.ConfigProto()
# let GPU memory grow on demand (important)
config.gpu_options.allow_growth = True
# limit the fraction of GPU memory that may be used
config.gpu_options.per_process_gpu_memory_fraction = 0.1
# pass the config when creating the session
sess = tf.compat.v1.InteractiveSession(config=config)

sess.run(tf.compat.v1.initialize_all_variables())
for i in range(1000):
    batch_xs, batch_ys = mnist_data.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        print(compute_accracy(mnist_data.test.images, mnist_data.test.labels))

sess.close()

Saver: Saving and Restoring

import tensorflow as tf
import numpy as np

tf.compat.v1.disable_eager_execution()
# the commented-out block below was run once to create and save W and b into my_net/save_net.ckpt:
# W = tf.Variable([[1, 2, 3], [3, 4, 5]], dtype=tf.float32, name='weights')
# b = tf.Variable([[1, 2, 3]], dtype=tf.float32, name='biases')
# init = tf.compat.v1.initialize_all_variables()
# saver = tf.compat.v1.train.Saver()
# with tf.compat.v1.Session() as sess:
#     sess.run(init)
#     save_path = saver.save(sess,"my_net/save_net.ckpt")
#     print("Save to path:",save_path)

W = tf.Variable(np.arange(6).reshape((2, 3)), dtype=tf.float32, name='weights')
b = tf.Variable(np.arange(3).reshape((1, 3)), dtype=tf.float32, name='biases')

saver = tf.compat.v1.train.Saver()
with tf.compat.v1.Session() as sess:
    saver.restore(sess, "my_net/save_net.ckpt")
    print("weights:", sess.run(W))
    print("biases:", sess.run(b))
# weights: [[1. 2. 3.]   this is the matrix that was saved above
# [3. 4. 5.]]
#biases: [[1. 2. 3.]]

RNN: Recurrent Neural Networks

An RNN augments an ordinary network with memory: each output is a summary of everything that came before.

Unrolled over time, each state is the accumulation of the earlier content; in time order, every output carries a memory of the previous steps.

Vanishing gradients

Exploding gradients

During backpropagation through the recurrence, the gradient is multiplied by a factor at every time step (the chain rule). If that factor is < 1 the gradient keeps shrinking, which is the vanishing-gradient problem (for example, 0.9^50 ≈ 0.005).

If the factor is > 1 it keeps growing instead, which leads to exploding gradients (for example, 1.1^50 ≈ 117).

Either way, a useful error signal never reaches the earliest states during backpropagation.

To address these problems:

The LSTM model (Long Short-Term Memory):

Gate units control which inputs are let through, i.e. whether the current time step should be recorded at all.

Write: whether to write; Read: whether to read; Forget: whether to forget the main storyline.

The gates keep the side storyline from corrupting the main storyline, and in the end that decides whether we get a HappyEnd or a BadEnd.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os

mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)

graph = tf.compat.v1.get_default_graph()
tf.compat.v1.disable_eager_execution()

lr = 0.001
training_iters = 10000
batch_size = 128
display_step = 10

n_input = 28  # MNIST input size: pixels per row of the image
n_step = 28  # number of time steps: rows per image
n_hidden_unis = 128  # number of hidden units
n_classes = 10  # number of classes (digits 0-9)

x = tf.compat.v1.placeholder(tf.float32, [None, n_step, n_input])  # 28*28
y = tf.compat.v1.placeholder(tf.float32, [None, n_classes])

keep_prob = tf.compat.v1.placeholder(tf.float32)

weights = {
    'in': tf.Variable(tf.random.normal([n_input, n_hidden_unis])),  # (28,128)
    'out': tf.Variable(tf.random.normal([n_hidden_unis, n_classes]))  # (128,10)
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_unis, ])),  # (128,)
    'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))  # (10,) -- wrapped in tf.Variable so it is trainable, like 'in'
}


def RNN(X, weights, biases):
    # shape of the input to the hidden layer:
    # X has shape (128 batch, 28 steps, 28 inputs)
    # ==> (128*28, 28 inputs)
    X = tf.reshape(X, [-1, n_input])
    # X_in ==> (128 batch * 28 steps, 128 hidden)
    X_in = tf.matmul(X, weights['in']) + biases['in']
    # X_in ==> (128 batch, 28 steps, 128 hidden)
    X_in = tf.reshape(X_in, [-1, n_step, n_hidden_unis])

    # cell
    lstm_cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(n_hidden_unis, forget_bias=1.0, state_is_tuple=True)
    # the state is a tuple (c_state, m_state): c_state is the main storyline (cell state), m_state the side storyline (hidden state)
    _init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
    outputs, states = tf.compat.v1.nn.dynamic_rnn(lstm_cell, X_in, initial_state=_init_state, time_major=False)
    # outputs holds the output of every time step
    results = tf.matmul(states[1], weights['out']) + biases['out']  # states[1] is the hidden state (side storyline), states[0] the cell state (main storyline)
    return results


prediction = RNN(x, weights, biases)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))  # cross-entropy loss; logits and labels must be passed by name and in this order
train_op = tf.compat.v1.train.AdamOptimizer(lr).minimize(cross_entropy)

correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# configure the GPU so it does not run out of memory
gpu_no = '0'  # or '1'
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_no
# TensorFlow session configuration
config = tf.compat.v1.ConfigProto()
# let GPU memory grow on demand (important)
config.gpu_options.allow_growth = True
# limit the fraction of GPU memory that may be used
config.gpu_options.per_process_gpu_memory_fraction = 0.1
# pass the config when creating the session
sess = tf.compat.v1.InteractiveSession(config=config)
sess.run(tf.compat.v1.initialize_all_variables())
step = 0
while step * batch_size < training_iters:
    batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
    batch_xs = batch_xs.reshape([batch_size, n_step, n_input])  # reshape each 784-pixel image to 28 steps of 28 inputs
    sess.run([train_op], feed_dict={
        x: batch_xs,
        y: batch_ys
    })
    if step % 20 == 0:
        print(sess.run(accuracy, feed_dict={
            x: batch_xs,
            y: batch_ys
        }))
sess.close()

Autoencoder (Unsupervised Learning)

An autoencoder compresses data (such as an image) and then decompresses it. It is a form of unsupervised learning, and the compression keeps the amount of information that has to be processed from growing so large that reading it becomes slow.


Usually only the input data X is needed: the encoder compresses it and the decoder reconstructs it, as in the sketch below.
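
A minimal sketch of my own (not from the original notes) of such an encoder/decoder pair in the same tf.compat.v1 style, assuming the MNIST input_data helper used earlier is available; the layer sizes and learning rate are arbitrary:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.compat.v1.disable_eager_execution()
mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)

xs = tf.compat.v1.placeholder(tf.float32, [None, 784])  # the input is also the target

# encoder: 784 -> 128 -> 64 (compress)
W_e1 = tf.Variable(tf.random.normal([784, 128], stddev=0.1))
b_e1 = tf.Variable(tf.zeros([128]) + 0.1)
W_e2 = tf.Variable(tf.random.normal([128, 64], stddev=0.1))
b_e2 = tf.Variable(tf.zeros([64]) + 0.1)
h_e = tf.nn.sigmoid(tf.matmul(xs, W_e1) + b_e1)
encoded = tf.nn.sigmoid(tf.matmul(h_e, W_e2) + b_e2)

# decoder: 64 -> 128 -> 784 (decompress)
W_d1 = tf.Variable(tf.random.normal([64, 128], stddev=0.1))
b_d1 = tf.Variable(tf.zeros([128]) + 0.1)
W_d2 = tf.Variable(tf.random.normal([128, 784], stddev=0.1))
b_d2 = tf.Variable(tf.zeros([784]) + 0.1)
h_d = tf.nn.sigmoid(tf.matmul(encoded, W_d1) + b_d1)
decoded = tf.nn.sigmoid(tf.matmul(h_d, W_d2) + b_d2)

loss = tf.reduce_mean(tf.square(xs - decoded))  # reconstruction error: no labels are needed
train_step = tf.compat.v1.train.AdamOptimizer(0.01).minimize(loss)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for i in range(1000):
        batch_xs, _ = mnist_data.train.next_batch(128)  # the labels are ignored
        sess.run(train_step, feed_dict={xs: batch_xs})
        if i % 200 == 0:
            print(sess.run(loss, feed_dict={xs: batch_xs}))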

That is everything I have learned about TensorFlow at this stage.

    轉(zhuǎn)藏 分享 獻(xiàn)花(0

    0條評論

    發(fā)表

    請遵守用戶 評論公約

    類似文章