Implementing Logistic Regression (LR) with TensorFlow

A minimal logistic regression (LR) implementation using the TensorFlow 1.x graph API.
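For context (standard logistic regression background, not spelled out in the original post): the model scores each sample with a linear function, squashes the score through a sigmoid to get the positive-class probability, and is trained by minimizing the binary cross-entropy. The hypothesis and loss nodes in the code below implement exactly this:

\hat{y} = \sigma(Xw + b) = \frac{1}{1 + e^{-(Xw + b)}}, \qquad
L = -\frac{1}{m}\sum_{i=1}^{m}\left[y_i \log \hat{y}_i + (1 - y_i)\log(1 - \hat{y}_i)\right]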

import tensorflow as tf
import numpy as np
tf.reset_default_graph()  # reset the default graph

FEATURE_NUM = 8  # number of features
with tf.name_scope("input"):
    # x: feature matrix, one row per sample; y: integer class labels (0 or 1)
    x = tf.placeholder(tf.float32, shape=[None, FEATURE_NUM])
    y = tf.placeholder(tf.int32, shape=[None])

with tf.name_scope("lr"):
    # initialize the weight vector from a truncated normal distribution
    weight_init = tf.truncated_normal(shape=[FEATURE_NUM, 1], mean=0.0, stddev=1.0)
    weight = tf.Variable(weight_init)
    bias = tf.Variable([0.0])
    # reshape labels into a column vector so they line up with the predictions
    y_expand = tf.reshape(y, shape=[-1, 1])
    # predicted probability of the positive class
    hypothesis = tf.sigmoid(tf.matmul(x, weight) + bias)

with tf.name_scope("loss"):
y_float = tf.to_float(y_expand)
likelyhood = -(y_float tf.log(hypothesis) + (1.0 - y_float) (tf.log(1.0 - hypothesis)))
los; d ? C 4 Is = tf.reduz 1 8 V , } z 5ce_mean(likelyhood, axis=0)
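    # NOTE: taking log() of a sigmoid output that has saturated at exactly 0 or 1
    # yields -inf/NaN; a numerically safer variant is sketched after the full script.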

LEARNING_RATE = 0.02  # learning rate
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
    training_op = optimizer.minimize(loss)

THRESHOLD = 0.5  # decision threshold
with tf.name_scope("eval"):
    # map probabilities and labels to {-1, +1} around the threshold,
    # then measure how often they agree
    predictions = tf.sign(hypothesis - THRESHOLD)
    labels = tf.sign(y_float - THRESHOLD)
    corrections = tf.equal(predictions, labels)
    accuracy = tf.reduce_mean(tf.cast(corrections, tf.float32))

init = tf.global_variables_initializer()  # op that initializes all variables

EPOCH = 10  # number of training epochs
with tf.Session() as sess:
    sess.run(init)
    for i in range(EPOCH):
        # train and evaluate on small random batches (dummy data, for illustration only)
        _training_op, _loss = sess.run([training_op, loss],
                                       feed_dict={x: np.random.rand(10, 8),
                                                  y: np.random.randint(2, size=10)})
        _accuracy = sess.run([accuracy],
                             feed_dict={x: np.random.rand(5, 8),
                                        y: np.random.randint(2, size=5)})
        print("epoch:", i, _loss, _accuracy)

Reference
https://www.cnblogs.com/jhc888007/p/10390282.html