Tensorflow基於神經網路進行多元變量回歸分析
阿新 • 發佈:2018-12-02
# Multivariate regression with a single-hidden-layer neural network (TensorFlow 1.x).
#
# Reads train.txt (19 columns: 18 feature columns + 1 target column) and
# test.txt (column 0 appears to be a row id, followed by 18 feature columns —
# NOTE(review): confirm test.txt layout against the data source), trains for
# 15000 steps with a momentum optimizer, writes "id,prediction" lines to
# re.txt, and saves a model checkpoint under model/.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn import preprocessing

# Load the data sets.
train_data = np.array(pd.read_csv("train.txt"))
test_data = np.array(pd.read_csv("test.txt"))

# Feature matrix X (18 variables, columns 0-17) and target Y (column 18).
train_feature = np.array(train_data[:, 0:18])
train_label = np.array(train_data[:, [18]])
# Test features: column 0 holds the record id, so features are columns 1-18.
test_x = np.array(test_data[:, 1:19])
print(test_data.shape)

# Build the network.
# Placeholders: 18 input features per sample, 1 regression target.
x = tf.placeholder(tf.float32, [None, 18])
y = tf.placeholder(tf.float32, [None, 1])

# Standardize features (zero mean, unit variance) for both sets.
train_feature = preprocessing.scale(train_feature)
test_xs = preprocessing.scale(test_x)
print(test_xs.shape)

# Hidden layer: 18 inputs -> 20 units, tanh activation.
Weights_L1 = tf.Variable(tf.random_normal([18, 20]))
biases_L1 = tf.Variable(tf.zeros([1, 20]))
Wx_plus_b_L1 = tf.matmul(x, Weights_L1) + biases_L1
L1 = tf.nn.tanh(Wx_plus_b_L1)

# Output layer: 20 units -> 1 linear output.
Weights_L2 = tf.Variable(tf.random_normal([20, 1]))
# BUG FIX: the bias must match the single output unit — [1, 1], not [1, 20].
# The original [1, 20] bias broadcast the prediction from [None, 1] to
# [None, 20], silently corrupting both the loss and the predictions.
biases_L2 = tf.Variable(tf.zeros([1, 1]))
Wx_plus_b_L2 = tf.matmul(L1, Weights_L2) + biases_L2
prediction = Wx_plus_b_L2

# Mean-squared-error cost.
loss = tf.reduce_mean(tf.square(y - prediction))
saver = tf.train.Saver()

# Momentum optimizer (learning_rate=0.05, momentum=0.05); SGD etc. would
# also work here.
train_step = tf.train.MomentumOptimizer(0.05, 0.05).minimize(loss)

with tf.Session() as sess:
    # Initialize all variables.
    sess.run(tf.global_variables_initializer())
    # writer = tf.summary.FileWriter("gra", graph=tf.get_default_graph())
    print(sess.run(loss, feed_dict={x: train_feature, y: train_label}))
    for i in range(15000):
        sess.run(train_step, feed_dict={x: train_feature, y: train_label})
        print(i)
        # print(sess.run(L1, feed_dict={x: train_feature, y: train_label}))
    print(sess.run(loss, feed_dict={x: train_feature, y: train_label}))

    # Predict on the test set and write one "id,prediction" line per row.
    prd = sess.run(prediction, feed_dict={x: test_xs})
    # Context manager guarantees the file is closed even on error
    # (the original used bare open()/close()).
    with open('re.txt', 'w') as f:
        for i in range(test_data.shape[0]):
            f.writelines(str(int(test_data[i][0])) + "," + str(prd[i][0]) + "\n")

    # Save the trained model checkpoint.
    saver.save(sess, "model/my-model")
    print(test_data.shape)
    print(test_data)