
Train a classifier and classify the generated results

import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np 
import os 
np.set_printoptions(threshold=np.inf)
#######################################################################
# Load the data and labels
finaldata = np.load("data/單張999分類訓練集.npy") # 316, 999
finalval = np.load("data/單張999分類測試集.npy")  # 316, 999
np.random.seed(1000)
np.random.shuffle(finaldata)
np.random.shuffle(finalval)
########################################################################
# Hyperparameters
batch_size = 100
learning_rate = 0.001
epochs = 10000
n_class = 999
#######################################################################
# One-hot encoding with NumPy
def np_one_hot(labels):
  n_labels = np.max(labels) + 1
  one_hot = np.eye(n_labels)[labels]
  
  return one_hot
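# Quick sanity check (a sketch, not part of the original pipeline):
# np_one_hot(np.array([0, 2, 1])) returns
#   [[1., 0., 0.],
#    [0., 0., 1.],
#    [0., 1., 0.]]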
######################################################################
# Split the data from the labels and one-hot encode the labels
def input_data(finaldata):
  data = []
  label = []
  for i in range(len(finaldata)):
    data.append(finaldata[i][0])
    label.append(finaldata[i][1])
  data = np.array(data)
  data = data.reshape(-1,16,16,1)
  label = np.array(label)
  label = np_one_hot(label)
  
  return data, label
# Training set
data, label = input_data(finaldata)
# Test set
data_val, label_val = input_data(finalval)
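# Optional sanity check (the exact row counts depend on the .npy files):
# print(data.shape, label.shape)          # expected: (N, 16, 16, 1), (N, num_classes)
# print(data_val.shape, label_val.shape)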
###########################################################################
# Exclude the pipe diameter from influencing the classification (left disabled)
# ones = np.ones((len(data),10,16,1))
# data[:,6::,:,:]  = ones
# val_ones = np.ones((len(data_val),10,16,1))
# data_val[:,6::,:,:] = val_ones
###########################################################################
# Yield batch_size samples at a time
# (for the case where both the data and the labels are given)
def get_batch(inputs=None, labels=None, batch_size=None, shuffle=True):
  assert len(inputs) == len(labels)
  indices = np.arange(len(inputs))
  if shuffle:
    np.random.shuffle(indices)
  # step through the data in increments of batch_size; a final partial batch is dropped
  for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
    excerpt = indices[start_idx:start_idx + batch_size]
    yield inputs[excerpt], labels[excerpt]
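# Usage sketch (assuming the data/label arrays produced by input_data above):
# for x_batch, y_batch in get_batch(data, label, batch_size=batch_size):
#     ...  # x_batch: (batch_size, 16, 16, 1), y_batch: (batch_size, num_classes)
# Note that a trailing batch smaller than batch_size is dropped by the range() bound.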
#####################################################################
def inference(images):
  
  # conv1 16x16x1->16x16x96
  with tf.variable_scope("conv1") as scope:
    weights = tf.get_variable("weights",
                              shape = [3, 3, 1, 96],
                              dtype = tf.float32,
                              initializer = tf.truncated_normal_initializer(stddev=0.05,dtype=tf.float32))
    biases = tf.get_variable("biases",
                             shape = [96],
                             dtype = tf.float32,
                             initializer = tf.constant_initializer(0.0))
    conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding="SAME")
    pre_activation = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(pre_activation, name=scope.name)
    
  
  # pool1 and norm1 16x16x96->8x8x96
  with tf.variable_scope("pooling1_lrn") as scope:
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding="SAME", name="pooling1")
    norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001/9.0,
                      beta=0.75, name="norm1")
  
  # conv2 8x8x96->8x8x64
  with tf.variable_scope("conv2") as scope:
    weights = tf.get_variable("weights",
                              shape = [3, 3, 96, 64],
                              dtype = tf.float32,
                              initializer = tf.truncated_normal_initializer(stddev=0.05,dtype=tf.float32))
    biases = tf.get_variable('biases',
                                 shape=[64], 
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
    conv = tf.nn.conv2d(norm1, weights, strides=[1,1,1,1],padding='SAME')
    pre_activation = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(pre_activation, name='conv2')
    
    
  # pool2 and norm2 8x8x64->8x8x64 (stride-1 pooling keeps the spatial size)
  with tf.variable_scope('pooling2_lrn') as scope:
      norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001/9.0,
                        beta=0.75,name='norm2')
      pool2 = tf.nn.max_pool(norm2, ksize=[1,3,3,1], strides=[1,1,1,1],
                             padding='SAME',name='pooling2')
  print(pool2.shape)  # sanity check: should be (?, 8, 8, 64)
  # fc3
  with tf.variable_scope("fc3") as scope:
    reshape = tf.reshape(pool2, shape=[-1, 8*8*64])
    print(reshape.shape)
    dim = reshape.get_shape()[1].value
    weights = tf.get_variable("weights",
                              shape=[dim, 512],
                              dtype=tf.float32,
                              initializer=tf.truncated_normal_initializer(stddev=0.004,dtype=tf.float32))
    biases = tf.get_variable("biases",
                             shape=[512],
                             dtype = tf.float32,
                             initializer = tf.constant_initializer(0.1) )
    fc3 = tf.nn.relu(tf.matmul(reshape,weights) + biases, name=scope.name)
    
  
  # fc4
  with tf.variable_scope('fc4') as scope:
      weights = tf.get_variable('weights',
                                shape=[512,256],
                                dtype=tf.float32, 
                                initializer=tf.truncated_normal_initializer(stddev=0.004,dtype=tf.float32))
      biases = tf.get_variable('biases',
                               shape=[256],
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(0.1))
      local4 = tf.nn.relu(tf.matmul(fc3, weights) + biases, name='fc4')
 
  # softmax
  with tf.variable_scope('softmax_linear') as scope:
      weights = tf.get_variable('softmax_linear',
                                shape=[256, n_class],
                                dtype=tf.float32,
                                initializer=tf.truncated_normal_initializer(stddev=0.004,dtype=tf.float32))
      biases = tf.get_variable('biases', 
                               shape=[n_class],
                               dtype=tf.float32, 
                               initializer=tf.constant_initializer(0.1))
      softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')
  
  return softmax_linear
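# Shape trace through inference for a 16x16x1 input:
# conv1 -> 16x16x96, pool1 -> 8x8x96, conv2 -> 8x8x64,
# pool2 (stride 1) -> 8x8x64, fc3 -> 512, fc4 -> 256, logits -> n_class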
 
######################################################################
# Loss function
def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        # to use this loss function, one-hot encoded labels are required!
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        if not os.path.exists('loss'):
          os.makedirs('loss')
        tf.summary.scalar(scope.name+'/loss', loss)
        
    return loss
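# Alternative sketch (not used here): with integer class labels instead of one-hot
# vectors, the sparse variant of the loss skips the encoding step entirely:
# def sparse_losses(logits, int_labels):
#     with tf.variable_scope('sparse_loss'):
#         cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
#             logits=logits, labels=int_labels, name='xentropy_per_example')
#         return tf.reduce_mean(cross_entropy, name='loss')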
###################################################################################
images = tf.placeholder(tf.float32,[None, 16, 16, 1])
labels = tf.placeholder(tf.float32,[None, n_class])
####################################################################################
# Train the model
def train():
  my_global_step = tf.Variable(0, name='global_step', trainable=False)
  logits = inference(images)
  
  prediction = tf.nn.softmax(logits)
  correct_prediction = tf.equal(tf.argmax(prediction,1),tf.argmax(labels,1))
  
  # loss value
  loss = losses(logits, labels)
  
  # accuracy
  accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
  
  
  train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step = my_global_step)
  
  
  saver = tf.train.Saver(tf.global_variables())
  
  summary_op = tf.summary.merge_all()
  
  sess = tf.Session()
  sess.run(tf.global_variables_initializer())
  
  if not os.path.exists('logs'):
    os.makedirs('logs')
  summary_writer = tf.summary.FileWriter('logs',sess.graph)
  
  for e in range(epochs):
    for data_batch,label_batch in get_batch(data,label,batch_size):
      
      _, loss_value, train_accuracy_value = sess.run([train_op, loss, accuracy], feed_dict={images:data_batch, labels:label_batch})
      # test set accuracy (note: running the whole test set on every batch is slow;
      # evaluating once per epoch would be enough)
      test_accuracy_value = sess.run(accuracy, feed_dict={images:data_val, labels:label_val})
      print("epoch %d, loss: %.4f, train_accuracy: %.4f, test_accuracy: %.4f" % (e, loss_value, train_accuracy_value, test_accuracy_value))
    
    if e % 3 == 0:
      summary_str = sess.run(summary_op, feed_dict={images:data_batch, labels:label_batch})
      summary_writer.add_summary(summary_str, e)
      saver.save(sess,"classfier_checkpoints/model.ckpt", global_step=e)
  
  sess.close()
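# To inspect the logged loss curve (assuming TensorBoard is installed):
#   tensorboard --logdir logs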
 
def evaluate():
    gen_data = np.load("data/model60.npy")
    gen_data = gen_data.reshape(-1, 16, 16, 1)
#     gen_data = gen_data[0:5000,:,:,:]
    logits = inference(images)
    # softmax turns the logits into probabilities; for a confident correct prediction the max is close to 1
    prediction = tf.nn.softmax(logits)
    # index of the largest probability, i.e. the predicted class
    prediction_max = tf.argmax(prediction,1)
    # the ground-truth label
    label_max = tf.argmax(labels,1)
    # boolean tensor of per-sample correctness
    correct_prediction = tf.equal(prediction_max, label_max)
    # cast the booleans to floats
    accuracy_arr = tf.cast(correct_prediction, tf.float32)
    # mean over the batch
#     accuracy_arr = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    with tf.Session() as sess:
      saver = tf.train.Saver(tf.global_variables())
      saver.restore(sess, tf.train.latest_checkpoint("classfier_checkpoints"))
      #############################################################################################
#       for test_data, test_label in get_batch(data_val, label_val, 1):
#         prediction_index,label_index,acc = sess.run([prediction_max,label_max,accuracy_arr],feed_dict={images:test_data,labels:test_label})
#         prob = acc[0]
#         print("prob: %.4f " % prob," pre_index:",prediction_index," label_index:",label_index)
      ##########################################################################################
      # Predict a label for each generated sample and pair the label with its sample
      concat_label = []
      pre_index_list = []
      for i in range(len(gen_data)):
        single_data_val = gen_data[i].reshape(-1,16,16,1)
        pre_index = sess.run(prediction_max, feed_dict={images:single_data_val})
        pre_index = pre_index.tolist()[0]
        pre_index_list.append(pre_index)
        concat_label.append((gen_data[i],pre_index))
      print(len(pre_index_list))
      concat_label = np.array(concat_label)
      print(concat_label.shape)
      np.save("data/gen.npy",concat_label)
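      # Portability note: on NumPy >= 1.24 the ragged (sample, label) tuples above
      # need an explicit object dtype, e.g. np.array(concat_label, dtype=object),
      # and re-loading "data/gen.npy" then needs np.load(..., allow_pickle=True).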
      ##########################################################################################
# train()
################################################################################
# Visualize the images of each class
gen = np.load("data/gen.npy")
def plot_image(finaldata,pre_label):
  for i in range(len(finaldata)):
    data = finaldata[i][0]
    label = finaldata[i][1]
    if label == pre_label:
      print(label)
      data = data.reshape(16,16)
      # rows 0-2 hold the x coordinates of the main/left/right branches
      # (pinyin: zhu/zuo/you), rows 3-5 the matching y coordinates,
      # and rows 6-8 the branch diameters
      zhu_x = data[0]
      zuo_x = data[1]
      you_x = data[2]
      zhu_y = data[3]
      zuo_y = data[4]
      you_y = data[5]
      zhu_diam = data[6][0]
      zuo_diam = data[7][0]
      you_diam = data[8][0]
      plt.plot(zhu_x, zhu_y, color="red", linewidth=20*zhu_diam)
      plt.plot(zuo_x, zuo_y, color="green", linewidth=20*zuo_diam)
      plt.plot(you_x, you_y, color="blue", linewidth=20*you_diam)
      plt.xlim(0,1)
      plt.ylim(0,1)
      plt.xticks(np.arange(0,1,0.1))
      plt.yticks(np.arange(0,1,0.1))
      plt.axis('off')
#       plt.show()
      if not os.path.exists("label"):
        os.makedirs("label")
      plt.savefig("label/label%d_%d.jpg" % (pre_label,i))
      plt.close()
# plot_image(gen,2)
plot_image(finaldata,2)
#############################################################################
# train()
# evaluate()