
TensorFlow: Building a Convolutional Network to Recognize Traffic Signs

I found a blog post online on traffic sign recognition: http://www.jianshu.com/p/3c7f329b29ee . The network there is just a single fully connected layer whose output size is the number of traffic sign classes, and it reaches a recognition rate of 59.4%.
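
For reference, that baseline amounts to flattening each image and mapping it straight to class scores with one fully connected layer. A minimal sketch of what it might look like (assuming 32×32 inputs and 62 classes; the original post's exact code may differ):

# Hypothetical sketch of the single-FC-layer baseline; details assumed, not verified
import tensorflow as tf

images_ph = tf.placeholder(tf.float32, [None, 32, 32, 3])
labels_ph = tf.placeholder(tf.int32, [None])

images_flat = tf.contrib.layers.flatten(images_ph)                    # [None, 3072]
logits = tf.contrib.layers.fully_connected(images_flat, 62, tf.nn.relu)  # one FC layer -> 62 class scores
predicted_labels = tf.argmax(logits, 1)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels_ph))
train = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)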

Building on the same data, I classified the signs with an AlexNet-style network instead. The code is as follows:

import os
import random
import skimage.transform
import matplotlib.pyplot as plt
import numpy as np
import skimage.data
import tensorflow as tf

def load_data(data_dir):
    """Load all .ppm images under data_dir; each subdirectory name is a label."""
    directories = [d for d in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, d))]
    labels = []
    images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f)
                      for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            images.append(skimage.data.imread(f))
            labels.append(int(d))
    return images, labels

train_data_dir = "datasets/BelgiumTS/Training/"
images, labels = load_data(train_data_dir)
# AlexNet expects fixed-size inputs, so resize everything to 224x224
images224 = [skimage.transform.resize(image, (224, 224)) for image in images]
labels_a = np.array(labels)
images_a = np.array(images224)

graph = tf.Graph()
with graph.as_default():
    images_ph = tf.placeholder(tf.float32, [None, 224, 224, 3])
    labels_ph = tf.placeholder(tf.int32, [None])
    # First convolutional layer
    conv1 = tf.layers.conv2d(inputs=images_ph, filters=96, kernel_size=[11, 11],
                             strides=(4, 4), padding='same', activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    lrn1 = tf.nn.lrn(conv1, bias=1.0, alpha=0.001 / 9, beta=0.71)
    pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Second convolutional layer
    conv2 = tf.layers.conv2d(inputs=pool1, filters=256, kernel_size=[5, 5],
                             strides=(1, 1), padding='same', activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    lrn2 = tf.nn.lrn(conv2, bias=1.0, alpha=0.001 / 9, beta=0.71)
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Third convolutional layer
    conv3 = tf.layers.conv2d(inputs=pool2, filters=384, kernel_size=[3, 3],
                             strides=(1, 1), padding='same', activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    # Fourth convolutional layer
    conv4 = tf.layers.conv2d(inputs=conv3, filters=384, kernel_size=[3, 3],
                             strides=(1, 1), padding='same', activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    # Fifth convolutional layer
    conv5 = tf.layers.conv2d(inputs=conv4, filters=256, kernel_size=[3, 3],
                             strides=(1, 1), padding='same', activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Fully connected layers
    pool5_flat = tf.contrib.layers.flatten(pool5)
    fc1 = tf.contrib.layers.fully_connected(pool5_flat, 4096, tf.nn.relu)
    fc2 = tf.contrib.layers.fully_connected(fc1, 4096, tf.nn.relu)
    # 62 output classes; the ReLU on the logits layer is kept from the original
    # code (activation_fn=None would be the more common choice here)
    logits = tf.contrib.layers.fully_connected(fc2, 62, tf.nn.relu)
    predicted_labels = tf.argmax(logits, 1)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels_ph))
    correct_prediction = tf.equal(tf.cast(predicted_labels, tf.int32), labels_ph)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    train = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    init = tf.global_variables_initializer()

session = tf.Session(graph=graph)
_ = session.run([init])
batch_size = 64

# Define a helper that yields the data in mini-batches
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]

for i in range(101):
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train, y_train in minibatches(images_a, labels_a, batch_size, shuffle=True):
        _, err, ac = session.run([train, loss, accuracy],
                                 feed_dict={images_ph: x_train, labels_ph: y_train})
        train_loss += err; train_acc += ac; n_batch += 1
    train_loss = train_loss / n_batch
    train_acc = train_acc / n_batch
    print("{0} step,train loss: {1},train acc:{2}".format(i, train_loss, train_acc))
    print("======================================")

print("----------------Testing-----------------")
test_dir = "datasets/BelgiumTS/Testing/"
test_images, test_labels = load_data(test_dir)
test_images224 = [skimage.transform.resize(image, (224, 224)) for image in test_images]
predicted = session.run([predicted_labels], feed_dict={images_ph: test_images224})[0]
match_count = sum([int(y == y_) for y, y_ in zip(test_labels, predicted)])
accuracy = match_count / len(test_labels)
print("Accuracy:{:.3f}".format(accuracy))
session.close()
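
One caveat about the test pass: it feeds the entire test set through session.run in a single call, which can run out of memory on larger datasets. If that happens, the predictions can be accumulated batch by batch instead. A small sketch reusing the arrays defined above (run before session.close(); the batch size of 64 is arbitrary):

# Hypothetical batched alternative to the single-call evaluation above
predicted = []
for start in range(0, len(test_images224), 64):
    batch = test_images224[start:start + 64]
    predicted.extend(session.run(predicted_labels, feed_dict={images_ph: batch}))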

Output:

"""
0 step,train loss: 113.01258935055263,train acc:0.20642605633802816
======================================
1 step,train loss: 2.6073241771106987,train acc:0.44124119718309857
======================================
2 step,train loss: 2.2904123252546285,train acc:0.495818661971831
======================================
3 step,train loss: 2.1401563379126536,train acc:0.515625
======================================
4 step,train loss: 2.118869195521717,train acc:0.519806338028169
======================================
5 step,train loss: 2.0390830006397946,train acc:0.5321302816901409
======================================
6 step,train loss: 1.9258808384478932,train acc:0.5547975352112676
======================================
7 step,train loss: 1.9147456246362606,train acc:0.5547975352112676
======================================
8 step,train loss: 1.8560859233560696,train acc:0.5684419014084507
======================================
9 step,train loss: 1.822118812883404,train acc:0.5728433098591549
======================================
10 step,train loss: 1.802945078258783,train acc:0.5776848591549296
======================================
11 step,train loss: 1.8829283697504393,train acc:0.563600352112676
======================================
12 step,train loss: 1.8190392524423733,train acc:0.5743838028169014
======================================
13 step,train loss: 1.7929599990307445,train acc:0.577024647887324
======================================
14 step,train loss: 1.8246907348364172,train acc:0.5743838028169014
======================================
15 step,train loss: 1.808565166634573,train acc:0.5772447183098591
======================================
16 step,train loss: 1.8132070621974032,train acc:0.5732834507042254
======================================
17 step,train loss: 1.7810608014254503,train acc:0.5803257042253521
======================================
18 step,train loss: 1.781205009406721,train acc:0.5803257042253521
======================================
19 step,train loss: 1.7206030932950303,train acc:0.5911091549295775
======================================
20 step,train loss: 1.683935155331249,train acc:0.5994718309859155
======================================
21 step,train loss: 1.7209655483004074,train acc:0.5952904929577465
======================================
22 step,train loss: 1.7249857271221323,train acc:0.5935299295774648
======================================
23 step,train loss: 1.6856124686523222,train acc:0.5985915492957746
======================================
24 step,train loss: 1.7246575103679174,train acc:0.5933098591549296
======================================
25 step,train loss: 1.8570020652153123,train acc:0.5785651408450704
======================================
26 step,train loss: 1.7528714478855403,train acc:0.5878080985915493
======================================
27 step,train loss: 1.7649714207985032,train acc:0.5858274647887324
======================================
28 step,train loss: 1.7499825047775053,train acc:0.5904489436619719
======================================
29 step,train loss: 1.7029336086461242,train acc:0.5972711267605634
======================================
30 step,train loss: 1.6807842993400466,train acc:0.6005721830985915
======================================
31 step,train loss: 1.6731584391123813,train acc:0.5994718309859155
======================================
32 step,train loss: 1.7349763856807225,train acc:0.5928697183098591
======================================
33 step,train loss: 1.6735657016995926,train acc:0.6005721830985915
======================================
34 step,train loss: 1.7172416054027182,train acc:0.5955105633802817
======================================
35 step,train loss: 1.7231659687740701,train acc:0.5939700704225352
======================================
36 step,train loss: 1.7800732783868278,train acc:0.5847271126760564
======================================
37 step,train loss: 1.730364529179855,train acc:0.5902288732394366
======================================
38 step,train loss: 1.696294040747092,train acc:0.5972711267605634
======================================
39 step,train loss: 1.6596270111245168,train acc:0.6040933098591549
======================================
40 step,train loss: 1.6771654461471128,train acc:0.6014524647887324
======================================
41 step,train loss: 1.7108523106910813,train acc:0.5966109154929577
======================================
42 step,train loss: 1.79677607979573,train acc:0.5827464788732394
======================================
43 step,train loss: 1.88314250153555,train acc:0.5728433098591549
======================================
44 step,train loss: 1.7575012378289665,train acc:0.5902288732394366
======================================
45 step,train loss: 1.7041403642842468,train acc:0.5977112676056338
======================================
46 step,train loss: 1.6756691428977,train acc:0.6012323943661971
======================================
47 step,train loss: 1.7099954762928922,train acc:0.5944102112676056
======================================
48 step,train loss: 1.7147126264975106,train acc:0.5946302816901409
======================================
49 step,train loss: 1.7125586086595561,train acc:0.5952904929577465
======================================
50 step,train loss: 1.7511238917498522,train acc:0.5900088028169014
======================================
51 step,train loss: 1.7602741953352807,train acc:0.5928697183098591
======================================
52 step,train loss: 1.7790368728234733,train acc:0.5871478873239436
======================================
53 step,train loss: 1.7333955529709937,train acc:0.5950704225352113
======================================
54 step,train loss: 1.7078297205374275,train acc:0.596830985915493
======================================
55 step,train loss: 1.6944165414487813,train acc:0.5970510563380281
======================================
56 step,train loss: 1.707183194832063,train acc:0.596830985915493
======================================
57 step,train loss: 1.6699603947115615,train acc:0.6014524647887324
======================================
58 step,train loss: 1.674659126241442,train acc:0.6018926056338029
======================================
59 step,train loss: 1.7515487402257786,train acc:0.589568661971831
======================================
60 step,train loss: 1.7355649001161817,train acc:0.5935299295774648
======================================
61 step,train loss: 1.7412649443451786,train acc:0.59375
======================================
62 step,train loss: 1.854392429472695,train acc:0.573943661971831
======================================
63 step,train loss: 1.749652043194838,train acc:0.5908890845070423
======================================
64 step,train loss: 1.7440595240660117,train acc:0.5944102112676056
======================================
65 step,train loss: 1.6968568613831425,train acc:0.5990316901408451
======================================
66 step,train loss: 1.665332643079086,train acc:0.6018926056338029
======================================
67 step,train loss: 1.6558693445904153,train acc:0.6038732394366197
======================================
68 step,train loss: 1.6560608299685196,train acc:0.6038732394366197
======================================
69 step,train loss: 1.6422550107391787,train acc:0.6056338028169014
======================================
70 step,train loss: 1.6501463903507716,train acc:0.6040933098591549
======================================
71 step,train loss: 1.6482023319727939,train acc:0.6045334507042254
======================================
72 step,train loss: 1.666060189126243,train acc:0.6029929577464789
======================================
73 step,train loss: 1.6772781795179341,train acc:0.601012323943662
======================================
74 step,train loss: 1.7644219675534207,train acc:0.5902288732394366
======================================
75 step,train loss: 1.815279718855737,train acc:0.5774647887323944
======================================
76 step,train loss: 2.044364769693831,train acc:0.5415933098591549
======================================
77 step,train loss: 1.9472954239643796,train acc:0.5503961267605634
======================================
78 step,train loss: 1.8854262392285843,train acc:0.5647007042253521
======================================
79 step,train loss: 1.916717700555291,train acc:0.5578785211267606
======================================
80 step,train loss: 1.8576889239566428,train acc:0.5735035211267606
======================================
81 step,train loss: 1.802852106765962,train acc:0.5814260563380281
======================================
82 step,train loss: 1.7828384738572887,train acc:0.5829665492957746
======================================
83 step,train loss: 1.75363616372498,train acc:0.589568661971831
======================================
84 step,train loss: 1.674269481444023,train acc:0.6014524647887324
======================================
85 step,train loss: 1.6613795119272152,train acc:0.6016725352112676
======================================
86 step,train loss: 1.6534495387278811,train acc:0.6029929577464789
======================================
87 step,train loss: 1.6661330662982565,train acc:0.602112676056338
======================================
88 step,train loss: 1.6983716588624767,train acc:0.5977112676056338
======================================
89 step,train loss: 1.9233247360713046,train acc:0.5783450704225352
======================================
90 step,train loss: 1.8316034733409612,train acc:0.5801056338028169
======================================
91 step,train loss: 1.867490506507981,train acc:0.5779049295774648
======================================
92 step,train loss: 1.8248649298305242,train acc:0.5785651408450704
======================================
93 step,train loss: 1.7160055419089089,train acc:0.5972711267605634
======================================
94 step,train loss: 1.6825246810913086,train acc:0.601012323943662
======================================
95 step,train loss: 1.6588783952551829,train acc:0.6023327464788732
======================================
96 step,train loss: 1.6521214196379757,train acc:0.6032130281690141
======================================
97 step,train loss: 1.6504786870848964,train acc:0.6034330985915493
======================================
98 step,train loss: 1.6485072483479137,train acc:0.6038732394366197
======================================
99 step,train loss: 1.6484953870236034,train acc:0.6038732394366197
======================================
100 step,train loss: 1.6503105297894545,train acc:0.6034330985915493
======================================
----------------Testing-----------------
Accuracy:0.467

"""

Training took about a day, but the final result was disappointing: at only 46.7%, the recognition rate is actually lower than that of the single fully connected network. Perhaps the dataset is too small, or the images under different labels are too similar; I am not sure of the exact cause.
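
As a first check on the "dataset too small" hypothesis, it is easy to count how many training images each class actually has. A quick sketch, assuming the same directory layout that load_data reads:

# Hypothetical diagnostic, not part of the training script:
# count .ppm files per label directory to see the class balance
import os
from collections import Counter

data_dir = "datasets/BelgiumTS/Training/"
counts = Counter()
for d in os.listdir(data_dir):
    label_dir = os.path.join(data_dir, d)
    if os.path.isdir(label_dir):
        counts[int(d)] = len([f for f in os.listdir(label_dir) if f.endswith(".ppm")])
print(sorted(counts.items(), key=lambda kv: kv[1]))  # classes ordered by sample count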