
Face Recognition Based on a Convolutional Neural Network

1. Overall face-recognition design:


Client-server interaction flow diagram (figure omitted)


2. Server code:

import socket

sk = socket.socket()
# sk.bind(address) binds the socket to an address; under AF_INET the
# address is a (host, port) tuple.
sk.bind(("172.29.25.11",8007))
# Start listening for incoming connections.
sk.listen(True)

while True:
    for i in range(100):
        # Accept a connection; returns (conn, address), where conn is a new
        # socket object used to send and receive data and address is the
        # client's address.
        conn,address = sk.accept()

        # Path where the incoming image will be stored
        path = str(i+1) + '.jpg'

        # Receive the image size (number of bytes)
        size = conn.recv(1024)
        size_str = str(size,encoding="utf-8")
        # The first two characters of the size message are stripped
        # (a detail of the protocol used by the original client)
        size_str = size_str[2:]
        file_size = int(size_str)

        # Acknowledge that the size was received
        conn.sendall(bytes('finish', encoding="utf-8"))

        # has_size tracks how many bytes have been received so far
        has_size = 0
        # Create the image file and write the incoming data to it
        f = open(path,"wb")
        while True:
            # Stop once the whole file has arrived
            if file_size == has_size:
                break
            data = conn.recv(1024)
            f.write(data)
            has_size += len(data)
        f.close()

        # Scale the image
        resize(path)
        # cut_img(path) returns True if cropping succeeded, False otherwise
        if cut_img(path):
            yuchuli()
            result = test('test.jpg')
            conn.sendall(bytes(result,encoding="utf-8"))
        else:
            print('failure')
            conn.sendall(bytes('Eye detection failed; please keep the eyes clearly visible in the image',encoding="utf-8"))
        conn.close()
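The original post does not show the client side. Below is a minimal client sketch matching the protocol above; the two-character prefix on the size message is an assumption inferred from the size_str[2:] slice in the server code, and the host, port, and tag values are placeholders.

import os
import socket

def send_image(path, host="172.29.25.11", port=8007):
    sk = socket.socket()
    sk.connect((host, port))

    # Send the file size; the server strips the first two characters,
    # so prepend a two-character tag (assumption about the original client)
    size = os.path.getsize(path)
    sk.sendall(bytes("OK" + str(size), encoding="utf-8"))

    # Wait for the 'finish' acknowledgement before streaming the image
    sk.recv(1024)

    # Stream the image in 1024-byte chunks, as the server expects
    with open(path, "rb") as f:
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            sk.sendall(chunk)

    # Read back the recognition result
    result = str(sk.recv(1024), encoding="utf-8")
    sk.close()
    return result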

3. Image preprocessing

1) Image scaling

# Scale the image down proportionally, based on its size
def resize(path):
    image=cv2.imread(path,0)
    row,col = image.shape
    if row >= 2500:
        x,y = int(row/5),int(col/5)
    elif row >= 2000:
        x,y = int(row/4),int(col/4)
    elif row >= 1500:
        x,y = int(row/3),int(col/3)
    elif row >= 1000:
        x,y = int(row/2),int(col/2)
    else:
        x,y = row,col
    # Resize (note that cv2.resize takes (width, height))
    res=cv2.resize(image,(y,x),interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(path,res)

2) Histogram equalization and median filtering

# Histogram equalization
eq = cv2.equalizeHist(img)
# Median filtering with a 3x3 kernel
lbimg=cv2.medianBlur(eq,3)
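The server code in section 2 calls yuchuli() after cropping, but the function itself is not listed in the original. A minimal sketch, assuming it simply applies the two operations above to the cropped test.jpg in place:

import cv2

def yuchuli(path='test.jpg'):
    # Read the cropped face in grayscale
    img = cv2.imread(path, 0)
    # Histogram equalization to normalize lighting
    eq = cv2.equalizeHist(img)
    # 3x3 median filter to suppress salt-and-pepper noise
    lbimg = cv2.medianBlur(eq, 3)
    cv2.imwrite(path, lbimg)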

3) Eye detection
# -*- coding: utf-8 -*-
# Detect eyes and return their positions

import numpy as np
import cv2

def eye_test(path):
    # Path of the face image to check
    imagepath = path

    # Load the pre-trained Haar cascade for eyes (tolerant of glasses)
    eyeglasses_cascade = cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')

    # Read the image
    img = cv2.imread(imagepath)
    # Convert to grayscale
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # Detect the eyes
    eyeglasses = eyeglasses_cascade.detectMultiScale(gray)
    # When exactly two eyes are found, return both eye positions
    if len(eyeglasses) == 2:
        num = 0
        for (e_gx,e_gy,e_gw,e_gh) in eyeglasses:
            cv2.rectangle(img,(e_gx,e_gy),(e_gx+int(e_gw/2),e_gy+int(e_gh/2)),(0,0,255),2)
            # Use the point at half the width/height of each detected eye box
            if num == 0:
                x1,y1 = e_gx+int(e_gw/2),e_gy+int(e_gh/2)
            else:
                x2,y2 = e_gx+int(e_gw/2),e_gy+int(e_gh/2)
            num += 1
        print('eye_test')
        return x1,y1,x2,y2
    else:
        return False
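A quick usage sketch (assuming an image 1.jpg in the working directory): the function returns a 4-tuple on success and False otherwise, so callers must check the result before unpacking it.

from eye_test import eye_test

# Returns (x1, y1, x2, y2) when exactly two eyes are found, else False
eyes = eye_test('1.jpg')
if eyes:
    x1, y1, x2, y2 = eyes
    print('eye positions:', (x1, y1), (x2, y2))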

4) Eye alignment and cropping
# -*- coding: utf-8 -*-
# Align the eyes and crop the face

# Parameters:
#  CropFace(image, eye_left, eye_right, offset_pct, dest_sz)
# eye_left is the position of the left eye
# eye_right is the position of the right eye
# offset_pct is the percent of the image you want to keep next to the eyes
# (horizontal, vertical direction)
# dest_sz is the size of the output image
#
import sys,math
from PIL import Image
from eye_test import eye_test

# Distance between two points
def Distance(p1,p2):
    dx = p2[0]- p1[0]
    dy = p2[1]- p1[1]
    return math.sqrt(dx*dx+dy*dy)

# Build the affine transform (rotation about a centre, scaling, translation)
# and apply it to the image.
def ScaleRotateTranslate(image, angle, center=None, new_center=None, scale=None, resample=Image.BICUBIC):
    if (scale is None) and (center is None):
        return image.rotate(angle=angle, resample=resample)
    nx,ny = x,y = center
    sx = sy = 1.0
    if new_center:
        (nx,ny) = new_center
    if scale:
        (sx,sy) = (scale, scale)
    cosine = math.cos(angle)
    sine = math.sin(angle)
    a = cosine/sx
    b = sine/sx
    c = x-nx*a-ny*b
    d = -sine/sy
    e = cosine/sy
    f = y-nx*d-ny*e
    return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=resample)

# Crop the face out of the image, given the eye positions, the offset
# percentages, and the output size.
def CropFace(image, eye_left=(0,0), eye_right=(0,0), offset_pct=(0.2,0.2), dest_sz=(70,70)):
    # calculate offsets in original image
    offset_h = math.floor(float(offset_pct[0])*dest_sz[0])
    offset_v = math.floor(float(offset_pct[1])*dest_sz[1])
    # get the direction between the eyes
    eye_direction = (eye_right[0]- eye_left[0], eye_right[1]- eye_left[1])
    # calc rotation angle in radians
    rotation = -math.atan2(float(eye_direction[1]),float(eye_direction[0]))
    # distance between the eyes
    dist = Distance(eye_left, eye_right)
    # calculate the reference eye-width of the output image
    reference = dest_sz[0]-2.0*offset_h
    # scale factor
    scale = float(dist)/float(reference)
    # rotate the original around the left eye
    image = ScaleRotateTranslate(image, center=eye_left, angle=rotation)
    # crop the rotated image
    crop_xy = (eye_left[0]- scale*offset_h, eye_left[1]- scale*offset_v)  # top-left corner
    crop_size = (dest_sz[0]*scale, dest_sz[1]*scale)   # crop size
    image = image.crop((int(crop_xy[0]),int(crop_xy[1]),int(crop_xy[0]+crop_size[0]),int(crop_xy[1]+crop_size[1])))
    # resize to the destination size
    image = image.resize(dest_sz, Image.ANTIALIAS)
    return image
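# Worked example for the values used in cut_img below,
# offset_pct=(0.30,0.30) and dest_sz=(92,112):
#   offset_h  = floor(0.30 * 92)  = 27 px kept left/right of the eyes
#   offset_v  = floor(0.30 * 112) = 33 px kept above/below the eyes
#   reference = 92 - 2*27 = 38    -> the eyes end up 38 px apart
#   scale     = dist / 38         -> the crop grows with the eye distance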

def cut_img(path):
    image = Image.open(path)

    # eye_test returns the eye positions on success, False otherwise
    eyes = eye_test(path)
    if eyes:
        print('cut_img')
        # Unpack the eye positions
        leftx,lefty,rightx,righty = eyes

        # Make sure the left eye really is the leftmost one
        if leftx > rightx:
            leftx,lefty,rightx,righty = rightx,righty,leftx,lefty

        # Align the eyes and save the cropped face
        CropFace(image, eye_left=(leftx,lefty), eye_right=(rightx,righty), offset_pct=(0.30,0.30), dest_sz=(92,112)).save('test.jpg')
        return True
    else:
        print('failure')
        return False

4. Training the convolutional neural network
# -*- coding: utf-8 -*-

from numpy import *
import cv2
import tensorflow as tf

# Pixels per image
TYPE = 112*92
# Number of people to train on
PEOPLENUM = 42
# Training images per person
TRAINNUM = 15 #( train_face_num )
# Total images per person (test + training)
EACH = 21 #( test_face_num + train_face_num )
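# With these constants the data matrices are fixed in size:
#   42 * 15        = 630 training rows
#   42 * (21 - 15) = 252 test rows
#   112 * 92       = 10304 grayscale values per row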

# Flatten a 2-D image into a 1-D row vector
def img2vector1(filename):
    # Read the image in grayscale and reshape it to a single row
    img = cv2.imread(filename,0)
    row,col = img.shape
    vector1 = reshape(img,(1,row*col))
    return vector1

# Load the face data
def ReadData(k):
    path = 'face_flip/'
    train_face = zeros((PEOPLENUM*k,TYPE),float32)
    train_face_num = zeros((PEOPLENUM*k,PEOPLENUM))
    test_face = zeros((PEOPLENUM*(EACH-k),TYPE),float32)
    test_face_num = zeros((PEOPLENUM*(EACH-k),PEOPLENUM))

    # Build the training and test face sets for all 42 people
    for i in range(PEOPLENUM):
        # Current person (1-based index, matching the directory names)
        people_num = i + 1
        for j in range(k):
            # Image path
            filename = path + 's' + str(people_num) + '/' + str(j+1) + '.jpg'
            # Flatten the 2-D image to a 1-D row
            img = img2vector1(filename)

            # train_face: one image per row; train_face_num: one-hot label
            # marking which person the image belongs to
            train_face[i*k+j,:] = img/255
            train_face_num[i*k+j,people_num-1] = 1

        for j in range(k,EACH):
            # Image path
            filename = path + 's' + str(people_num) + '/' + str(j+1) + '.jpg'

            # Flatten the 2-D image to a 1-D row
            img = img2vector1(filename)

            # test_face: one image per row; test_face_num: one-hot label
            test_face[i*(EACH-k)+(j-k),:] = img/255
            test_face_num[i*(EACH-k)+(j-k),people_num-1] = 1

    return train_face,train_face_num,test_face,test_face_num

# Load the training and test face sets with their one-hot labels
train_face,train_face_num,test_face,test_face_num = ReadData(TRAINNUM)

# Compute the accuracy on the test set
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result

# Weight variable initialized with a truncated normal
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Bias variable initialized to a small constant
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# 2-D convolution with stride 1 and 'SAME' (zero) padding
def conv2d(x, W):
    # stride [1, x_movement, y_movement, 1]
    # Must have strides[0] = strides[3] = 1
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# Max pooling over 2x2 windows, stride 2 in both x and y
def max_pool_2x2(x):
    # stride [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')


# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 10304])/255.   # 112*92 pixels
ys = tf.placeholder(tf.float32, [None, PEOPLENUM])    # 42 output classes
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 112, 92, 1])
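# Note: the images were already divided by 255 inside ReadData, so the extra
# /255. on xs means the network actually sees values in [0, 1/255]. Training
# and testing apply the same double scaling, so the model stays consistent,
# but one of the two divisions is redundant.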
# print(x_image.shape)  # [n_samples, 112,92,1]

# First convolutional layer
W_conv1 = weight_variable([5,5, 1,32]) # patch 5x5, in size 1, out size 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # output size 112x92x32
h_pool1 = max_pool_2x2(h_conv1)                          # output size 56x46x32


# Second convolutional layer
W_conv2 = weight_variable([5,5, 32, 64]) # patch 5x5, in size 32, out size 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # output size 56x46x64
h_pool2 = max_pool_2x2(h_conv2)                          # output size 28x23x64


# First fully connected layer
W_fc1 = weight_variable([28*23*64, 1024])
b_fc1 = bias_variable([1024])
# [n_samples, 28, 23, 64] ->> [n_samples, 28*23*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 28*23*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Second fully connected layer (output layer)
W_fc2 = weight_variable([1024, PEOPLENUM])
b_fc2 = bias_variable([PEOPLENUM])
prediction = tf.nn.softmax((tf.matmul(h_fc1_drop, W_fc2) + b_fc2))


# Cross-entropy loss, computed from the raw logits
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = tf.matmul(h_fc1_drop, W_fc2)+b_fc2, labels=ys))
regularizers = tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(b_fc1) +tf.nn.l2_loss(W_fc2) + tf.nn.l2_loss(b_fc2)
# Add the L2 regularization term to the loss
cost += 5e-4 * regularizers
# Adam optimizer minimizes the loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cost)

sess = tf.Session()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess.run(init)

# Train for 1000 steps; every 50 steps print results on the test set
for i in range(1000):
    sess.run(train_step, feed_dict={xs: train_face, ys: train_face_num, keep_prob: 0.5})
    if i % 50 == 0:
        print(sess.run(prediction[0],feed_dict= {xs: test_face,ys: test_face_num,keep_prob: 1}))
        print(compute_accuracy(test_face,test_face_num))
# Save the trained weights
save_path = saver.save(sess,'my_data/save_net.ckpt')

5. Testing with the trained network

# -*- coding: utf-8 -*-
# Two convolutional layers followed by two fully connected layers

from numpy import *
import cv2
import tensorflow as tf

# Number of network output classes
PEOPLENUM = 42

# Flatten a 2-D image into a 1-D row vector
def img2vector1(img):
    # Reshape the grayscale image to a single row
    row,col = img.shape
    vector1 = reshape(img,(1,row*col))
    return vector1

# Weight variable initialized with a truncated normal
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Bias variable initialized to a small constant
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# 2-D convolution with stride 1 and 'SAME' (zero) padding
def conv2d(x, W):
    # stride [1, x_movement, y_movement, 1]
    # Must have strides[0] = strides[3] = 1
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# Max pooling over 2x2 windows, stride 2 in both x and y
def max_pool_2x2(x):
    # stride [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 10304])/255.   # 112*92 pixels
ys = tf.placeholder(tf.float32, [None, PEOPLENUM])    # 42 output classes
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 112, 92, 1])
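# The same double /255 scaling as in training: test() below also divides the
# image by 255 when reading it, so the two pipelines match.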
# print(x_image.shape)  # [n_samples, 112,92,1]

# First convolutional layer
W_conv1 = weight_variable([5,5, 1,32]) # patch 5x5, in size 1, out size 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # output size 112x92x32
h_pool1 = max_pool_2x2(h_conv1)                          # output size 56x46x32


# Second convolutional layer
W_conv2 = weight_variable([5,5, 32, 64]) # patch 5x5, in size 32, out size 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # output size 56x46x64
h_pool2 = max_pool_2x2(h_conv2)                          # output size 28x23x64


# First fully connected layer
W_fc1 = weight_variable([28*23*64, 1024])
b_fc1 = bias_variable([1024])
# [n_samples, 28, 23, 64] ->> [n_samples, 28*23*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 28*23*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Second fully connected layer (output layer)
W_fc2 = weight_variable([1024, PEOPLENUM])
b_fc2 = bias_variable([PEOPLENUM])
prediction = tf.nn.softmax((tf.matmul(h_fc1_drop, W_fc2) + b_fc2))

sess = tf.Session()
init = tf.global_variables_initializer()

# Restore the trained weights
saver = tf.train.Saver()
saver.restore(sess,'my_data/save_net.ckpt')

# Map a network output index to a person's name
def find_people(people_num):
    if people_num == 41:
        return '任童霖'
    elif people_num == 42:
        return 'LZT'
    else:
        return 'another person'

def test(path):
    # Read the preprocessed face in grayscale
    img = cv2.imread(path,0)/255
    test_face = img2vector1(img)
    print('true_test')

    # Find the class with the largest output and its share of the total
    # (the softmax outputs already sum to 1, so the division is a no-op)
    prediction1 = sess.run(prediction,feed_dict={xs:test_face,keep_prob:1})
    prediction1 = prediction1[0].tolist()
    people_num = prediction1.index(max(prediction1))+1
    result = max(prediction1)/sum(prediction1)
    print(result,find_people(people_num))

    # Accept the match when the largest share exceeds 0.5
    if result > 0.50:
        # Record the sign-in
        qiandaobiao = load('save.npy')
        qiandaobiao[people_num-1] = 1
        save('save.npy',qiandaobiao)

        # Return name + sign-in confirmation
        print(find_people(people_num) + ' has signed in')
        result = find_people(people_num) + ' signed in successfully'
    else:
        result = 'Sign-in failed'
    return result
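test() assumes that save.npy already exists. A one-time initialization sketch, assuming the record is simply one flag per person (0 = not yet signed in):

import numpy as np

# Create an empty attendance record with one slot per person (assumed layout)
np.save('save.npy', np.zeros(42))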
