Windows 7 下Vs2013呼叫tensorflow表情識別模型
在Windows下安裝tensorflow-gpu,可參考windows+tensorflow-gpu+anaconda3+cuda8.0+cudnn安裝指南https://blog.csdn.net/hdd0411/article/details/71305931?locationNum=8&fps=1。
2.1具體呼叫指南:
新建vs2013工程,將anaconda3資料夾下的libs和include資料夾拷貝到.sln目錄下,並新增到工程屬性的包含目錄和庫目錄下;複製libs資料夾下的python36.lib,命名為python36_d.lib,新增到屬性->聯結器中;複製anaconda3資料夾下的python36.dll檔案放在debug資料夾下,使.exe與python36.dll在同一個debug資料夾中。
C++程式碼:(呼叫表情識別模型)
// Worker-thread entry point: embeds the CPython interpreter, imports the
// predictd module (predictd.py must sit next to python36.dll / the .exe),
// and calls predictd.test_one_image("00028.jpg"). The Python side writes
// its prediction to log.txt as a side effect.
//
// Returns 0 on success, 1 when the module or function cannot be loaded.
//
// Fixes vs. the original: bail out (instead of crashing) when the import
// fails before PyObject_GetAttrString; guard the call on pFunc rather than
// module; release every owned reference; always Py_Finalize on exit.
DWORD WINAPI testImage(LPVOID lParam)
{
    PyObject* module = NULL;
    PyObject* pFunc = NULL;
    PyObject* pArg = NULL;
    PyObject* pResult = NULL;

    Py_Initialize();

    module = PyImport_ImportModule("predictd"); // predictd: the Python file name (no .py)
    if (!module) {
        PyErr_Print();
        printf("cannot open module!");
        Py_Finalize();
        return 1;
    }

    pFunc = PyObject_GetAttrString(module, "test_one_image"); // function name inside predictd.py
    if (!pFunc || !PyCallable_Check(pFunc)) {
        PyErr_Print();
        printf("cannot open FUNC!");
        Py_XDECREF(pFunc);
        Py_DECREF(module);
        Py_Finalize();
        return 1;
    }

    // Build the single string argument and invoke the model.
    // Py_Initialize already leaves this thread holding the GIL, so the
    // original PyGILState_Ensure/Release pair was redundant here.
    pArg = Py_BuildValue("(s)", "00028.jpg");
    pResult = PyEval_CallObject(pFunc, pArg);
    if (!pResult)
        PyErr_Print(); // surface any Python-side exception instead of ignoring it

    Py_XDECREF(pResult);
    Py_XDECREF(pArg);
    Py_DECREF(pFunc);
    Py_DECREF(module);
    Py_Finalize();
    return 0;
}
int main()
{
cout << "建立執行緒" << endl;
CreateThread(NULL, 0, testImage, 0, 0, NULL);
cout << "建立成功" << endl;
system("pause");
return 0;
}
Python程式碼:建立predictd.py檔案,將predictd.py檔案和python36.dll檔案放在同一個資料夾中。
import tensorflow as tf
import numpy as np
import os,glob,cv2
import sys,argparse
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# First, pass the path of the image
def test_one_image(test_dir):
    """Run the saved face-expression model on a single image file.

    All print output is redirected into ``log.txt`` (the C/C++ side of this
    workflow reads the prediction back from that file), and stdout is always
    restored afterwards, even on error.

    Args:
        test_dir: path to the image file to classify.

    Returns:
        The raw softmax prediction array from the model (also printed to
        log.txt). Returning it is new but backward-compatible: the embedding
        C++ caller discards the return value.

    Raises:
        IOError: if the image cannot be read.
    """
    stdout_backup = sys.stdout
    log_file = open("log.txt", "w")
    sys.stdout = log_file
    try:
        print(test_dir)
        image_size = 96   # must match the input size of face-expression-model.meta
        num_channels = 3

        # Read and preprocess exactly as during training: resize, then
        # scale uint8 pixels into [0, 1] float32.
        image = cv2.imread(test_dir)
        print(image)
        if image is None:
            # cv2.imread returns None on a bad path/unreadable file; fail
            # loudly here instead of crashing inside cv2.resize.
            raise IOError("cannot read image: %s" % test_dir)
        image = cv2.resize(image, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
        images = np.array([image], dtype=np.uint8).astype('float32')
        images = np.multiply(images, 1.0 / 255.0)
        # Network input shape is [None, image_size, image_size, num_channels].
        x_batch = images.reshape(1, image_size, image_size, num_channels)

        sess = tf.Session()
        try:
            # Step 1: recreate the graph, Step 2: restore the weights.
            saver = tf.train.import_meta_graph('face-expression-model.meta')
            saver.restore(sess, tf.train.latest_checkpoint('./'))
            graph = tf.get_default_graph()

            # y_pred is the network's prediction tensor; x / y_true are the
            # input placeholders from the training graph.
            y_pred = graph.get_tensor_by_name('y_pred:0')
            x = graph.get_tensor_by_name('x:0')
            y_true = graph.get_tensor_by_name("y_true:0")

            # One dummy label row per class; the class count is taken from
            # the number of training_data subdirectories, as in training.
            y_test_images = np.zeros((1, len(os.listdir('training_data'))))

            feed_dict_testing = {x: x_batch, y_true: y_test_images}
            result = sess.run(y_pred, feed_dict=feed_dict_testing)
            print(result)   # e.g. [prob_class_0 prob_class_1 ...] -> log.txt
            return result
        finally:
            sess.close()
    finally:
        # Restore stdout and close the log even if anything above raised;
        # the original leaked both on error.
        log_file.close()
        sys.stdout = stdout_backup
將result儲存到log.txt檔案中;
2.2 讀取log.txt中的結果值實現程式碼:
#include "stdafx.h"
#include "string.h"
#include "stdlib.h"
#include <stdio.h>
int main(){
FILE *fp;
fp = fopen("data.txt","r");
float x;
char a[100] = "0";
char str1[100] = "0";
char ch;
int i,j,len;
double b[100] = {0};
for (i =j= 0; (ch = fgetc(fp)) != EOF; i++)
{
if (ch=='['||ch==']')
{
continue;
}
a[j] = ch;
j++;
}
printf("%s\n", a);
len = strlen(a);
int n = 0;
for ( i=j = 0; i <= len; i++)
{
if (a[i] != ' ')
{
str1[j++] = a[i];
}
else
{
str1[j] = 0;
if (j>0)
{
b[n++] = atof(str1);
}
j = 0;
}
}
if (j>0)
{
str1[j] = 0;
b[n++] = atof(str1);
}
for ( i = 0; i <n; i++)
{
printf("%lf\n", b[i]);
}
return 0;
}