基於ROS實現人臉跟隨(OpenCV,arduino)
該測試欲達成目標是實現人臉跟隨!
1 硬體
攝像頭:1個;
USB資料線:1個;
舵機:1個;
Arduino控制板:1個。
此處應有圖:

1.1 準備
這裡需要使用usb-cam軟體包:
$ cd ~/catkin_ws/src
$ git clone https://github.com/bosch-ros-pkg/usb_cam.git
$ cd ~/catkin_ws
$ catkin_make
注意:下面的程式執行前,需要啟動usb_cam節點。
roslaunch usb_cam usb_cam-test.launch
執行上面的節點,這時該節點會不斷地釋出/usb_cam/image_raw話題。如下圖所示:

下面的程式將使用這個話題資料。
2 程式
2.1 人臉識別節點,釋出一個話題"chatter",內容為人臉在影象中的水平位置。
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <std_msgs/Int16.h>
#include <opencv2/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core.hpp>

using namespace std;
using namespace cv;

// Haar cascade used for face detection; loaded once in main().
CascadeClassifier face_cascade;
static const std::string OPENCV_WINDOW = "Raw Image window";

// Subscribes to /usb_cam/image_raw, detects the first face in each frame,
// draws an ellipse around it, and publishes the face-center x coordinate
// (in image pixels) on the "chatter" topic as std_msgs/Int16.
class Face_Detector
{
  ros::NodeHandle nh_;
  image_transport::ImageTransport it_;
  image_transport::Subscriber image_sub_;
  ros::Publisher chatter_pub;

public:
  Face_Detector()
    : it_(nh_)
  {
    // Subscribe to the input video feed and advertise the face-position topic.
    image_sub_ = it_.subscribe("/usb_cam/image_raw", 1,
                               &Face_Detector::imageCb, this);
    chatter_pub = nh_.advertise<std_msgs::Int16>("chatter", 100);
    cv::namedWindow(OPENCV_WINDOW);
  }

  ~Face_Detector()
  {
    cv::destroyWindow(OPENCV_WINDOW);
  }

  // Image callback: convert the ROS image message to BGR8 and run detection.
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    // Only process frames of a plausible camera size (e.g. 640x480);
    // smaller images are ignored.
    if (cv_ptr->image.rows > 400 && cv_ptr->image.cols > 600)
    {
      detect_faces(cv_ptr->image);
    }
  }

  // Detect faces in `img`; if at least one is found, annotate the first one
  // and publish its center x coordinate. Note cv::Mat is a shallow copy, so
  // the ellipse is drawn into the caller's frame buffer.
  void detect_faces(cv::Mat img)
  {
    Mat frame_gray;
    cvtColor(img, frame_gray, COLOR_BGR2GRAY);
    // Equalization improves detection robustness under uneven lighting.
    equalizeHist(frame_gray, frame_gray);

    //-- Detect faces
    std::vector<Rect> faces;
    face_cascade.detectMultiScale(frame_gray, faces);

    if (!faces.empty())
    {
      // Track only the first detection.
      const Rect& face = faces[0];
      Point center(face.x + face.width / 2, face.y + face.height / 2);
      ellipse(img, center, Size(face.width / 2, face.height / 2),
              0, 0, 360, Scalar(255, 0, 255), 4);

      std_msgs::Int16 position_x;
      position_x.data = cvRound(face.x + face.width / 2);
      chatter_pub.publish(position_x);
    }

    imshow(OPENCV_WINDOW, img);
    waitKey(3);
  }
};

int main(int argc, char** argv)
{
  // Read the cascade path from the command line (note the hard-coded default
  // path; pass --face_cascade=... to override).
  CommandLineParser parser(argc, argv,
      "{help h||}"
      "{face_cascade|/home/junjun/projects/main2/haarcascade_frontalface_alt.xml|Path to face cascade.}");
  parser.about("\nThis program demonstrates using the cv::CascadeClassifier class to detect objects (Face + eyes) in a video stream.\n"
               "You can use Haar or LBP features.\n\n");
  // FIX: honor the --help flag instead of dumping usage on every run.
  if (parser.has("help"))
  {
    parser.printMessage();
    return 0;
  }

  String face_cascade_name = parser.get<String>("face_cascade");
  //-- 1. Load the cascade
  if (!face_cascade.load(face_cascade_name))
  {
    cout << "--(!)Error loading face cascade\n";
    return -1;
  }

  ros::init(argc, argv, "Face_Detector");
  Face_Detector ic;
  ros::spin();
  return 0;
}
2.1.1 CMakeLists.txt檔案
這個很重要,以後再寫程式可以直接參考這個檔案
cmake_minimum_required(VERSION 2.8.3)
project(cv_bridge_tutorial_pkg)

find_package(catkin REQUIRED COMPONENTS
  cv_bridge
  image_transport
  roscpp
  sensor_msgs
  std_msgs
)
find_package(OpenCV REQUIRED)

catkin_package()

# FIX: include paths were declared twice; a single call is sufficient.
include_directories(
  ${catkin_INCLUDE_DIRS}
  ${OpenCV_INCLUDE_DIRS}
)

add_executable(sample_cv_bridge_node src/sample_cv_bridge_node.cpp)
target_link_libraries(sample_cv_bridge_node
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
)
2.1.2 package.xml檔案
<?xml version="1.0"?>
<package>
  <name>cv_bridge_tutorial_pkg</name>
  <version>0.0.0</version>
  <description>The cv_bridge_tutorial_pkg package</description>
  <maintainer email="[email protected]">lentin</maintainer>
  <license>TODO</license>

  <!-- Build tool -->
  <buildtool_depend>catkin</buildtool_depend>

  <!-- Build dependencies -->
  <build_depend>cv_bridge</build_depend>
  <build_depend>image_transport</build_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>std_msgs</build_depend>

  <!-- Runtime dependencies -->
  <run_depend>cv_bridge</run_depend>
  <run_depend>image_transport</run_depend>
  <run_depend>roscpp</run_depend>
  <run_depend>sensor_msgs</run_depend>
  <run_depend>std_msgs</run_depend>

  <export>
  </export>
</package>
2.2 人臉跟隨的控制程式(arduino)
訂閱話題"chatter"
#include <ros.h>
#include <std_msgs/Int16.h>
#include <Servo.h>

ros::NodeHandle nh;
Servo myservo;

const int servoPin = 9;
// Step per message: too large overshoots and oscillates; too small and the
// servo barely moves.
int servo_step_distance = 5;
int position_x = 90;            // current servo angle (degrees)
const int SERVO_MIN = 10;       // mechanical safety limits (degrees)
const int SERVO_MAX = 170;

int screenmaxx = 640;           // camera image width in pixels
int center_offset = 80;         // half-width of the center dead band
int center_right = (screenmaxx / 2) - center_offset;
int center_left = (screenmaxx / 2) + center_offset;

// Callback for the "chatter" topic: the message carries the face-center x
// coordinate in image pixels. Step the servo toward the face whenever it
// leaves the center dead band.
void messageCb(std_msgs::Int16 servo_msg){
  digitalWrite(13, HIGH);   // LED on while handling a message
  int face_x = servo_msg.data;
  // Face beyond the left boundary of the dead band: turn the servo left.
  if (face_x > center_left){
    position_x -= servo_step_distance;
  }
  // Face beyond the right boundary: turn the servo right.
  else if (face_x < center_right){
    position_x += servo_step_distance;
  }
  // FIX: clamp instead of skipping the write. The original kept stepping
  // position_x past the [10,170] limits without writing, so the variable
  // could drift far out of range and the servo would stall until many
  // opposite-direction messages brought it back.
  position_x = constrain(position_x, SERVO_MIN, SERVO_MAX);
  myservo.write(position_x);
  delay(1);
  digitalWrite(13, LOW);
}

ros::Subscriber<std_msgs::Int16> sub("chatter", &messageCb );

void setup()
{
  pinMode(13, OUTPUT);
  myservo.attach(servoPin);
  nh.initNode();
  nh.subscribe(sub);
  myservo.write(position_x);  // move to the initial (center) position
}

void loop()
{
  nh.spinOnce();
  delay(1);
}
把程式燒入Arduino板子中。
3 執行
首先執行:roscore
然後:roslaunch usb_cam usb_cam-test.launch
然後:rosrun rosserial_python serial_node.py /dev/ttyACM0
最後:rosrun cv_bridge_tutorial_pkg sample_cv_bridge_node
應該放一個視訊的,但貌似不支援,拍了一個小視訊放到了抖音上!最後截一張圖。

人臉跟隨,人體識別,身份證,車牌號識別等技術都歸納在NDK體系。這裡整理的了技能圖,以及視訊資料;

NDK開發
加群免費領取安卓進階視訊教程,原始碼,面試資料,群內有大牛一起交流討論技術;964557053。點選連結加入群聊
(包括自定義控制元件、NDK、架構設計、混合式開發工程師(React native,Weex)、效能優化、完整商業專案開發等)

騰訊T3 Android高階技術視訊教程.png