
Android JNI Development: Eyelid Annotation in C++

C++ code:

#include <jni.h>
#include <string>
#include <opencv2/opencv.hpp>
#include <android/log.h>
#include <iostream>
#include <stdio.h>

#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, "keymatch", __VA_ARGS__)
extern "C"{
    using namespace cv;
    using namespace std;

void printMAtMessage(Mat &mat);
    JNIEXPORT jintArray  JNICALL
Java_com_findai_xkk_myopencv_MainActivity_stringFromJNI(
        JNIEnv *env,
        jobject /* this */,jintArray pixels_
        , jint w
        , jint h) {
        jint *pixels = env->GetIntArrayElements(pixels_, NULL);
        if (pixels == NULL) {
            return NULL;
        }

        // The image arrives as ARGB ints; viewed through the Mat its byte order is BGRA.
        Mat img(h, w, CV_8UC4, (uchar *) pixels);  // the Mat wraps the same memory as pixels
        h = h / 2, w = w / 2;
        resize(img, img, Size(w, h));
        printMAtMessage(img);
        Mat temp;
        // Convert to a single-channel grayscale image.
        cvtColor(img, temp, COLOR_RGBA2GRAY);

        // Per-pixel response map (a vector instead of the original variable-length
        // array, which is a non-standard compiler extension).
        vector<vector<int>> summat(h, vector<int>(w, 0));

        int max_cha = 0;  // largest positive window difference found in the image
        // For every pixel, sum the 5 gray values directly above and the 5 directly below,
        // store the absolute difference, and track the largest positive difference.
        for (int i = 6; i < h - 6; i++) {
            for (int j = 6; j < w - 6; j++) {
                int sum_s = temp.at<uchar>(i - 1, j) + temp.at<uchar>(i - 2, j) + temp.at<uchar>(i - 3, j)
                          + temp.at<uchar>(i - 4, j) + temp.at<uchar>(i - 5, j);
                int sum_x = temp.at<uchar>(i + 1, j) + temp.at<uchar>(i + 2, j) + temp.at<uchar>(i + 3, j)
                          + temp.at<uchar>(i + 4, j) + temp.at<uchar>(i + 5, j);
                int cha = sum_s - sum_x;
                summat[i][j] = abs(cha);
                if (max_cha < cha) {
                    max_cha = cha;
                }
            }
        }

        // Threshold: mark every pixel whose response exceeds 40% of the maximum as eyelid (white).
        int std_max = max_cha * 0.4;
        for (int i = 6; i < h - 6; i++) {
            for (int j = 6; j < w - 6; j++) {
                if (summat[i][j] > std_max) {
                    temp.at<uchar>(i, j) = 255;
                }
            }
        }


        // Convert back to four channels. Note: when calling OpenCV image-processing functions,
        // always check the image's bit depth and channel count, otherwise all kinds of
        // problems can appear.
        cvtColor(temp, temp, COLOR_GRAY2BGRA);

        // The 4-channel data is exactly 4 bytes (one jint) per pixel, so it can be
        // copied straight back into a Java int array.
        uchar *tempData = temp.data;
        int size = w * h;
        jintArray result = env->NewIntArray(size);
        env->SetIntArrayRegion(result, 0, size, (const jint *) tempData);

        env->ReleaseIntArrayElements(pixels_, pixels, 0);

        return result;
}
void printMAtMessage(Mat &mat) {
    LOGD("*************************** Mat info begin ************************");
    LOGD("mat.rows %d", mat.rows);
    LOGD("mat.cols %d", mat.cols);
    LOGD("mat.total %d", (int) mat.total());
    LOGD("mat.channels %d", mat.channels());
    LOGD("mat.depth %d", mat.depth());
    LOGD("mat.type %d", mat.type());
    LOGD("mat.flags %d", mat.flags);
    LOGD("mat.elemSize %d", (int) mat.elemSize());
    LOGD("mat.elemSize1 %d", (int) mat.elemSize1());
    LOGD("*************************** Mat info end ************************");
}

}
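
The comment before the final cvtColor is worth making explicit: because the four-channel result is copied straight into a Java int array, the Mat must hold exactly one 4-byte pixel per jint and be stored without row padding. A minimal, hedged sketch of such a guard (not part of the original code; it could sit just before SetIntArrayRegion) might look like this:

#include <opencv2/opencv.hpp>

// Sketch only: defensive checks before reinterpreting Mat data as jint values.
void checkBeforeCopy(const cv::Mat &m) {
    // CV_8UC4 = 8-bit depth, 4 channels: exactly one 4-byte pixel per jint.
    CV_Assert(m.type() == CV_8UC4);
    // isContinuous() guarantees the rows are packed with no padding,
    // so m.data can be copied as one block of rows*cols jints.
    CV_Assert(m.isContinuous());
}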

 

Java code:

package com.findai.xkk.myopencv;

import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.widget.ImageView;

import org.opencv.android.OpenCVLoader;

public class MainActivity extends AppCompatActivity {

    // Used to load the 'native-lib' library on application startup.
    static {
        System.loadLibrary("native-lib");
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // Important: initialize OpenCV; this must be done before anything else.
        OpenCVLoader.initDebug();
        setContentView(R.layout.activity_main);
        Bitmap bmp = BitmapFactory.decodeResource(getResources(),R.mipmap.eye);
        ImageView imageView = findViewById(R.id.iv_pro_img);

        int w = bmp.getWidth();
        int h = bmp.getHeight();
        int[] pixels = new int[w*h];
        bmp.getPixels(pixels, 0, w, 0, 0, w, h);

        long startTime = System.currentTimeMillis();
        int[] resultInt = stringFromJNI(pixels, w, h);
        long endTime = System.currentTimeMillis();

        Log.e("JNITime",""+(endTime-startTime));
        Bitmap resultImg = Bitmap.createBitmap(w/2,h/2, Bitmap.Config.ARGB_8888);

        // setPixels(@ColorInt int[] pixels, int offset, int stride, int x, int y, int width, int height)
        resultImg.setPixels(resultInt, 0, w/2, 0, 0, w/2, h/2);
        imageView.setImageBitmap(resultImg);
    }

    /**
     * A native method that is implemented by the 'native-lib' native library,
     * which is packaged with this application.
     */
    public native int[] stringFromJNI(int[] pixels, int w, int h);
}

Core idea:

At the eyelid the grayscale changes most sharply, so for each pixel the gray values are collected over a small vertical range above and below and their sums are compared; summing over a window also reduces the influence of noise. The algorithm has high real-time performance, reaching 100 FPS.
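
To make this concrete, the per-pixel response that the JNI code above computes inline can be written as a small standalone function (a sketch; the function name is illustrative and not from the original code):

#include <opencv2/opencv.hpp>

// For a pixel (i, j) of a grayscale image, sum the 5 gray values directly above
// and the 5 directly below and return the difference. A large |difference| marks
// a strong horizontal edge, which is where the eyelid boundary is expected.
int verticalWindowResponse(const cv::Mat &gray, int i, int j) {
    int above = 0, below = 0;
    for (int k = 1; k <= 5; k++) {
        above += gray.at<uchar>(i - k, j);
        below += gray.at<uchar>(i + k, j);
    }
    return above - below;  // the JNI code thresholds |above - below| at 40% of the maximum
}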

Deep-learning methods such as Mask R-CNN were tried earlier; the results were good, but they could not keep up in real time and are not suitable for deployment on mobile devices. A classical image-processing approach was therefore adopted. Sobel gradient methods and others were also tried; in the end this algorithm proved simple, effective, and fast.
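
For comparison, a Sobel-based vertical gradient in OpenCV would look roughly like the sketch below (a hedged example for illustration only; it is not the method used in this article):

#include <opencv2/opencv.hpp>

// Sketch: respond to horizontal edges (dx = 0, dy = 1), e.g. the eyelid contour.
cv::Mat sobelVerticalEdges(const cv::Mat &gray) {
    cv::Mat grad_y, abs_grad_y;
    cv::Sobel(gray, grad_y, CV_16S, 0, 1, 3);   // 16-bit signed output keeps negative gradients
    cv::convertScaleAbs(grad_y, abs_grad_y);    // back to 8-bit for thresholding or display
    return abs_grad_y;
}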

Result images:

 

 

GitHub:

https://github.com/xkkjiayou/MYOPencv_real

JNI C++ references:

https://blog.csdn.net/wulafly/article/details/71076594

https://blog.csdn.net/qq_29540745/article/details/52487832

https://blog.csdn.net/wjb820728252/article/details/78357269

https://blog.csdn.net/brcli/article/details/76407986