程式人生 > opencv影象處理常用完整示例程式碼總結

opencv影象處理常用完整示例程式碼總結

顯示影象

#include "StdAfx.h"
#include <string>
#include <iostream>
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
 
using namespace cv;
using namespace std;
 
int main()
{
         string imageName = “lena.jpg”;
 
         //讀入影象
         Mat img = imread(imageName, CV_LOAD_IMAGE_COLOR);
 
         //如果讀入影象失敗
         if (img.empty())
         {
                   cout<<”Could not open or find the image!”<<endl;
                   return -1;
         }
 
         //建立視窗
         namedWindow(“lena”, CV_WINDOW_AUTOSIZE);

         //顯示影象
         imshow(“lena”, img);
 
         //等待按鍵,按鍵盤任意鍵返回
         waitKey();

         return 0;
}

載入-RGB轉灰度圖-儲存

#include "StdAfx.h"
#include <cv.h>
#include <highgui.h>
#include <string>
 
using namespace cv;
using namespace std;
 
int main()
{
         char* imageName = “lena.jpg”;
         Mat image = imread(imageName, 1);
 
         if (!image.data)
         {
                   cout<<”Could not open or find the image!”<<endl;
                   return -1;
         }
 
         Mat gray_image;
         String grayImageName = “lena_gray”;
 
         cvtColor(image,gray_image,CV_RGB2GRAY);//將RGB影象轉換成灰度影象
         imwrite(“../../lena_gray.jpg”,gray_image);//儲存影象
 
         namedWindow(imageName, CV_WINDOW_AUTOSIZE);//建立用於顯示元影象視窗
         namedWindow(grayImageName,CV_WINDOW_AUTOSIZE);//建立用於顯示轉換後圖像視窗
 
         imshow(imageName,image);
         imshow(“grayImageName”, gray_image);
 
        waitKey(0);
        return 0;
}
膨脹操作示例

#include <opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace std;
using namespace cv;
 
int main(  )
{
 
  //載入原圖 
  Mat image = imread("1.jpg");
 
  //建立視窗 
  namedWindow("原圖-膨脹操作");
  namedWindow("效果圖-膨脹操作");
 
  //顯示原圖
  imshow("原圖-膨脹操作", image);
 
  //獲取自定義核
  Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
  Mat out;
  //進行膨脹操作
  dilate(image,out, element);
 
  //顯示效果圖
  imshow("效果圖-膨脹操作", out);
 
  waitKey(0);
 
  return 0;
}

腐蝕操作示例

#include <opencv2/core/core.hpp>

#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main(  )
{
  //載入原圖 
  Matimage = imread("1.jpg");
 
   //建立視窗 
  namedWindow("原圖-腐蝕操作");
  namedWindow("效果圖-腐蝕操作");
 
  //顯示原圖
  imshow("原圖-腐蝕操作", image);
 
   
  //獲取自定義核
  Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
  Mat out;
 
  //進行腐蝕操作
  erode(image,out, element);
 
  //顯示效果圖
  imshow("效果圖-腐蝕操作", out);
 
  waitKey(0);
 
  return 0;
}

膨脹與腐蝕綜合示例

#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include <iostream>
 
using namespace std;
using namespace cv;
 
Mat g_srcImage, g_dstImage;//source image and result image
int g_nTrackbarNumer = 0;//0 = erode, 1 = dilate
int g_nStructElementSize = 3; //size n of the structuring element (kernel is 2n+1)
 
void Process();//applies the erosion/dilation currently selected by the trackbars
void on_TrackbarNumChange(int, void *);//trackbar callback: mode switch
void on_ElementSizeChange(int, void *);//trackbar callback: kernel size
 
int main( )
{
  //改變console字型顏色
  system("color5E"); 
 
  //載入原圖
  g_srcImage= imread("1.jpg");
  if(!g_srcImage.data ) { printf("Oh,no,讀取srcImage錯誤~!\n"); return false; }
      
  //顯示原始圖
  namedWindow("原始圖");
  imshow("原始圖", g_srcImage);
      
  //進行初次腐蝕操作並顯示效果圖
  namedWindow("效果圖");
  //獲取自定義核
  Matelement = getStructuringElement(MORPH_RECT, Size(2*g_nStructElementSize

+1,2*g_nStructElementSize+1),Point( g_nStructElementSize, g_nStructElementSize ));
  erode(g_srcImage,g_dstImage, element);
  imshow("效果圖", g_dstImage);
 
  //建立軌跡條
  createTrackbar("腐蝕/膨脹", "效果圖", &g_nTrackbarNumer, 1, on_TrackbarNumChange);
  createTrackbar("核心尺寸", "效果圖",&g_nStructElementSize, 21, on_ElementSizeChange);
 
  //輸出一些幫助資訊
  cout<<endl<<"\t嗯。執行成功,請調整滾動條觀察影象效果~\n\n"
    <<"\t按下“q”鍵時,程式退出~!\n"
    <<"\n\n\t\t\t\tby毛毛";
 
  //輪詢獲取按鍵資訊,若下q鍵,程式退出
  while(char(waitKey(1))!= 'q') {}
 
  return 0;
}
 

//進行自定義的腐蝕和膨脹操作
void Process()
{
  //獲取自定義核
  Mat element = getStructuringElement(MORPH_RECT, Size(2*g_nStructElementSize

+1,2*g_nStructElementSize+1),Point( g_nStructElementSize, g_nStructElementSize ));
 
  //進行腐蝕或膨脹操作
  if(g_nTrackbarNumer== 0) {   
    erode(g_srcImage,g_dstImage, element);
  }
  else{
    dilate(g_srcImage,g_dstImage, element);
  }
 
  //顯示效果圖
  imshow("效果圖", g_dstImage);
}
 
 
//腐蝕和膨脹之間切換開關的回撥函式
void on_TrackbarNumChange(int, void *)
{
  //腐蝕和膨脹之間效果已經切換,回撥函式體內需呼叫一次Process函式,使改變後的效果立即生效並

顯示出來
  Process();
}
 
//腐蝕和膨脹操作核心改變時的回撥函式
void on_ElementSizeChange(int, void *)
{
  //核心尺寸已改變,回撥函式體內需呼叫一次Process函式,使改變後的效果立即生效並顯示出來
  Process();
}

膨脹與腐蝕綜合示例2

#include "cv.h" 
#include "highgui.h"
#include "opencv2/imgproc/imgproc.hpp"

using namespace std;
using namespace cv;

// Structuring-element shape selector values bound to the trackbars.
#define TYPE_MORPH_RECT      (0)
#define TYPE_MORPH_CROSS     (1)
#define TYPE_MORPH_ELLIPSE   (2)

#define MAX_ELE_TYPE         (2)   // trackbar maximum for the shape selector
#define MAX_ELE_SIZE         (20)  // trackbar maximum for the element size n

// Shared images: one input plus one output per demo window.
Mat src, erode_dst, dilate_dst;

// Window titles.
const char *erode_wn  = "eroding demo";
const char *dilate_wn = "dilating demo";

// Trackbar-bound state (shape and size for each operation).
int erode_ele_type;
int dilate_ele_type;
int erode_ele_size;
int dilate_ele_size;

static void Erosion(int, void *);
static void Dilation(int, void *);

/*
 * @brief   Entry point: load the image named on the command line, then set
 *          up two windows whose trackbars drive erosion and dilation demos.
 * @inputs  argv[1] - path of the image to process
 * @outputs 
 * @retval  0 on success, -1 on usage error or read failure
 */
int main(int argc, char *argv[])
{
    if (argc < 2) {
        cout<<"Usage: ./eroding_and_dilating [file name]"<<endl;
        return -1;
    }

    src = imread(argv[1]);
    if (!src.data) {
        cout<<"Read image failure."<<endl;
        return -1;
    }

    // Windows
    namedWindow(erode_wn, WINDOW_AUTOSIZE);
    namedWindow(dilate_wn, WINDOW_AUTOSIZE);

    // Track Bar for Erosion
    createTrackbar("Element Type\n0:Rect\n1:Cross\n2:Ellipse", erode_wn, 
            &erode_ele_type, MAX_ELE_TYPE, Erosion);  // callback @Erosion
    createTrackbar("Element Size: 2n+1", erode_wn, 
            &erode_ele_size, MAX_ELE_SIZE, Erosion);

    // Track Bar for Dilation
    createTrackbar("Element Type\n0:Rect\n1:Cross\n2:Ellipse", dilate_wn, 
            &dilate_ele_type, MAX_ELE_TYPE, Dilation);  // callback @Dilation
    createTrackbar("Element Size: 2n+1", dilate_wn, 
            &dilate_ele_size, MAX_ELE_SIZE, Dilation);

    // Default start: render both results once before any user interaction.
    Erosion(0, 0);
    Dilation(0, 0);

    waitKey(0);
    return 0;
}


/*
 * @brief   腐蝕操作的回撥函式
 * @inputs  
 * @outputs 
 * @retval  
 */
static void Erosion(int, void *)
{
    int erode_type;

    switch (erode_ele_type) {
    case TYPE_MORPH_RECT:
       erode_type = MORPH_RECT; 
       break;
    case TYPE_MORPH_CROSS:
       erode_type = MORPH_CROSS;
       break;
    case TYPE_MORPH_ELLIPSE:
       erode_type = MORPH_ELLIPSE;
       break;
    default:
       erode_type = MORPH_RECT;
       break;
    }

    Mat ele = getStructuringElement(erode_type, Size(2*erode_ele_size+1, 2*erode_ele_size

+1), 
            Point(erode_ele_size, erode_ele_size));

    erode(src, erode_dst, ele);

    imshow(erode_wn, erode_dst);
}

/*
 * @brief   膨脹操作的回撥函式
 * @inputs  
 * @outputs 
 * @retval  
 */
static void Dilation(int, void *)
{
    int dilate_type;

    switch (dilate_ele_type) {
    case TYPE_MORPH_RECT:
       dilate_type = MORPH_RECT; 
       break;
    case TYPE_MORPH_CROSS:
       dilate_type = MORPH_CROSS;
       break;
    case TYPE_MORPH_ELLIPSE:
       dilate_type = MORPH_ELLIPSE;
       break;
    default:
       dilate_type = MORPH_RECT;
       break;
    }

    Mat ele = getStructuringElement(dilate_type, Size(2*dilate_ele_size+1, 

2*dilate_ele_size+1), 
            Point(dilate_ele_size, dilate_ele_size));

    dilate(src, dilate_dst, ele);

    imshow(dilate_wn, dilate_dst);
}

Qt影象的縮放顯示

#include "widget.h"
#include "ui_widget.h"
#include <QDebug>
// Construct the widget and build the Designer-generated UI.
Widget::Widget(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::Widget)
{
    ui->setupUi(this);
}


// Release the generated UI object.
Widget::~Widget()
{
    delete ui;
}

// Let the user pick an image file, load it with OpenCV, keep a pristine
// copy for the "original" button, and show it scaled in the label.
void Widget::on_openButton_clicked()
{
    QString fileName = QFileDialog::getOpenFileName(this,tr("Open Image"),
                                ".",tr("Image Files (*.png *.jpg *.bmp)"));
    qDebug()<<"filenames:"<<fileName;
    image = cv::imread(fileName.toAscii().data());
    ui->imgfilelabel->setText(fileName);
    //here use 2 ways to make a copy
//    image.copyTo(originalimg);          //make a copy
    originalimg = image.clone();        //clone the img
    qimg = Widget::Mat2QImage(image);
    display(qimg);                      //display by the label
    // NOTE(review): display() runs even when the load failed; only the
    // effect buttons are guarded by this check — confirm that is intended.
    if(image.data)
    {
        ui->saltButton->setEnabled(true);
        ui->originalButton->setEnabled(true);
        ui->reduceButton->setEnabled(true);
    }
}

// Convert an OpenCV Mat to a QImage for display.
// Fixed: the original tagged every non-3-channel Mat as Format_RGB888, so a
// single-channel (gray) image was misread as packed RGB triplets.
QImage Widget::Mat2QImage(const cv::Mat &mat)
{
    QImage img;
    if(mat.channels()==3)
    {
        // OpenCV stores color as BGR; convert into the `rgb` member (which
        // must outlive `img`, since QImage wraps its buffer).
        cvtColor(mat,rgb,CV_BGR2RGB);
        img =QImage((const unsigned char*)(rgb.data),
                    rgb.cols,rgb.rows,
                    rgb.cols*rgb.channels(),
                    QImage::Format_RGB888);
    }
    else if(mat.channels()==1)
    {
        // 8-bit gray: use an indexed image with a gray palette.
        img =QImage((const unsigned char*)(mat.data),
                    mat.cols,mat.rows,
                    mat.cols,
                    QImage::Format_Indexed8);
        QVector<QRgb> palette(256);
        for (int i = 0; i < 256; ++i)
            palette[i] = qRgb(i, i, i);
        img.setColorTable(palette);
    }
    else
    {
        // Fallback: keep the previous behavior for other channel counts.
        img =QImage((const unsigned char*)(mat.data),
                    mat.cols,mat.rows,
                    mat.cols*mat.channels(),
                    QImage::Format_RGB888);
    }
    return img;
}

// Scale the image to fit the display label (aspect ratio preserved) and
// install it as the label's pixmap.
void Widget::display(QImage img)
{
    QImage scaled;
    scaled = img.scaled(ui->imagelabel->size(), Qt::KeepAspectRatio);
    ui->imagelabel->setPixmap(QPixmap::fromImage(scaled));
}

// Show the untouched copy saved when the file was opened.
void Widget::on_originalButton_clicked()
{
    qimg = Widget::Mat2QImage(originalimg);
    display(qimg);
}

// Add 3000 salt-noise pixels to the working image and refresh the view.
void Widget::on_saltButton_clicked()
{
    salt(image,3000);
    qimg = Widget::Mat2QImage(image);
    display(qimg);
}
// Apply 64-level color reduction in place and refresh the view.
void Widget::on_reduceButton_clicked()
{
    colorReduce0(image,64);
    qimg = Widget::Mat2QImage(image);
    display(qimg);
}
// Sprinkle n white ("salt") noise pixels at random positions.
void Widget::salt(cv::Mat &image, int n)
{
    for (int k = 0; k < n; k++)
    {
        // Pick a random pixel.
        const int col = qrand() % image.cols;
        const int row = qrand() % image.rows;

        if (image.channels() == 1)
        {
            // Gray-level image: one byte per pixel.
            image.at<uchar>(row, col) = 255;
        }
        else if (image.channels() == 3)
        {
            // Color image: saturate all three channels.
            cv::Vec3b &px = image.at<cv::Vec3b>(row, col);
            px[0] = 255;
            px[1] = 255;
            px[2] = 255;
        }
    }
}

// using .ptr and []
// Quantize every channel value to the center of its div-sized bucket.
void Widget::colorReduce0(cv::Mat &image, int div)
{
      const int rows = image.rows;
      const int elemsPerRow = image.cols * image.channels();

      for (int r = 0; r < rows; r++)
      {
          uchar *p = image.ptr<uchar>(r);

          for (int c = 0; c < elemsPerRow; c++)
          {
              // Integer division floors to the bucket start; +div/2 centers it.
              p[c] = p[c] / div * div + div / 2;
          }
      }
}

#ifndef WIDGET_H
#define WIDGET_H

#include <QWidget>
#include <QImage>
#include <QFileDialog>
#include <QTimer>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

namespace Ui {
class Widget;
}

// Main demo widget: loads an image with OpenCV, applies simple effects
// (salt noise, color reduction) and displays the result in a QLabel.
class Widget : public QWidget
{
    Q_OBJECT
    
public:
    explicit Widget(QWidget *parent = 0);
    ~Widget();
private slots:
    void on_openButton_clicked();
    // Converts a cv::Mat to QImage (BGR input is converted via the `rgb` member).
    QImage Mat2QImage(const cv::Mat &mat);
    // Scales the given image to the display label, keeping aspect ratio.
    void display(QImage image);
    // Adds n white noise pixels at random positions.
    void salt(cv::Mat &image, int n);

    void on_saltButton_clicked();
    void on_reduceButton_clicked();
    // Quantizes each channel to div-sized buckets.
    void colorReduce0(cv::Mat &image, int div);
    void on_originalButton_clicked();

private:
    Ui::Widget *ui;
    cv::Mat image;       // working image (effects applied in place)
    cv::Mat originalimg; //store the original img
    QImage qimg;         // last converted frame
    QImage imgScaled;
    cv::Mat rgb;         // scratch buffer for BGR->RGB conversion
};

#endif // WIDGET_H

#include <iostream>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

// using .ptr and []
// Quantize every channel value to the center of its div-sized bucket.
void colorReduce0(cv::Mat &image, int div=64) {
      const int rows = image.rows;
      const int elemsPerRow = image.cols * image.channels();

      for (int r = 0; r < rows; r++) {
          uchar *p = image.ptr<uchar>(r);
          for (int c = 0; c < elemsPerRow; c++) {
              // Integer division floors to the bucket start; +div/2 centers it.
              p[c] = p[c] / div * div + div / 2;
          }
      }
}

// using .ptr and * ++
// Fixed: the original wrote `*data++ = *data/div*div + div/2;`, which reads
// and modifies `data` without sequencing — undefined behavior before C++17.
void colorReduce1(cv::Mat &image, int div=64) {
      int nl= image.rows; // number of lines
      int nc= image.cols * image.channels(); // total number of elements per line

      for (int j=0; j<nl; j++) {
          uchar* data= image.ptr<uchar>(j);
          for (int i=0; i<nc; i++) {
              // Quantize first, then advance — well-defined in every standard.
              *data = *data/div*div + div/2;
              ++data;
          } // end of line
      }
}

// using .ptr and * ++ and modulo
void colorReduce2(cv::Mat &image, int div=64) {

      const int rows = image.rows;
      const int elemsPerRow = image.cols * image.channels();

      for (int r = 0; r < rows; r++) {
          uchar *p = image.ptr<uchar>(r);

          for (int e = 0; e < elemsPerRow; e++) {
              // Modulo form: v - v%div lands on the bucket start.
              const int v = *p;
              *p++ = v - v % div + div / 2;
          }
      }
}

// using .ptr and * ++ and bitwise
// Fixed two bugs: `*data & mask + div/2` parsed as `*data & (mask + div/2)`
// because + binds tighter than &, and `*data++ = *data...` was unsequenced
// (UB before C++17).
void colorReduce3(cv::Mat &image, int div=64) {
      int nl= image.rows; // number of lines
      int nc= image.cols * image.channels(); // total number of elements per line
      int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
      // mask used to round the pixel value
      uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0

      for (int j=0; j<nl; j++) {
          uchar* data= image.ptr<uchar>(j);
          for (int i=0; i<nc; i++) {
              // Clear the low bits, then add the bucket-center offset.
              *data = (*data & mask) + div/2;
              ++data;
          } // end of line
      }
}

// direct pointer arithmetic
// Fixed two bugs: the original assigned from `*data` (element 0 of the row)
// instead of `*(data+i)`, and `& mask + div/2` parsed as `& (mask + div/2)`.
void colorReduce4(cv::Mat &image, int div=64) {
      int nl= image.rows; // number of lines
      int nc= image.cols * image.channels(); // total number of elements per line
      int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
      int step= image.step; // effective width
      // mask used to round the pixel value
      uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0

      // get the pointer to the image buffer
      uchar *data= image.data;

      for (int j=0; j<nl; j++) {
          for (int i=0; i<nc; i++) {
            *(data+i)= (*(data+i) & mask) + div/2;
          } // end of line
          data+= step;  // next line
      }
}

// using .ptr and * ++ and bitwise with image.cols * image.channels()
// (the loop bound is deliberately re-evaluated every iteration to show the cost)
// Fixed: parenthesized the mask (precedence) and split the increment off the
// assignment (pre-C++17 sequencing UB).
void colorReduce5(cv::Mat &image, int div=64) {
      int nl= image.rows; // number of lines
      int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
      // mask used to round the pixel value
      uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0

      for (int j=0; j<nl; j++) {
          uchar* data= image.ptr<uchar>(j);
          for (int i=0; i<image.cols * image.channels(); i++) {
              *data = (*data & mask) + div/2;
              ++data;
          } // end of line
      }
}

// using .ptr and * ++ and bitwise (continuous)
// Fixed: parenthesized the mask (precedence) and split the increment off the
// assignment (pre-C++17 sequencing UB).
void colorReduce6(cv::Mat &image, int div=64) {

      int nl= image.rows; // number of lines
      int nc= image.cols * image.channels(); // total number of elements per line

      if (image.isContinuous())  {
          // then no padded pixels: treat the whole image as one long row
          nc= nc*nl; 
          nl= 1;  // it is now a 1D array
       }

      int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
      // mask used to round the pixel value
      uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0

      for (int j=0; j<nl; j++) {
          uchar* data= image.ptr<uchar>(j);

          for (int i=0; i<nc; i++) {
              *data = (*data & mask) + div/2;
              ++data;
          } // end of line
      }
}

// using .ptr and * ++ and bitwise (continuous+channels)
// Assumes a 3-channel image (one explicit statement per channel).
// Fixed: parenthesized the mask (precedence) and replaced the unsequenced
// `*data++ = *data...` pattern (UB before C++17).
void colorReduce7(cv::Mat &image, int div=64) {
      int nl= image.rows; // number of lines
      int nc= image.cols ; // number of columns

      if (image.isContinuous())  {
          // then no padded pixels: treat the whole image as one long row
          nc= nc*nl; 
          nl= 1;  // it is now a 1D array
       }

      int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
      // mask used to round the pixel value
      uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0

      for (int j=0; j<nl; j++) {
          uchar* data= image.ptr<uchar>(j);

          for (int i=0; i<nc; i++) {
              // B, G, R channels of one pixel.
              data[0] = (data[0] & mask) + div/2;
              data[1] = (data[1] & mask) + div/2;
              data[2] = (data[2] & mask) + div/2;
              data += 3;
          } // end of line
      }
}

// using Mat_ iterator
void colorReduce8(cv::Mat &image, int div=64) {
      // Iterate pixel-wise over the 3-channel image.
      cv::Mat_<cv::Vec3b>::iterator px = image.begin<cv::Vec3b>();
      const cv::Mat_<cv::Vec3b>::iterator last = image.end<cv::Vec3b>();

      while (px != last) {
          // Quantize each channel to the center of its div-sized bucket.
          cv::Vec3b &v = *px;
          v[0] = v[0] / div * div + div / 2;
          v[1] = v[1] / div * div + div / 2;
          v[2] = v[2] / div * div + div / 2;
          ++px;
      }
}

// using Mat_ iterator and bitwise
// Fixed: `x & mask + div/2` parsed as `x & (mask + div/2)` because + binds
// tighter than & — not the intended quantization.
void colorReduce9(cv::Mat &image, int div=64) {

      // div must be a power of 2
      int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
      // mask used to round the pixel value
      uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0

      // get iterators
      cv::Mat_<cv::Vec3b>::iterator it= image.begin<cv::Vec3b>();
      cv::Mat_<cv::Vec3b>::iterator itend= image.end<cv::Vec3b>();

      // scan all pixels
      for ( ; it!= itend; ++it) {
        (*it)[0]= ((*it)[0] & mask) + div/2;
        (*it)[1]= ((*it)[1] & mask) + div/2;
        (*it)[2]= ((*it)[2] & mask) + div/2;
      }
}

// using MatIterator_
void colorReduce10(cv::Mat &image, int div=64) {
      // View the image as a typed matrix so begin()/end() need no template arg.
      cv::Mat_<cv::Vec3b> view = image;

      for (cv::Mat_<cv::Vec3b>::iterator px = view.begin(); px != view.end(); ++px) {
          // Quantize each channel to the center of its div-sized bucket.
          (*px)[0] = (*px)[0] / div * div + div / 2;
          (*px)[1] = (*px)[1] / div * div + div / 2;
          (*px)[2] = (*px)[2] / div * div + div / 2;
      }
}

// Random access via at<>() — the clearest (and slowest) variant.
void colorReduce11(cv::Mat &image, int div=64) {
      const int rows = image.rows;
      const int cols = image.cols;

      for (int r = 0; r < rows; r++) {
          for (int c = 0; c < cols; c++) {
              // Quantize each channel of this pixel.
              cv::Vec3b &px = image.at<cv::Vec3b>(r, c);
              px[0] = px[0] / div * div + div / 2;
              px[1] = px[1] / div * div + div / 2;
              px[2] = px[2] / div * div + div / 2;
          }
      }
}

// with input/output images
// Fixed: `(*idata++) & mask + div/2` parsed as `(*idata++) & (mask + div/2)`
// because + binds tighter than &.
void colorReduce12(const cv::Mat &image, // input image 
                 cv::Mat &result,      // output image
                 int div=64) {
      int nl= image.rows; // number of lines
      int nc= image.cols ; // number of columns

      // allocate output image if necessary
      result.create(image.rows,image.cols,image.type());

      // created images have no padded pixels
      nc= nc*nl; 
      nl= 1;  // it is now a 1D array

      int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
      // mask used to round the pixel value
      uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0

      for (int j=0; j<nl; j++) {
          uchar* data= result.ptr<uchar>(j);
          const uchar* idata= image.ptr<uchar>(j);

          for (int i=0; i<nc; i++) { 
            // One write per channel (B, G, R) of the pixel.
            *data++= ((*idata++) & mask) + div/2;
            *data++= ((*idata++) & mask) + div/2;
            *data++= ((*idata++) & mask) + div/2;
          } // end of line                   
      }
}

// using overloaded operators
void colorReduce13(cv::Mat &image, int div=64) {
      // Number of low bits to clear; div must be a power of two.
      const int shift = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
      const uchar mask = 0xFF << shift; // e.g. for div=16, mask = 0xF0

      // Whole-image expression: clear low bits, then add the bucket offset.
      const cv::Scalar maskScalar(mask, mask, mask);
      const cv::Scalar halfBucket(div/2, div/2, div/2);
      image = (image & maskScalar) + halfBucket;
}

影象銳化1

sharp.h

#pragma once
#include <opencv\cv.h>
using namespace cv;
namespace ggicci
{
    // Sharpens img into result (result is (re)allocated to match img).
    void sharpen(const Mat& img, Mat& result);
}

sharp.cpp
#include "sharp.h"
// Sharpen img into result using the kernel 5*center - left - right - up - down,
// saturated to [0, 255]. The one-pixel border is zeroed.
// Fixed: the original's wrapped comment left its continuation ("為一個遞增量...")
// as bare source text, which does not compile.
void ggicci::sharpen(const Mat& img, Mat& result)
{    
    result.create(img.size(), img.type());
    // Process interior pixels; the border is handled separately below.
    for (int row = 1; row < img.rows-1; row++)
    {
        const uchar* previous = img.ptr<const uchar>(row-1); // row above
        const uchar* current  = img.ptr<const uchar>(row);   // row being processed
        const uchar* next     = img.ptr<const uchar>(row+1); // row below
        uchar *output = result.ptr<uchar>(row);
        int ch = img.channels();
        int starts = ch;
        int ends = (img.cols - 1) * ch;
        for (int col = starts; col < ends; col++)
        {
            // `output` advances in step with `col` — one increment per channel
            // value — so multi-channel images are handled transparently.
            // Horizontal neighbors are one whole pixel (ch elements) away.
            *output++ = saturate_cast<uchar>(5 * current[col]
                            - current[col-ch] - current[col+ch]
                            - previous[col] - next[col]);
        }
    } //end loop
    // Border pixels have no full neighborhood; set them to 0.
    result.row(0).setTo(Scalar::all(0));
    result.row(result.rows-1).setTo(Scalar::all(0));
    result.col(0).setTo(Scalar::all(0));
    result.col(result.cols-1).setTo(Scalar::all(0));
}

main.cpp
#include <opencv\highgui.h>
#pragma comment(lib, "opencv_core231d.lib")
#pragma comment(lib, "opencv_highgui231d.lib")
#pragma comment(lib, "opencv_imgproc231d.lib")

using namespace cv;
 
#include "sharp.h"
 
int main()
{    
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    ggicci::sharpen(lena, sharpenedLena);
 
    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    cvWaitKey();
    return 0;
}

影象銳化2

int main()
{    
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cv::filter2D(lena, sharpenedLena, lena.depth(), kernel);
 
    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    cvWaitKey();
    return 0;
}

簡單的灰度影象的直方圖計算

   int main()
   {    
       Mat img = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
   
       // calcHist setup: one single-channel image, no mask, one dimension
       // with 256 bins over [0, 255].
       Mat* arrays = &img;
       int narrays = 1;
       int channels[] = { 0 };
       InputArray mask = noArray();
       Mat hist;
       int dims = 1;
       int histSize[] = { 256 };    
       float hranges[] = { 0.0, 255.0 };
       const float *ranges[] = { hranges };
       // Compute the histogram into `hist`.
       calcHist(arrays, narrays, channels, mask, hist, dims, histSize, ranges);
       
       // Render the histogram data into an image of the requested size
       // and display it.
       Mat histImg = ggicci::getHistogram1DImage(hist, Size(600, 420));
       imshow("lena gray image histogram", histImg);
       waitKey();
   }
   
   // Render a 1D histogram (one float bin per row of `hist`) as a bar chart.
   Mat ggicci::getHistogram1DImage(const Mat& hist, Size imgSize)
   {
       Mat histImg(imgSize, CV_8UC3);
       const int padding = 10;
       const int W = imgSize.width - 2 * padding;
       const int H = imgSize.height - 2 * padding;

       // Scale so the tallest bin spans the full drawing height.
       double maxVal;
       minMaxLoc(hist, NULL, &maxVal);
       const double scale = (double)H / maxVal;
       const Point orig(padding, imgSize.height - padding);
       const int bin = W / (hist.rows + 2);

       // One thick vertical blue line per bin.
       for (int i = 1; i <= hist.rows; i++)
       {
           Point bottom(orig.x + i * bin, orig.y);
           Point top(bottom.x, bottom.y - scale * hist.at<float>(i - 1));
           line(histImg, bottom, top, Scalar(255, 0, 0), bin);
       }

       // Three red guide lines framing the plot area.
       line(histImg, Point(orig.x + bin, orig.y - H), Point(orig.x + hist.rows * bin, orig.y - H), Scalar(0, 0, 255), 1);
       line(histImg, Point(orig.x + bin, orig.y), Point(orig.x + bin, orig.y - H), Scalar(0, 0, 255), 1);
       line(histImg, Point(orig.x + hist.rows * bin, orig.y), Point(orig.x + hist.rows * bin, orig.y - H), Scalar(0, 0, 255), 1);

       // Axes with arrow heads.
       drawArrow(histImg, orig, orig + Point(W, 0), 10, 30, Scalar::all(0), 2);
       drawArrow(histImg, orig, orig - Point(0, H), 10, 30, Scalar::all(0), 2);

       return histImg;
   }

影象縮放-最近鄰插值-雙線性插值

#include "stdafx.h"
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <cmath>

using namespace std;
using namespace cv;

int main(int argc ,char ** argv)
{
    IplImage *scr = 0;
    IplImage *dst = 0;
    const double scale = 4;
    CvSize dst_cvsize;

    // Proceed only when exactly one path was given and the image loads.
    if (argc == 2 && (scr = cvLoadImage(argv[1], -1)) != 0)
    {
        dst_cvsize.width  = (int)(scr->width  * scale);
        dst_cvsize.height = (int)(scr->height * scale);
        dst = cvCreateImage(dst_cvsize, scr->depth, scr->nChannels);

        // Interpolation choices:
        //   CV_INTER_NN     - nearest neighbour
        //   CV_INTER_LINEAR - bilinear (the default)
        //   CV_INTER_AREA   - pixel-area resampling; avoids moire when
        //                     shrinking (behaves like CV_INTER_NN when enlarging)
        //   CV_INTER_CUBIC  - bicubic
        cvResize(scr, dst, CV_INTER_NN);

        cvNamedWindow("scr", CV_WINDOW_AUTOSIZE);
        cvNamedWindow("dst", CV_WINDOW_AUTOSIZE);
        cvShowImage("scr", scr);
        cvShowImage("dst", dst);
        cvWaitKey();

        // Release images and windows.
        cvReleaseImage(&scr);
        cvReleaseImage(&dst);
        cvDestroyWindow("scr");
        cvDestroyWindow("dst");
    }
    return 0;
}

圖片加“懷舊色”濾鏡儲存輸出

#include <opencv/cv.h>
#include <opencv/highgui.h>

using namespace cv;
using namespace std;

int main(int argc, char ** argv)
{
    // input args check
    if(argc < 3){
        printf("please input args.\n");
        printf("e.g. : ./test infilepath outfilepath \n");
        return 0;
    }
    
    char * input = argv[1];
    char * output = argv[2];
    
    printf("input: %s, output: %s\n", input, output);

    Mat src = imread(input, 1);
    // Fixed: the original used src without checking the load result.
    if (!src.data) {
        printf("could not read input image.\n");
        return -1;
    }

    int width=src.cols;
    int heigh=src.rows;
    Mat img(src.size(),CV_8UC3);

    // Sepia ("nostalgia") filter: fixed linear mix of R/G/B per pixel,
    // clamped to [0, 255]. (Removed an unused RNG local.)
    for (int y=0; y<heigh; y++)
    {
        uchar* P0 = src.ptr<uchar>(y);
        uchar* P1 = img.ptr<uchar>(y);
        for (int x=0; x<width; x++)
        {
            float B=P0[3*x];
            float G=P0[3*x+1];
            float R=P0[3*x+2];
            float newB=0.272*R+0.534*G+0.131*B;
            float newG=0.349*R+0.686*G+0.168*B;
            float newR=0.393*R+0.769*G+0.189*B;
            if(newB<0)newB=0;
            if(newB>255)newB=255;
            if(newG<0)newG=0;
            if(newG>255)newG=255;
            if(newR<0)newR=0;
            if(newR>255)newR=255;
            P1[3*x] = (uchar)newB;
            P1[3*x+1] = (uchar)newG;
            P1[3*x+2] = (uchar)newR;
        }
    }
    // (Removed a waitKey() that blocked with no window displayed.)
    imwrite(output,img);
    return 0;
}

浮雕和雕刻效果

#include <cv.h>  
#include <highgui.h>  

#pragma comment( lib, "cv.lib" )  
#pragma comment( lib, "cxcore.lib" )  
#pragma comment( lib, "highgui.lib" )  

int main()  
{  
    // Fixed: the original's wrapped comments ("//\n浮雕") left bare text as
    // source, which does not compile; also added a load check.
    IplImage *org=cvLoadImage("1.jpg",1);  
    if (!org)
        return -1;   // input image missing

    IplImage *image=cvCloneImage(org);  
    int width=image->width;  
    int height=image->height;  
    int step=image->widthStep;  
    int channel=image->nChannels;  
    uchar* data=(uchar *)image->imageData;  
    for(int i=0;i<width-1;i++)  
    {  
        for(int j=0;j<height-1;j++)  
        {  
            for(int k=0;k<channel;k++)  
            {  
                // Emboss: difference with the lower-right neighbour, biased by 128.
                int temp = data[(j+1)*step+(i+1)*channel+k]-data[j*step+i*channel+k]+128;
                // Engrave variant (swap the operands):
                //int temp = data[j*step+i*channel+k]-data[(j+1)*step+(i+1)*channel+k]+128;

                // Clamp to the valid byte range.
                if(temp>255)  
                {  
                    data[j*step+i*channel+k]=255;  
                }  
                else if(temp<0)  
                {  
                    data[j*step+i*channel+k]=0;  
                }  
                else  
                {  
                    data[j*step+i*channel+k]=temp;  
                }  
            }  
        }  
    }  
    cvNamedWindow("original",1);  
    cvShowImage("original",org);  
    cvNamedWindow("image",1);  
    cvShowImage("image",image);  
    cvWaitKey(0);   
    cvDestroyAllWindows();  
    cvReleaseImage(&image);  
    cvReleaseImage(&org);  
    return 0;  
}

影象褶皺效果

#include <cv.h>  
#include <highgui.h>  

#pragma comment( lib, "cv.lib" )  
#pragma comment( lib, "cxcore.lib" )  
#pragma comment( lib, "highgui.lib" )  

int main()  
{  
    IplImage *org=cvLoadImage("lena.jpg",1);  
    IplImage *image=cvCloneImage(org);  
    int width=image->width;  
    int height=image->height;  
    int step=image->widthStep;  
    int channel=image->nChannels;  
    uchar* data=(uchar *)image->imageData;  

    // Wrinkle effect: within each block of `cycle` rows the horizontal shift
    // ramps one way, and the ramp direction alternates every block; each row
    // is overwritten by a copy of itself shifted left by `margin` pixels.
    const int cycle = 10;
    for (int i = 0; i < height; i++)
    {
        int margin = i % cycle;
        if ((i / cycle) % 2 == 0)
            margin = cycle - margin;   // descending ramp in even blocks

        // Copy pixel (j+margin) over pixel j, channel by channel.
        for (int j = 0; j < width - margin; j++)
            for (int k = 0; k < channel; k++)
                data[i*step + j*channel + k] = data[i*step + (j+margin)*channel + k];
    }

    cvNamedWindow("original",1);  
    cvShowImage("original",org);  
    cvNamedWindow("image",1);  
    cvShowImage("image",image);  
    cvSaveImage("image.jpg",image);  
    cvWaitKey(0);   
    cvDestroyAllWindows();  
    cvReleaseImage(&image);  
    cvReleaseImage(&org);  
    return 0;  
}

Grabcut演算法

#include "stdafx.h"  
  
#include "opencv2/highgui/highgui.hpp"  
#include "opencv2/imgproc/imgproc.hpp"  
  
#include <iostream>  
  
#include "ComputeTime.h"  
#include "windows.h"  
  
using namespace std;  
using namespace cv;  
  
static void help()  
{  
    cout << "\nThis program demonstrates GrabCut segmentation -- select an object in a 

region\n"  
        "and then grabcut will attempt to segment it out.\n"  
        "Call:\n"  
        "./grabcut <image_name>\n"  
        "\nSelect a rectangular area around the object you want to segment\n" <<  
        "\nHot keys: \n"  
        "\tESC - quit the program\n"  
        "\tr - restore the original image\n"  
        "\tn - next iteration\n"  
        "\n"  
        "\tleft mouse button - set rectangle\n"  
        "\n"  
        "\tCTRL+left mouse button - set GC_BGD pixels\n"  
        "\tSHIFT+left mouse button - set CG_FGD pixels\n"  
        "\n"  
        "\tCTRL+right mouse button - set GC_PR_BGD pixels\n"  
        "\tSHIFT+right mouse button - set CG_PR_FGD pixels\n" << endl;  
}  
  
// Stroke/rectangle drawing colors (OpenCV Scalars are in BGR order).
const Scalar RED = Scalar(0,0,255);
const Scalar PINK = Scalar(230,130,255);
const Scalar BLUE = Scalar(255,0,0);
const Scalar LIGHTBLUE = Scalar(255,255,160);
const Scalar GREEN = Scalar(0,255,0);

const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY;  // Ctrl key: paint background strokes
const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY; // Shift key: paint foreground strokes
  
/*
 * Convert a GrabCut label mask (GC_BGD / GC_FGD / GC_PR_BGD / GC_PR_FGD)
 * into a binary mask. The lowest bit of each label is 1 exactly for
 * GC_FGD and GC_PR_FGD, so "comMask & 1" keeps definite and probable
 * foreground pixels only.
 * Fix: the trailing comment had been split across a blank line, leaving a
 * stray uncommented token that broke compilation.
 */
static void getBinMask( const Mat& comMask, Mat& binMask )
{
    if( comMask.empty() || comMask.type()!=CV_8UC1 )
        CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );
    if( binMask.empty() || binMask.rows!=comMask.rows || binMask.cols!=comMask.cols )
        binMask.create( comMask.size(), CV_8UC1 );
    binMask = comMask & 1;  // keep the low label bit: definite/probable foreground
}
  
/*
 * Interactive GrabCut driver: owns the evolving mask, the GMM models and
 * all user strokes, and turns mouse input into grabCut() calls.
 */
class GCApplication
{
public:
    // Input state of the rectangle / a stroke.
    enum{ NOT_SET = 0, IN_PROCESS = 1, SET = 2 };
    static const int radius = 2;      // stroke dot radius in pixels
    static const int thickness = -1;  // negative: draw filled circles

    void reset();
    void setImageAndWinName( const Mat& _image, const string& _winName );
    void showImage() const;
    void mouseClick( int event, int x, int y, int flags, void* param );
    int nextIter();
    int getIterCount() const { return iterCount; }
private:
    void setRectInMask();
    void setLblsInMask( int flags, Point p, bool isPr );

    const string* winName;      // display window name (not owned)
    const Mat* image;           // source image (not owned)
    Mat mask;                   // per-pixel GC_* labels, input/output of grabCut
    Mat bgdModel, fgdModel;     // GMM models reused across iterations

    uchar rectState, lblsState, prLblsState;
    bool isInitialized;

    Rect rect;                  // user selection rectangle
    // Recorded stroke points: definite and probable, foreground and background.
    vector<Point> fgdPxls, bgdPxls, prFgdPxls, prBgdPxls;
    int iterCount;
};
  
/* Return the application to its initial state: an all-background mask,
 * no recorded strokes, no rectangle, and the iteration counter at zero. */
void GCApplication::reset()
{
    if( !mask.empty() )
        mask.setTo(Scalar::all(GC_BGD));

    bgdPxls.clear();
    fgdPxls.clear();
    prBgdPxls.clear();
    prFgdPxls.clear();

    isInitialized = false;
    iterCount = 0;
    rectState = NOT_SET;    // NOT_SET == 0
    lblsState = NOT_SET;
    prLblsState = NOT_SET;
}
  
/* Remember which image and window to work with, allocate the label mask to
 * match the image size, and start from a clean state. No-op if either
 * argument is empty. */
void GCApplication::setImageAndWinName( const Mat& _image, const string& _winName  )
{
    if( _image.empty() || _winName.empty() )
        return;

    winName = &_winName;
    image = &_image;
    mask.create( image->size(), CV_8UC1);
    reset();
}
  
/*顯示4個點,一個矩形和影象內容,因為後面的步驟很多地方都要用到這個函式,所以單獨拿出來*/  
void GCApplication::showImage() const  
{  
    if( image->empty() || winName->empty() )  
        return;  
  
    Mat res;  
    Mat binMask;  
    if( !isInitialized )  
        image->copyTo( res );  
    else  
    {  
        getBinMask( mask, binMask );  
        image->copyTo( res, binMask );  //按照最低位是0還是1來複制,只保留跟前景有關的影象

,比如說可能的前景,可能的背景  
    }  
  
    vector<Point>::const_iterator it;  
    /*下面4句程式碼是將選中的4個點用不同的顏色顯示出來*/  
    for( it = bgdPxls.begin(); it != bgdPxls.end(); ++it )  //迭代器可以看成是一個指標  
        circle( res, *it, radius, BLUE, thickness );  
    for( it = fgdPxls.begin(); it != fgdPxls.end(); ++it )  //確定的前景用紅色表示  
        circle( res, *it, radius, RED, thickness );  
    for( it = prBgdPxls.begin(); it != prBgdPxls.end(); ++it )  
        circle( res, *it, radius, LIGHTBLUE, thickness );  
    for( it = prFgdPxls.begin(); it != prFgdPxls.end(); ++it )  
        circle( res, *it, radius, PINK, thickness );  
  
    /*畫矩形*/  
    if( rectState == IN_PROCESS || rectState == SET )  
        rectangle( res, Point( rect.x, rect.y ), Point(rect.x + rect.width, rect.y + 

rect.height ), GREEN, 2);  
  
    imshow( *winName, res );  
}  
  
/* Write the user rectangle into the mask: everything outside becomes
 * GC_BGD (0), everything inside GC_PR_FGD (3, probable foreground).
 * The rectangle is clipped against the image borders first. */
void GCApplication::setRectInMask()
{
    assert( !mask.empty() );
    mask.setTo( GC_BGD );   // GC_BGD == 0

    // Clip against the image borders.
    rect.x = max(0, rect.x);
    rect.y = max(0, rect.y);
    rect.width  = min(rect.width,  image->cols - rect.x);
    rect.height = min(rect.height, image->rows - rect.y);

    mask(rect).setTo( Scalar(GC_PR_FGD) );  // GC_PR_FGD == 3
}
  
void GCApplication::setLblsInMask( int flags, Point p, bool isPr )  
{  
    vector<Point> *bpxls, *fpxls;  
    uchar bvalue, fvalue;  
    if( !isPr ) //確定的點  
    {  
        bpxls = &bgdPxls;  
        fpxls = &fgdPxls;  
        bvalue = GC_BGD;    //0  
        fvalue = GC_FGD;    //1  
    }  
    else    //概率點  
    {  
        bpxls = &prBgdPxls;  
        fpxls = &prFgdPxls;  
        bvalue = GC_PR_BGD; //2  
        fvalue = GC_PR_FGD; //3  
    }  
    if( flags & BGD_KEY )  
    {  
        bpxls->push_back(p);  
        circle( mask, p, radius, bvalue, thickness );   //該點處為2  
    }  
    if( flags & FGD_KEY )  
    {  
        fpxls->push_back(p);  
        circle( mask, p, radius, fvalue, thickness );   //該點處為3  
    }  
}  
  
/*滑鼠響應函式,引數flags為CV_EVENT_FLAG的組合*/  
void GCApplication::mouseClick( int event, int x, int y, int flags, void* )  
{  
    // TODO add bad args check  
    switch( event )  
    {  
    case CV_EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels  
        {  
            bool isb = (flags & BGD_KEY) != 0,  
                isf = (flags & FGD_KEY) != 0;  
            if( rectState == NOT_SET && !isb && !isf )//只有左鍵按下時  
            {  
                rectState = IN_PROCESS; //表示正在畫矩形  
                rect = Rect( x, y, 1, 1 );  
            }  
            if ( (isb || isf) && rectState == SET ) //按下了alt鍵或者shift鍵,且畫好了矩形

,表示正在畫前景背景點  
                lblsState = IN_PROCESS;  
        }  
        break;  
    case CV_EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels  
        {  
            bool isb = (flags & BGD_KEY) != 0,  
                isf = (flags & FGD_KEY) != 0;  
            if ( (isb || isf) && rectState == SET ) //正在畫可能的前景背景點  
                prLblsState = IN_PROCESS;  
        }  
        break;  
    case CV_EVENT_LBUTTONUP:  
        if( rectState == IN_PROCESS )  
        {  
            rect = Rect( Point(rect.x, rect.y), Point(x,y) );   //矩形結束  
            rectState = SET;  
            setRectInMask();  
            assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && 

prFgdPxls.empty() );  
            showImage();  
        }  
        if( lblsState == IN_PROCESS )   //已畫了前後景點  
        {  
            setLblsInMask(flags, Point(x,y), false);    //畫出前景點  
            lblsState = SET;  
            showImage();  
        }  
        break;  
    case CV_EVENT_RBUTTONUP:  
        if( prLblsState == IN_PROCESS )  
        {  
            setLblsInMask(flags, Point(x,y), true); //畫出背景點  
            prLblsState = SET;  
            showImage();  
        }  
        break;  
    case CV_EVENT_MOUSEMOVE:  
        if( rectState == IN_PROCESS )  
        {  
            rect = Rect( Point(rect.x, rect.y), Point(x,y) );  
            assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && 

prFgdPxls.empty() );  
            showImage();    //不斷的顯示圖片  
        }  
        else if( lblsState == IN_PROCESS )  
        {  
            setLblsInMask(flags, Point(x,y), false);  
            showImage();  
        }  
        else if( prLblsState == IN_PROCESS )  
        {  
            setLblsInMask(flags, Point(x,y), true);  
            showImage();  
        }  
        break;  
    }  
}  
  
/*
 * Run one GrabCut iteration and return the total iteration count.
 * On the first call the algorithm is initialized either from the user mask
 * (if any strokes were painted) or from the rectangle alone. Afterwards
 * `mask` holds the refined per-pixel labels and the recorded strokes are
 * cleared. Returns without iterating if no rectangle is set yet.
 * Fix: an inline comment had been split across a blank line by extraction,
 * leaving stray uncommented text that broke compilation.
 */
int GCApplication::nextIter()
{
    if( isInitialized )
        // One more refinement step; `mask` is both input and output.
        grabCut( *image, mask, rect, bgdModel, fgdModel, 1 );
    else
    {
        if( rectState != SET )
            return iterCount;

        if( lblsState == SET || prLblsState == SET )
            grabCut( *image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_MASK );
        else
            grabCut( *image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_RECT );

        isInitialized = true;
    }
    iterCount++;

    bgdPxls.clear(); fgdPxls.clear();
    prBgdPxls.clear(); prFgdPxls.clear();

    return iterCount;
}
  
// Single global application instance; HighGUI's C-style callback below
// forwards mouse events to it.
GCApplication gcapp;

static void on_mouse( int event, int x, int y, int flags, void* param )
{
    gcapp.mouseClick( event, x, y, flags, param );
}
  
/*
 * GrabCut demo entry point: reads an image name from stdin, shows it, and
 * drives the interactive loop (ESC quits, 'r' resets, 'n' iterates).
 * Fix: the 'n' case declared locals (ComputeTime ct, iterCount, ...)
 * without an enclosing block, so the later case labels jumped across their
 * initialization — ill-formed C++. The case body is now braced.
 */
int main( int argc, char** argv )
{
    string filename;
    cout<<" Grabcuts ! \n";
    cout<<"input image name:  "<<endl;
    cin>>filename;

    Mat image = imread( filename, 1 );
    if( image.empty() )
    {
        cout << "\n Durn, couldn't read image filename " << filename << endl;
        return 1;
    }

    help();

    const string winName = "image";
    cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE );
    cvSetMouseCallback( winName.c_str(), on_mouse, 0 );

    gcapp.setImageAndWinName( image, winName );
    gcapp.showImage();

    for(;;)
    {
        int c = cvWaitKey(0);
        switch( (char) c )
        {
        case '\x1b':    // ESC
            cout << "Exiting ..." << endl;
            goto exit_main;
        case 'r':       // reset and start over
            cout << endl;
            gcapp.reset();
            gcapp.showImage();
            break;
        case 'n':       // run one more GrabCut iteration, timed
            {
                ComputeTime ct;
                ct.Begin();

                int iterCount = gcapp.getIterCount();
                cout << "<" << iterCount << "... ";
                int newIterCount = gcapp.nextIter();
                if( newIterCount > iterCount )
                {
                    gcapp.showImage();
                    cout << iterCount << ">" << endl;
                    cout<<"執行時間:  "<<ct.End()<<endl;
                }
                else
                    cout << "rect must be determined>" << endl;
            }
            break;
        }
    }

exit_main:
    cvDestroyWindow( winName.c_str() );
    return 0;
}

lazy snapping

lazySnapping.cpp

LazySnapping.cpp
 
#include "stdafx.h"  
#include <cv.h>  
#include <highgui.h>  
#include "graph.h"  
#include <vector>  
#include <iostream>  
#include <cmath>  
#include <string>  
  
using namespace std;  
  
typedef Graph<float,float,float> GraphType;  
  
/*
 * Lazy-snapping segmentation helper. The user's include/exclude strokes
 * give average foreground/background colors; a per-pixel graph is built
 * (likelihood + smoothness energies) and cut with max-flow (graph.h).
 */
class LasySnapping
{

public :
    LasySnapping();

    ~LasySnapping()
    {
        if(graph)
        {
            delete graph;
        }
    };
private :
    vector<CvPoint> forePts;   // include-pen (foreground) stroke points
    vector<CvPoint> backPts;   // exclude-pen (background) stroke points
    IplImage* image;           // image being segmented (not owned)
    // average color of foreground points
    unsigned char avgForeColor[3];
    // average color of background points
    unsigned char avgBackColor[3];
public :
    // Attach the image and allocate the max-flow graph: one node per pixel,
    // capacity hint of two edges per pixel (left + up neighbors).
    void setImage(IplImage* image)
    {
        this->image = image;
        graph = new GraphType(image->width*image->height,image->width*image->height*2);
    }
    // include-pen locus: de-duplicate the points and compute the average
    // stroke color (indexing assumes a 3-channel 8-bit image).
    void setForegroundPoints(vector<CvPoint> pts)
    {
        forePts.clear();
        for(int i =0; i< pts.size(); i++)
        {
            if(!isPtInVector(pts[i],forePts))
            {
                forePts.push_back(pts[i]);
            }
        }
        if(forePts.size() == 0)
        {
            return;
        }
        int sum[3] = {0};
        for(int i =0; i < forePts.size(); i++)
        {
            unsigned char* p = (unsigned char*)image->imageData + forePts[i].x * 3
                + forePts[i].y*image->widthStep;
            sum[0] += p[0];
            sum[1] += p[1];
            sum[2] += p[2];
        }
        cout<<sum[0]<<" " <<forePts.size()<<endl;
        avgForeColor[0] = sum[0]/forePts.size();
        avgForeColor[1] = sum[1]/forePts.size();
        avgForeColor[2] = sum[2]/forePts.size();
    }
    // exclude-pen locus: same as above for the background strokes.
    void setBackgroundPoints(vector<CvPoint> pts)
    {
        backPts.clear();
        for(int i =0; i< pts.size(); i++)
        {
            if(!isPtInVector(pts[i],backPts))
            {
                backPts.push_back(pts[i]);
            }
        }
        if(backPts.size() == 0)
        {
            return;
        }
        int sum[3] = {0};
        for(int i =0; i < backPts.size(); i++)
        {
            unsigned char* p = (unsigned char*)image->imageData + backPts[i].x * 3 +
                backPts[i].y*image->widthStep;
            sum[0] += p[0];
            sum[1] += p[1];
            sum[2] += p[2];
        }
        avgBackColor[0] = sum[0]/backPts.size();
        avgBackColor[1] = sum[1]/backPts.size();
        avgBackColor[2] = sum[2]/backPts.size();
    }

    // return maxflow of graph
    int runMaxflow();
    // get result, a grayscale mask image indicating foreground by 255 and background by 0
    IplImage* getImageMask();

private :

    float colorDistance(unsigned char* color1, unsigned char* color2);
    float minDistance(unsigned char* color, vector<CvPoint> points);
    bool isPtInVector(CvPoint pt, vector<CvPoint> points);
    void getE1(unsigned char* color,float* energy);
    float getE2(unsigned char* color1,unsigned char* color2);

    GraphType *graph;   // owned; freed in the destructor
};
  
/* Start with no graph allocated and black (all-zero) average colors. */
LasySnapping::LasySnapping() : graph(NULL)
{
    for(int c = 0; c < 3; c++)
    {
        avgForeColor[c] = 0;
        avgBackColor[c] = 0;
    }
}
 
/* Euclidean distance between two 3-channel 8-bit colors. */
float LasySnapping::colorDistance(unsigned char* color1, unsigned char* color2)
{
    float sum = 0;
    for(int c = 0; c < 3; c++)
    {
        float diff = (float)color1[c] - (float)color2[c];
        sum += diff * diff;
    }
    return sqrt(sum);
}
  
/*
 * Smallest color distance from `color` to the image colors sampled at the
 * given stroke points. Returns -1 for an empty point list.
 * Fix: extraction had split the `image->widthStep` token across a blank
 * line (`image-` / `>widthStep`), which broke compilation.
 */
float LasySnapping::minDistance(unsigned char* color, vector<CvPoint> points)
{
    float distance = -1;
    for(int i = 0; i < (int)points.size(); i++)
    {
        unsigned char* p = (unsigned char*)image->imageData
            + points[i].y * image->widthStep
            + points[i].x * image->nChannels;
        float d = colorDistance(p, color);
        // Keep the minimum; the first sample initializes it.
        if(distance < 0 || distance > d)
        {
            distance = d;
        }
    }

    return distance;
}
  
/* True if a point with the same coordinates as pt occurs in points. */
bool LasySnapping::isPtInVector(CvPoint pt, vector<CvPoint> points)
{
    for(size_t i = 0; i < points.size(); i++)
    {
        if(points[i].x == pt.x && points[i].y == pt.y)
            return true;
    }
    return false;
}
/* Likelihood energy of a pixel color: energy[0] grows with distance from
 * the average foreground color, energy[1] with distance from the average
 * background color; the two always sum to 1. */
void LasySnapping::getE1(unsigned char* color,float* energy)
{
    // Distances to the average stroke colors.
    float df = colorDistance(color,avgForeColor);
    float db = colorDistance(color,avgBackColor);
    // (Alternative: minimum distance to the individual stroke points,
    //  via minDistance(color, forePts) / minDistance(color, backPts).)
    float denom = db + df;
    energy[0] = df / denom;
    energy[1] = db / denom;
}
  
/* Smoothness energy between two neighboring colors: large when the colors
 * are similar, so the min-cut prefers to run along strong color edges. */
float LasySnapping::getE2(unsigned char* color1,unsigned char* color2)
{
    const float EPSILON = 0.01;
    float lambda = 100;
    float sqDist = 0;
    for(int c = 0; c < 3; c++)
    {
        int d = color1[c] - color2[c];
        sqDist += d * d;
    }
    return lambda / (EPSILON + sqDist);
}
  
/*
 * Build the s-t graph over all pixels and run max-flow/min-cut.
 * Terminal weights come from the likelihood energy E1 (stroke pixels are
 * hard-constrained); n-link weights between 4-connected neighbors come
 * from the smoothness energy E2. Returns the max-flow value.
 *
 * Fix: the original gated BOTH the left and the up edge on
 * (h > 0 && w > 0), which silently dropped the vertical edges of column 0
 * and the horizontal edges of row 0. The two conditions are independent.
 */
int LasySnapping::runMaxflow()
{
    const float INFINNITE_MAX = 1e10;
    int indexPt = 0;
    for(int h = 0; h < image->height; h ++)
    {
        unsigned char* p = (unsigned char*)image->imageData + h *image->widthStep;
        for(int w = 0; w < image->width; w ++)
        {
            // Data term E1: hard-constrain stroke pixels, otherwise use the
            // average-color model.
            float e1[2]={0};
            if(isPtInVector(cvPoint(w,h),forePts))
            {
                e1[0] = 0;
                e1[1] = INFINNITE_MAX;
            }
            else if(isPtInVector(cvPoint(w,h),backPts))
            {
                e1[0] = INFINNITE_MAX;
                e1[1] = 0;
            }
            else
            {
                getE1(p,e1);
            }

            // Add the pixel node with its terminal weights.
            graph->add_node();
            graph->add_tweights(indexPt, e1[0], e1[1]);

            // Smoothness term E2 to the left neighbor (3 bytes back)...
            if(w > 0)
            {
                float e2 = getE2(p, p-3);
                graph->add_edge(indexPt, indexPt-1, e2, e2);
            }
            // ...and to the neighbor above (one image row back).
            if(h > 0)
            {
                float e2 = getE2(p, p-image->widthStep);
                graph->add_edge(indexPt, indexPt-image->width, e2, e2);
            }

            p += 3;     // assumes a 3-channel 8-bit image
            indexPt ++;
        }
    }

    return graph->maxflow();
}
  
/* Build a single-channel mask from the max-flow result: pixels on the
 * SOURCE side of the cut become 0, all other pixels 255. The caller owns
 * and must release the returned image.
 * NOTE(review): the class declaration promises foreground==255, which
 * matches only if SOURCE corresponds to background here — confirm against
 * the tweight convention in graph.h. */
IplImage* LasySnapping::getImageMask()
{
    IplImage* gray = cvCreateImage(cvGetSize(image),8,1);
    int indexPt =0;
    for(int h =0; h < image->height; h++)
    {
        unsigned char* p = (unsigned char*)gray->imageData + h*gray->widthStep;
        for(int w =0 ;w <image->width; w++)
        {
            if (graph->what_segment(indexPt) == GraphType::SOURCE)
            {
                *p = 0;
            }
            else
            {
                *p = 255;
            }

            p++;
            indexPt ++;
        }
    }
    return gray;
}
  
// --- Global UI state shared with the mouse callback ---
vector<CvPoint> forePts;    // foreground stroke points (stored at 1/SCALE)
vector<CvPoint> backPts;    // background stroke points (stored at 1/SCALE)
int currentMode = 0;        // 0 = foreground pen (default), 1 = background pen
// Pen colors: blue for foreground, red for background.
CvScalar paintColor[2] = {CV_RGB(0,0,255),CV_RGB(255,0,0)};

IplImage* image = NULL;     // original full-size image
// Fix: binding a string literal to non-const char* is deprecated in C++03
// and ill-formed since C++11.
const char* winName = "lazySnapping";
IplImage* imageDraw = NULL; // clone of image that the strokes are drawn on
const int SCALE = 4;        // segmentation runs on a 1/SCALE sized copy
  
/*
 * Mouse callback for the lazy-snapping window.
 * Left-drag paints foreground/background strokes (recorded at 1/SCALE
 * resolution); releasing the left button runs the max-flow segmentation
 * and overlays the resulting contour in green.
 * Fix: extraction had split the `showImg->widthStep` token across a blank
 * line (`showImg-` / `>widthStep`), which broke compilation.
 */
void on_mouse( int event, int x, int y, int flags, void* )
{
    if( event == CV_EVENT_LBUTTONUP )
    {
        if(backPts.size() == 0 && forePts.size() == 0)
        {
            return;     // nothing painted yet: nothing to segment
        }
        LasySnapping ls;
        // Segment a down-scaled copy for speed.
        IplImage* imageLS = cvCreateImage(cvSize(image->width/SCALE,image->height/SCALE),
            8,3);
        cvResize(image,imageLS);
        ls.setImage(imageLS);
        ls.setBackgroundPoints(backPts);
        ls.setForegroundPoints(forePts);
        ls.runMaxflow();
        IplImage* mask = ls.getImageMask();
        IplImage* gray = cvCreateImage(cvGetSize(image),8,1);
        cvResize(mask,gray);
        // Keep only the contour of the mask.
        cvCanny(gray,gray,50,150,3);

        // Overlay the contour in green on top of the stroke image.
        IplImage* showImg = cvCloneImage(imageDraw);
        for(int h =0; h < image->height; h ++)
        {
            unsigned char* pgray = (unsigned char*)gray->imageData + gray->widthStep*h;
            unsigned char* pimage = (unsigned char*)showImg->imageData + showImg->widthStep*h;
            for(int width  =0; width < image->width; width++)
            {
                if(*pgray++ != 0 )
                {
                    pimage[0] = 0;
                    pimage[1] = 255;
                    pimage[2] = 0;
                }
                pimage+=3;
            }
        }
        cvSaveImage("t.bmp",showImg);
        cvShowImage(winName,showImg);
        cvReleaseImage(&imageLS);
        cvReleaseImage(&mask);
        cvReleaseImage(&showImg);
        cvReleaseImage(&gray);
    }
    else if( event == CV_EVENT_LBUTTONDOWN )
    {
        // Nothing on press; strokes are collected during move events below.
    }
    else if( event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON))
    {
        CvPoint pt = cvPoint(x,y);
        if(currentMode == 0)
        {   // foreground pen
            forePts.push_back(cvPoint(x/SCALE,y/SCALE));
        }
        else
        {   // background pen
            backPts.push_back(cvPoint(x/SCALE,y/SCALE));
        }
        cvCircle(imageDraw,pt,2,paintColor[currentMode]);
        cvShowImage(winName,imageDraw);
    }
}
/*
 * Lazy-snapping entry point: reads an image name from stdin and runs the
 * interactive loop (ESC quits, 'r' resets, 'b'/'f' switch the pen).
 * Fixes: check the initial cvLoadImage result, and release the old images
 * before reloading on 'r' (the original leaked both on every reset).
 */
int main(int argc, char** argv)
{
    string image_name;
    cout<<"input image name: "<<endl;
    cin>>image_name;

    cvNamedWindow(winName,1);
    cvSetMouseCallback( winName, on_mouse, 0);

    image = cvLoadImage(image_name.c_str(),CV_LOAD_IMAGE_COLOR);
    if(!image)
    {
        cout<<"could not open or find the image!"<<endl;
        return 1;
    }
    imageDraw = cvCloneImage(image);
    cvShowImage(winName, image);
    for(;;)
    {
        int c = cvWaitKey(0);
        c = (char)c;
        if(c == 27)
        {   // ESC: exit
            break;
        }
        else if(c == 'r')
        {   // reset: reload the image and forget all strokes
            cvReleaseImage(&image);     // fix: don't leak the previous images
            cvReleaseImage(&imageDraw);
            image = cvLoadImage(image_name.c_str(),CV_LOAD_IMAGE_COLOR);
            imageDraw = cvCloneImage(image);
            forePts.clear();
            backPts.clear();
            currentMode = 0;
            cvShowImage(winName, image);
        }
        else if(c == 'b')
        {   // switch to background pen
            currentMode = 1;
        }
        else if(c == 'f')
        {   // switch to foreground pen
            currentMode = 0;
        }
    }
    cvReleaseImage(&image);
    cvReleaseImage(&imageDraw);
    return 0;
}

由漢字生成圖片

AddChinese.cpp
#include "stdafx.h"    
  
#include <opencv2/core/core.hpp>    
#include <opencv2/highgui/highgui.hpp>  
#include "CvxText.h"  
  
#pragma comment(lib,"freetype255d.lib")  
#pragma comment(lib,"opencv_core2410d.lib")                  
#pragma comment(lib,"opencv_highgui2410d.lib")                  
#pragma comment(lib,"opencv_imgproc2410d.lib")     
  
using namespace std;  
using namespace cv;  
  
#define ROW_BLOCK 2  
#define COLUMN_Block 2  
  
// writePng.cpp : defines the entry point for the console application.
/*
 * Save `mat` (expected BGRA for alpha output) as a PNG at maximum
 * compression under image_name. Returns 0 on success, 1 on failure.
 * Fix: imwrite reports failures as cv::Exception, which derives from
 * std::exception but NOT std::runtime_error, so the original
 * catch(runtime_error&) never matched; catch std::exception instead.
 */
int run_test_png(Mat &mat,string image_name)
{
    vector<int> compression_params;
    compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
    compression_params.push_back(9);    // PNG compression level (library default is 3)
    try
    {
        imwrite(image_name, mat, compression_params);
    }
    catch (exception& ex)
    {
        fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what());
        return 1;
    }
    fprintf(stdout, "Saved PNG file with alpha data.\n");

    // NOTE(review): blocking on a key press inside a save helper is odd —
    // confirm whether the caller relies on it.
    waitKey(0);
    return 0;
}
  
/* Paint the BGRA output from a binary template: black template pixels get
 * `color` with an opaque alpha channel, every other pixel becomes fully
 * transparent. template_src is 8-bit single channel, mat_png is CV_8UC4
 * of the same size. Always returns 0. */
int coloured(Mat &template_src, Mat &mat_png, CvScalar color)
{
    for (int row = 0; row < template_src.rows; ++row)
    {
        for (int col = 0; col < template_src.cols; ++col)
        {
            Vec4b& bgra = mat_png.at<Vec4b>(row, col);
            bool isTextPixel = (template_src.at<uchar>(row, col) == 0);
            if (isTextPixel)
            {
                // Glyph pixel: copy B, G, R from the requested color, opaque.
                bgra[0] = color.val[0];
                bgra[1] = color.val[1];
                bgra[2] = color.val[2];
                bgra[3] = 255;
            }
            else
            {
                bgra[3] = 0;    // background: fully transparent
            }
        }
    }

    return 0;
}
  
/*
 * In-place Otsu-style binarization of a grayscale IplImage: build the
 * normalized histogram, pick the threshold maximizing the weighted
 * between-class variance, then map every pixel to 0 or 255.
 * Fix: the histogram was indexed with value-'0'+48, which is a no-op only
 * where '0'==48 (ASCII) and out-of-bounds UB on any other charset; index
 * by the pixel value directly.
 */
void ImageBinarization(IplImage *src)
{
    int i,j,width,height,step,chanel,threshold;
    /* size: pixel count; avg: histogram mean; va: variance */
    float size,avg,va,maxVa,p,a,s;
    unsigned char *dataSrc;
    float histogram[256];

    width = src->width;
    height = src->height;
    dataSrc = (unsigned char *)src->imageData;
    step = src->widthStep/sizeof(char);
    chanel = src->nChannels;

    /* Build and normalize the histogram. */
    for(i=0; i<256; i++)
        histogram[i] = 0;
    for(i=0; i<height; i++)
        for(j=0; j<width*chanel; j++)
        {
            histogram[dataSrc[i*step+j]]++;
        }
    size = width * height;
    for(i=0; i<256; i++)
        histogram[i] /= size;

    /* Histogram mean and (unused but kept) variance. */
    avg = 0;
    for(i=0; i<256; i++)
        avg += i*histogram[i];
    va = 0;
    for(i=0; i<256; i++)
        va += fabs(i*i*histogram[i]-avg*avg);

    /* Threshold maximizing the weighted between-class variance. */
    threshold = 20;
    maxVa = 0;
    p = a = s = 0;
    for(i=0; i<256; i++)
    {
        p += histogram[i];
        a += i*histogram[i];
        if(p <= 0 || p >= 1)
            continue;   /* guard: variance undefined at the extremes */
        s = (avg*p-a)*(avg*p-a)/p/(1-p);
        if(s > maxVa)
        {
            threshold = i;
            maxVa = s;
        }
    }

    /* Binarize in place. */
    for(i=0; i<height; i++)
        for(j=0; j<width*chanel; j++)
        {
            if(dataSrc[i*step+j] > threshold)
                dataSrc[i*step+j] = 255;
            else
                dataSrc[i*step+j] = 0;
        }
}
  
/* Convert a BGR image to grayscale, binarize it in place with the adaptive
 * threshold routine above, display it, and return the binary image. */
Mat binaryzation(Mat &src)
{
    Mat des_gray(src.size(), CV_8UC1);
    cvtColor(src, des_gray, CV_BGR2GRAY);

    // Wrap the Mat in an IplImage header (no copy) so the C-style
    // binarization routine can operate directly on its pixels.
    IplImage temp(des_gray);
    ImageBinarization(&temp);

    imshow("二值影象", des_gray);
    return des_gray;
}
  
int generate_chinese(const int size_zi, const char *msg ,int number,CvScalar color)  
{  
    //int size_zi = 50;//字型大小  
    CvSize czSize;  //目標影象尺寸  
    float p = 0.5;  
    CvScalar fsize;  
  
  
    //讀取TTF字型檔案  
    CvxText text("simhei.ttf");       
  
    //設定字型屬性 字型大小/空白比例/間隔比例/旋轉角度  
    fsize = cvScalar(size_zi, 1, 0.1, 0);  
    text.setFont(NULL, &fsize, NULL, &p);        
  
    czSize.width = size_zi*number;  
    czSize.height = size_zi;  
    //載入原影象  
    IplImage* ImageSrc = cvCreateImage(czSize,IPL_DEPTH_8U,3);//cvLoadImage(Imagename, 

CV_LOAD_IMAGE_UNCHANGED);  
    //Mat image(ImageSrc);  
    //createAlphaMat(image);  
    //ImageSrc = ℑ  
  
    //IplImage temp(image);   
    //ImageSrc = &temp;  
  
    //設定原影象文字  
    text.putText(ImageSrc, msg, cvPoint(1, size_zi), color);   
  
    //顯示原影象  
    cvShowImage("原圖", ImageSrc);  
  
  
    string hanzi = msg;  
    hanzi = hanzi + ".png";  
  
    Mat chinese(ImageSrc,true);  
    Mat gray = binaryzation(chinese);  
  
    imwrite("chinese_gray.jpg",gray);  
  
    Mat mat_png(chinese.size(),CV_8UC4);  
    coloured(gray,mat_png,color);  
    run_test_png(mat_png,hanzi);  
    //  
    ////cvSaveImage("hanzi.jpg",reDstImage);  
    //run_test_png(chinese,hanzi);  
    //等待按鍵事件  
    cvWaitKey();  
    return 0;  
}  
  
/* Demo driver: render "你好a" (three glyphs) as a 200px black glyph image
 * with a transparent PNG output. */
int main()
{
    CvScalar color = CV_RGB(0,0,0);
    int size = 200;
    const char* msg = "你好a";  // keep to a single short line of text
    int number = 3;             // number of glyph cells on the canvas

    generate_chinese(size, msg, number, color);

    return 0;
}