1. 程式人生 > >掃描影象二維碼摳圖(傾斜校正 去黑邊)

掃描影象二維碼摳圖(傾斜校正 去黑邊)

標籤:des   演算法   class   style   log   com   http   it   si   

由於要識別掃描器得到的圖片,直接將得到的圖片進行識別,並不能得到識別結果,筆者使用的是zbar類,只能正常識別只含有二維碼影象的圖片。於是將二維碼從圖中扣出來就成了工作中的一個需求。
(網上有一些收費的控制元件具有影象預處理的功能,可以進行很好的識別)

技術分享
簡單的使用現有的影象邊緣檢測和連通域演算法,並不能得到很好的效果:

例如canny邊沿檢測處理結果:

技術分享

不過觀察不難發現二維碼的特點,接近於正方形的一個小方塊,於是設想是否能夠通過簡單的畫框得到該二維碼的區域。

另外由於掃描是可能會產生傾斜,會導致二維碼區域變成非矩形的區域(其實二維碼的識別是360度的,也許並不需要進行傾斜校正)。

筆者在網上找到傾斜校正的演算法:

參見大神部落格:http://johnhany.net/2013/11/dft-based-text-rotation-correction/

人為增加一點傾斜,如圖:

技術分享

使用大神的演算法後得到:

技術分享

然後再通過邊緣檢測演算法得到:

技術分享

為了方便畫框,首先要去掉周圍的白線。大致思路是獲取最大的黑色連通區域,為了做到這一點,筆者使用橫豎畫框,就是如果一個50*50的區域內沒有白點,則進行畫框,初步的結果如下:

技術分享

然後將相鄰的白框進行合併,之後取最大的連通域框,並將周圍的區域都變成黑色:

技術分享

之後將所有的白框置黑:

技術分享

至此便得到了一個比較乾淨的圖片。下一步就是進行畫框,筆者使用的思路是,從左往右進行探測如果10個畫素點內出現了白點,則往右移動。出現一個白點,則往右往下延展10個畫素點。從上往下 從左往右 依次探測,如此會生成很多個小白框:

技術分享

同樣利用合併的思路,需要進行框的合併,左右合併,上下合併,需要遍歷進行兩次得到最後的矩形框:

技術分享

看到二維碼周圍的那個白框,就看到了預期的結果。在此需要將它和其他的非二維碼進行區分,可以設定條件,例如寬高比和寬高畫素等。最後將二維碼的白框向左上擴大10個畫素點,根據這個框的座標取原影象取對應的影象:

技術分享

大功告成

當然其中的演算法也存在很多漏洞,例如已經發現的有,如果最大的連通區域不是中間的,那麼就需要進行取捨。如果圖中含有其他一些黑邊什麼的會影響最後的畫框也不行。針對不同的圖形,去白邊和畫框的引數需要重新設定。

這只是作為影象處理菜鳥的簡單演算法。對於opencv和cximage都不是很熟悉,也不知道該用什麼函式,所以程式碼很臃腫,但是就影象的本質來說,只不過是二維的矩陣,熟練的掌握了二維陣列,那麼處理資料也並不是那麼困難。

以下是原始碼,希望大神多多指點:

/*
 *  原傾斜校正作者!
 *	Author: John Hany
 *	Website: http://johnhany.net
 *	Source code updates: https://github.com/johnhany/textRotCorrect
 *	If you have any advice, you could contact me at: [email protected]
 *	Need OpenCV environment!
 *
 */

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <algorithm>
#include <stdio.h>
#include "opencv/cv.h"
#include "opencv/cxcore.h"
#include "opencv2/highgui/highgui_c.h"
#include "direct.h"

#define  BOX_WIDTH  50
#define  BLACK  0
#define  WHITE 255

#define  RATE  0.2
// #pragma comment(lib, "ml.lib")
// #pragma comment(lib, "cv.lib")
// #pragma comment(lib, "cvaux.lib")
// #pragma comment(lib, "cvcam.lib")
// #pragma comment(lib, "cxcore.lib")
// #pragma comment(lib, "cxts.lib")
// #pragma comment(lib, "highgui.lib")
// #pragma comment(lib, "cvhaartraining.lib
using namespace cv;
using namespace std;

#define GRAY_THRESH 150
#define HOUGH_VOTE 100

//#define DEGREE 27
//影象的輪廓檢測下
//By MoreWindows (http://blog.csdn.net/MoreWindows)

// Current working directory; filled by _getcwd() in main() and used by
// getFilePath() to build absolute paths for every intermediate file.
char szCurrentPath[MAX_PATH]; 

// Build the absolute path of a working file inside the current directory
// and delete any stale copy left over from a previous run, so each stage
// starts from a clean slate.
string getFilePath( const char * szBuf)
{
	string strPath(szCurrentPath);
	strPath += "\\";
	strPath += szBuf;
	// Remove the old file before it gets re-created this run.
	DeleteFile(strPath.c_str());
	return strPath;
}

// Paths of the intermediate images written by each pipeline stage.
// All are filled in by main() via getFilePath().
string strOrigin;    // copy of the input image (imageText.jpg)
string strSave1;     // deskewed image produced by imageCorrect()
string strSave2;     // Canny edge map produced by outLinePic2()
string strSave3;     // edge map with black-cell borders painted white
string strSave4;     // cleaned image with border regions blacked out
string strSave5;     // (debug) raw probed boxes — currently commented out
string strSave6_0;
string strSave6_1;
string strSave6;     // final merged/filtered box overlay
string strSave7;     // image the QR crop is cut from (same file as strSave1)
string strSave8;     // cropped QR-code image (final output)



//#pragma comment(linker, "/subsystem:\"windows\" /entry:\"mainCRTStartup\"")
// Grayscale copy of the demo image, shared between outLinePic() and the
// trackbar callback on_trackbar().
IplImage *g_pGrayImage = NULL;
const char *pstrWindowsBinaryTitle = "二值圖(http://blog.csdn.net/MoreWindows)";
const char *pstrWindowsOutLineTitle = "輪廓圖(http://blog.csdn.net/MoreWindows)";
// Contour sequence filled by cvFindContours() in on_trackbar().
CvSeq *g_pcvSeq = NULL;

// Trackbar callback for outLinePic(): thresholds the shared grayscale image
// at `pos`, shows the binary image, then finds and draws its contours.
void on_trackbar(int pos)
{
	// Threshold to a binary image.
	IplImage *pBinaryImage = cvCreateImage(cvGetSize(g_pGrayImage), IPL_DEPTH_8U, 1);
	cvThreshold(g_pGrayImage, pBinaryImage, pos, 255, CV_THRESH_BINARY);
	// Show the binary image.
	cvShowImage(pstrWindowsBinaryTitle, pBinaryImage);

	CvMemStorage* cvMStorage = cvCreateMemStorage();
	// Find contours; the result sequence is stored in g_pcvSeq.
	// NOTE: cvFindContours modifies pBinaryImage in place.
	cvFindContours(pBinaryImage,cvMStorage, &g_pcvSeq);

	IplImage *pOutlineImage = cvCreateImage(cvGetSize(g_pGrayImage), IPL_DEPTH_8U, 3);
	int _levels = 5;
	cvZero(pOutlineImage);
	// External contours in red, holes in green, up to _levels nesting levels.
	cvDrawContours(pOutlineImage, g_pcvSeq, CV_RGB(255,0,0), CV_RGB(0,255,0), _levels);
	cvShowImage(pstrWindowsOutLineTitle, pOutlineImage);

	// Release per-call resources; g_pcvSeq memory lives in cvMStorage, so it
	// is invalid after this point (only used within this callback anyway).
	cvReleaseMemStorage(&cvMStorage);
	cvReleaseImage(&pBinaryImage);
	cvReleaseImage(&pOutlineImage);
}

//呼叫opencv 輪廓檢測
int outLinePic()
{
	const char *pstrWindowsSrcTitle = "原圖(http://blog.csdn.net/MoreWindows)";
	const char *pstrWindowsToolBarName = "二值化";

	// 從檔案中載入原圖
	IplImage *pSrcImage = cvLoadImage("003.jpg", CV_LOAD_IMAGE_UNCHANGED);
	// 顯示原圖
	cvNamedWindow(pstrWindowsSrcTitle, CV_WINDOW_AUTOSIZE);
	cvShowImage(pstrWindowsSrcTitle, pSrcImage);

	// 轉為灰度圖
	g_pGrayImage =  cvCreateImage(cvGetSize(pSrcImage), IPL_DEPTH_8U, 1);
	cvCvtColor(pSrcImage, g_pGrayImage, CV_BGR2GRAY);

	// 建立二值圖和輪廓圖視窗
	cvNamedWindow(pstrWindowsBinaryTitle, CV_WINDOW_AUTOSIZE);
	cvNamedWindow(pstrWindowsOutLineTitle, CV_WINDOW_AUTOSIZE);

	// 滑動條	
	int nThreshold = 0;
	cvCreateTrackbar(pstrWindowsToolBarName, pstrWindowsBinaryTitle, &nThreshold, 254, on_trackbar);

	on_trackbar(1);

	cvWaitKey(0);

	cvDestroyWindow(pstrWindowsSrcTitle);
	cvDestroyWindow(pstrWindowsBinaryTitle);
	cvDestroyWindow(pstrWindowsOutLineTitle);
	cvReleaseImage(&pSrcImage);
	cvReleaseImage(&g_pGrayImage);
	return 0;
}

// Edge detection on the deskewed image. Sobel and Laplacian are computed
// only for experimentation (each result is overwritten by the next call);
// the Canny edge map written to strSave2 is what the later pipeline stages
// consume. Returns 0 on success, -1 when the input cannot be loaded.
int outLinePic2()
{
	Mat src = imread(strSave1.c_str());
	// Fix: guard against a failed load — cvtColor/Canny on an empty Mat abort.
	if (src.empty())
	{
		return -1;
	}
	Mat dst;

	// Sobel: input, output, output depth, x-order, y-order.
	Sobel(src,dst,src.depth(),1,1);
	//imwrite("sobel.jpg",dst);

	// Laplacian: input, output, output depth.
	Laplacian(src,dst,src.depth());
	//imwrite("laplacian.jpg",dst);

	// Canny only processes grayscale images.
	cvtColor(src,src,CV_BGR2GRAY);

	// Canny: low threshold 50, high threshold 150 (3x the low threshold, as
	// OpenCV recommends), internal Sobel aperture 3.
	Canny(src,dst,50,150,3);
	imwrite(strSave2.c_str(),dst);

	//imshow("dst",dst);
	//waitKey();

	return 0;
	
}
// Connected-component segmentation experiment (not called by main()):
// finds contours in a thresholded image, filters small / wide ones, draws
// all survivors in cyan and the largest-area contour in red.
int ConnectDomain()
{
	IplImage* src;  
	src=cvLoadImage("imageText_D.jpg",CV_LOAD_IMAGE_GRAYSCALE);  
	
	IplImage* dst = cvCreateImage( cvGetSize(src), 8, 3 );  
	CvMemStorage* storage = cvCreateMemStorage(0);  
	CvSeq* contour = 0;  
	cvThreshold( src, src,120, 255, CV_THRESH_BINARY );//binarize   
	cvNamedWindow( "Source", 1 );  
	cvShowImage( "Source", src );  
	//extract contours   
	cvFindContours( src, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );  
	cvZero( dst );//clear the output image   
	CvSeq* _contour =contour;   
	double maxarea=0;  
	double minarea=100;  
	int n=-1,m=0;//n = index of the largest contour, m = iteration index   
	for( ; contour != 0; contour = contour->h_next )  
	{  

		double tmparea=fabs(cvContourArea(contour));  
		if(tmparea < minarea)   
		{  
			// NOTE(review): cvSeqRemove(contour, 0) removes the first POINT of
			// this contour sequence, not the contour itself — confirm intent.
			cvSeqRemove(contour,0); //intended: drop contours below the area threshold   
			continue;  
		}  
		CvRect aRect = cvBoundingRect( contour, 0 );   
		if ((aRect.width/aRect.height)<1)  
		{  
			cvSeqRemove(contour,0); //intended: drop contours with aspect ratio < 1   
			continue;  
		}  
		if(tmparea > maxarea)  
		{  
			maxarea = tmparea;  
			n=m;  
		}  
		m++;  
		//  CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );//random colour   
		CvScalar color = CV_RGB( 0, 255,255 );  

		// max_level: 0 draws only this contour; 1 adds same-level contours;
		// 2 draws all contours; negative values descend into child contours
		// down to abs(max_level)-1 without drawing siblings.
		cvDrawContours( dst, contour, color, color, -1, 1, 8 );//draw outer and inner contours   
	}  
	contour =_contour; /*int k=0;*/  
	int count=0;  
	// Second pass: repaint the largest-area contour in red.
	for( ; contour != 0; contour = contour->h_next )  
	{  
		count++;  
		double tmparea=fabs(cvContourArea(contour));  
		if (tmparea==maxarea /*k==n*/)  
		{  
			CvScalar color = CV_RGB( 255, 0, 0);  
			cvDrawContours( dst, contour, color, color, -1, 1, 8 );  
		}  
		/*k++;*/  
	}  
	printf("The total number of contours is:%d",count);  
	cvNamedWindow( "Components", 1 );  
	cvShowImage( "Components", dst ); 
	cvSaveImage("imageText_ConnectDomain.jpg",dst);
	cvWaitKey(0);  
	cvDestroyWindow( "Source" );  
	cvReleaseImage(&src);  
	cvDestroyWindow( "Components" );  
	cvReleaseImage(&dst);  
	return 0;  
}

struct MYBOX{
	BOOL bIsBlack;
	Rect rect;

	MYBOX()
	{
		bIsBlack = FALSE;
	}
};

// A connected region of grid cells: the member rects plus a cached count.
// nBox always mirrors rectList.size().
struct CONNECT_ZON{
	vector<RECT> rectList;
	int nBox;
	CONNECT_ZON() : nBox(0)
	{
	}
};

// Draw the 1-pixel border of every rectangle in boxList onto the 8-bit
// single-channel image in the given colour. Rectangles are assumed to lie
// inside the image (no bounds checking).
void DrowBoxColor(Mat &srcImg, std::vector<RECT> &boxList, int nColor)
{
	for (size_t i = 0; i < boxList.size(); ++i)
	{
		const RECT &box = boxList[i];
		// Top and bottom edges.
		for (int x = box.left; x <= box.right; ++x)
		{
			*(srcImg.data + srcImg.step[0] * box.top + srcImg.step[1] * x) = nColor;
			*(srcImg.data + srcImg.step[0] * box.bottom + srcImg.step[1] * x) = nColor;
		}
		// Left and right edges.
		for (int y = box.top; y <= box.bottom; ++y)
		{
			*(srcImg.data + srcImg.step[0] * y + srcImg.step[1] * box.left) = nColor;
			*(srcImg.data + srcImg.step[0] * y + srcImg.step[1] * box.right) = nColor;
		}
	}
}


// Return TRUE when two grid cells are neighbours, i.e. their top-left
// corners are exactly one BOX_WIDTH apart (cells are laid out on a
// BOX_WIDTH-aligned grid, so this matches the four axis neighbours).
// Fix: compare squared distances in exact integer arithmetic instead of
// the fragile floating-point equality sqrt(...) == BOX_WIDTH.
BOOL IsRectNear(RECT rect1, RECT rect2)
{
	int dx = rect1.left - rect2.left;
	int dy = rect1.top - rect2.top;
	return (dx * dx + dy * dy == BOX_WIDTH * BOX_WIDTH) ? TRUE : FALSE;
}
// Return TRUE when the two rectangles touch or overlap. The horizontal and
// vertical conditions are independent: on each axis, the rectangle that
// starts first must extend at least up to where the other one starts.
BOOL IsRectIntersect(RECT rect1, RECT rect2)
{
	int nWidth1 = rect1.right - rect1.left;
	int nHeight1 = rect1.bottom - rect1.top;
	int nWidth2 = rect2.right - rect2.left;
	int nHeight2 = rect2.bottom - rect2.top;

	// Horizontal overlap test.
	BOOL bOverlapX;
	if (rect1.left <= rect2.left)
	{
		bOverlapX = (rect2.left - rect1.left <= nWidth1) ? TRUE : FALSE;
	}
	else
	{
		bOverlapX = (rect1.left - rect2.left <= nWidth2) ? TRUE : FALSE;
	}

	// Vertical overlap test.
	BOOL bOverlapY;
	if (rect1.top < rect2.top)
	{
		bOverlapY = (rect2.top - rect1.top <= nHeight1) ? TRUE : FALSE;
	}
	else
	{
		bOverlapY = (rect1.top - rect2.top <= nHeight2) ? TRUE : FALSE;
	}

	return (bOverlapX && bOverlapY) ? TRUE : FALSE;
}


// Return TRUE when any cell of zon1 is grid-adjacent to any cell of zon2.
// Fix: take both zones by const reference — the original passed them by
// value, copying both rect vectors on every call inside the O(n^2) merge
// loop in ClearEdge(). Call sites are unchanged.
BOOL IsZonNear(const CONNECT_ZON &zon1, const CONNECT_ZON &zon2)
{
	for(size_t i = 0; i < zon1.rectList.size(); i++)
	{
		const RECT &rect1 = zon1.rectList[i];
		for(size_t j = 0; j < zon2.rectList.size(); j ++)
		{
			if (IsRectNear(rect1, zon2.rectList[j]) == TRUE)
			{
				return TRUE;
			}
		}
	}
	return FALSE;
}
// Merge two connected regions: the smaller one (fewer cells) is appended to
// the larger one and then emptied. On a tie, zon1 absorbs zon2.
void MergZon(CONNECT_ZON& zon1, CONNECT_ZON& zon2)
{
	CONNECT_ZON &dst = (zon1.nBox >= zon2.nBox) ? zon1 : zon2;
	CONNECT_ZON &src = (zon1.nBox >= zon2.nBox) ? zon2 : zon1;
	for (int i = 0; i < src.nBox; i++)
	{
		dst.rectList.push_back(src.rectList[i]);
	}
	dst.nBox += src.nBox;
	// Leave the absorbed zone empty so later passes skip it.
	src.rectList.clear();
	src.nBox = 0;
}
// Comparator for std::sort: order cells by row (top) first, then by column
// (left). Equal elements compare FALSE, as std::sort's strict weak ordering
// requires.
BOOL SortByM1( const RECT &v1, const RECT &v2)
{  
	if (v1.top != v2.top)
	{
		return (v1.top < v2.top) ? TRUE : FALSE;
	}
	return (v1.left < v2.left) ? TRUE : FALSE;
} 
// Comparator for std::sort: order connected regions by descending cell
// count, so the largest region ends up first.
BOOL SortByM2( const CONNECT_ZON &v1, const CONNECT_ZON &v2)
{  
	return (v1.nBox > v2.nBox) ? TRUE : FALSE;
} 

// Fill the half-open region [top,bottom) x [left,right) of the 8-bit image
// with the given colour value.
void SetRectColor(Mat& srcImg, RECT rect, int nColor)
{
	for (int y = rect.top; y < rect.bottom ; y ++)
	{
		// Hoist the row base pointer out of the inner loop.
		uchar *pRow = srcImg.data + srcImg.step[0] * y;
		for (int x = rect.left; x < rect.right; x++)
		{
			*(pRow + srcImg.step[1] * x) = nColor;
		}
	}
}
//選擇一個內部的聯通域 第一個返回ture 第二個返回false
BOOL IsInnerZon(CONNECT_ZON &zon1, CONNECT_ZON &zon2)
{
	RECT rect1 = zon1.rectList[0];
	RECT rect2 = zon2.rectList[0];
	//獲取兩個區域的左上 右下座標
	for (int i = 1; i < zon1.rectList.size(); i ++)
	{
		RECT tempRect = zon1.rectList[i];
		if (tempRect.left < rect1.left)
		{
			rect1.left  = tempRect.left;
		}
		if (tempRect.right > rect1.right)
		{
			rect1.right = tempRect.right;
		}
		if (tempRect.top < rect1.top)
		{
			rect1.top = tempRect.top;
		}
		if (tempRect.bottom > rect1.bottom)
		{
			rect1.bottom = tempRect.bottom;
		}
	}

	for (int i = 1; i < zon2.rectList.size(); i ++)
	{
		RECT tempRect = zon2.rectList[i];
		if (tempRect.left < rect2.left)
		{
			rect2.left  = tempRect.left;
		}
		if (tempRect.right > rect2.right)
		{
			rect2.right = tempRect.right;
		}
		if (tempRect.top < rect2.top)
		{
			rect2.top = tempRect.top;
		}
		if (tempRect.bottom > rect2.bottom)
		{
			rect2.bottom = tempRect.bottom;
		}
	}
	//評分 
	int nPoint1 = 0;
	int nPoint2 = 0;
	if (rect1.left < rect2.left)
	{
		nPoint1 ++;
	}
	else
	{
		nPoint2 ++;
	}

	if (rect1.right > rect2.right)
	{
		nPoint1 ++;
	}
	else
	{
		nPoint2 ++;
	}

	if (rect1.top < rect2.top)
	{
		nPoint1 ++;
	}
	else
	{
		nPoint2 ++;
	}

	if (rect1.bottom > rect2.bottom)
	{
		nPoint1 ++;
	}
	else
	{
		nPoint2 ++;
	}

	if (nPoint1 > nPoint2)
	{
		return FALSE;
	}
	else
	{
		return TRUE;
	}
}
// Clean the borders of the Canny edge image and crop out the QR-code region.
// Pipeline: (1) tile the image with 50x50 cells and mark the all-black ones;
// (2) paint those cell borders white and take the largest connected region
// of white cells; (3) black out everything outside that region; (4) probe
// the remaining white pixels into small boxes, merge them, filter by size
// and squareness, and finally crop the winning box (+10px margin) from the
// deskewed image into strSave8. Returns 0 in all cases.
int ClearEdge()
{
	//IplImage* src = cvLoadImage("imageText_D.jpg",CV_LOAD_IMAGE_GRAYSCALE); 
	Mat srcImg = imread(strSave2.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
	
	//int nWidth = src->width;
	//int nHeight = src->height;
	int nWidth = srcImg.cols;
	int nHeight = srcImg.rows;
	// NOTE(review): nUp/nLeft/nRight/nDown are set but never read — confirm
	// they are leftovers from an earlier fixed-margin approach.
	int nUp = 210;
	int nLeft = 140;
	int nRight = 170;
	int nDown = 210;
	// Step 1: scan the image in 50x50 cells and collect the all-black ones.
	int nRectSize = 50;
	vector<RECT > BoxList;
	for(int i = 0; i < nHeight - nRectSize; i += nRectSize)
	{
		for (int j = 0; j < nWidth - nRectSize; j += nRectSize)
		{
			// Check whether every pixel in this cell is black (the intent was
			// to tolerate one noise pixel).
			BOOL bBlack = TRUE;
			int nWhite = 0;
			for (int col = j; col < j + nRectSize; col ++)
			{
				for (int row = i; row < i + nRectSize; row++)
				{
					int nPixel = (int)(*(srcImg.data + srcImg.step[0] * row + srcImg.step[1] * col));
					if ( nPixel == 255)
					{
						nWhite ++;
						// NOTE(review): nWhite >= 0 is always true here, so a
						// single white pixel already rejects the cell; the
						// "allow one noise pixel" intent suggests nWhite > 1
						// was meant — confirm.
						if (nWhite >= 0)
						{
							bBlack = FALSE;
						}
					}
				}
				if (bBlack == FALSE)
				{
					break;
				}
			}
			if (bBlack)
			{
				RECT  temRect = {j,i, j + nRectSize, i + nRectSize};
				BoxList.push_back(temRect);
			}
		
		}
		
	}
	// Step 2: paint the border of every black cell white.
	int nSize = BoxList.size();
	for (int i = 0; i < nSize; i ++)
	{
		RECT tempRect = BoxList[i];
		// Top and bottom edges.
		int y1 = tempRect.top;
		int y2 = tempRect.bottom;
		for (int x = tempRect.left;  x <= tempRect.right; x ++)
		{
			*(srcImg.data + srcImg.step[1] * x + srcImg.step[0] * y1) = WHITE;
			*(srcImg.data + srcImg.step[1] * x + srcImg.step[0] * y2) = WHITE;
		}
		// Left and right edges.
		int x1 = tempRect.left;
		int x2 = tempRect.right;
		for (int y = tempRect.top; y <= tempRect.bottom; y ++)
		{
			*(srcImg.data + srcImg.step[1] * x1 + srcImg.step[0] * y) = WHITE;
			*(srcImg.data + srcImg.step[1] * x2 + srcImg.step[0] * y) = WHITE;
		}
	}
	imwrite(strSave3.c_str(),srcImg);
	vector<CONNECT_ZON> g_ConnectZon;
	// Step 3: group the cells into connected regions (grid adjacency).
	for(int i = 0; i < nSize; i++)
	{
		RECT tempRect = BoxList[i];
		if (g_ConnectZon.empty())
		{
			CONNECT_ZON connectTemp;
			connectTemp.rectList.push_back(tempRect);
			connectTemp.nBox ++;
			g_ConnectZon.push_back(connectTemp);
		}
		else
		{
			BOOL bInList = FALSE;
			for(int j = 0; j < g_ConnectZon.size(); j ++)
			{
				CONNECT_ZON connectZon = g_ConnectZon[j];
				for (int k = 0; k < connectZon.rectList.size(); k ++)
				{
					if (IsRectNear(tempRect, connectZon.rectList[k]))
					{
						g_ConnectZon[j].rectList.push_back(tempRect);
						g_ConnectZon[j].nBox ++;
						bInList = TRUE;
						break;
					}
					// NOTE(review): this check sits inside the k-loop after the
					// break above, so it never exits the j-loop; a cell adjacent
					// to two regions can be added to both (later fixed up by the
					// region merge below) — confirm intent.
					if (bInList)
					{
						break;
					}
				}
			}
			// Not adjacent to any existing region: start a new one.
			if (bInList == FALSE)
			{
				CONNECT_ZON connectTemp;
				connectTemp.rectList.push_back(tempRect);
				connectTemp.nBox ++;
				g_ConnectZon.push_back(connectTemp);
			}
		}
	}
	// Step 4: merge any two regions that contain adjacent cells.
	for (int i = 0; i < g_ConnectZon.size(); i ++)
	{
		for(int j = i + 1; j < g_ConnectZon.size(); j ++)
		{
			BOOL bZonNear = IsZonNear(g_ConnectZon[i], g_ConnectZon[j]);
			if (bZonNear)
			{
				// Adjacent: fold the smaller region into the larger one.
				//MergZon(g_ConnectZon[i], g_ConnectZon[j]);
				if (g_ConnectZon[i].nBox >= g_ConnectZon[j].nBox)
				{
					for(int k = 0; k < g_ConnectZon[j].nBox; k ++)
					{
						g_ConnectZon[i].rectList.push_back(g_ConnectZon[j].rectList[k]);
					}
					g_ConnectZon[i].nBox += g_ConnectZon[j].nBox;
					g_ConnectZon[j].rectList.clear();
					g_ConnectZon[j].nBox = 0;
				}
				else
				{
					for(int k = 0; k < g_ConnectZon[i].nBox; k ++)
					{
						g_ConnectZon[j].rectList.push_back(g_ConnectZon[i].rectList[k]);
					}
					g_ConnectZon[j].nBox += g_ConnectZon[i].nBox;
					g_ConnectZon[i].rectList.clear();
					g_ConnectZon[i].nBox = 0;
				}
			}
		}
	}
	// Step 5: pick the largest region; if a second sizeable region exists,
	// prefer the inner of the two.
	int nMaxSize = 0;
	std::sort(g_ConnectZon.begin(),g_ConnectZon.end(),SortByM2);
	CONNECT_ZON maxConnect = g_ConnectZon[0];
	// A second candidate region exists.
	if (g_ConnectZon.size() > 1)
	{
		if (g_ConnectZon[1].nBox > 0)
		{
			CONNECT_ZON maxConnectOther = g_ConnectZon[1];
			BOOL bInner = IsInnerZon(maxConnect, maxConnectOther);
			if (!bInner)
			{
				maxConnect = maxConnectOther;
			}
		}
	}
	// Sort the region's cells left-to-right, top-to-bottom.
	 std::sort(maxConnect.rectList.begin(),maxConnect.rectList.end(),SortByM1);
	 // Split the sorted cells into rows (one CONNECT_ZON per row).
	 vector<CONNECT_ZON> LineConnect;
	 int nIndexOfLine = -1;
	 int nLine = -1;
	
	 for (int i = 0; i < maxConnect.rectList.size(); i ++)
	 {
		 RECT tempRect = maxConnect.rectList[i];
		 if (nLine != tempRect.top)
		 {
			CONNECT_ZON tempConnect;
			tempConnect.rectList.push_back(tempRect);
			tempConnect.nBox ++;
			nIndexOfLine ++;
			LineConnect.push_back(tempConnect);
			nLine = tempRect.top;
		 }
		 else
		 {
			 LineConnect[nIndexOfLine].rectList.push_back(tempRect);
			 LineConnect[nIndexOfLine].nBox ++;
		 }
		 // Cells arrive left-to-right, top-to-bottom from the sort above.
	 }
	 // No white connected region at all: save the whole deskewed image as-is.
	 if (maxConnect.rectList.size() == 0)
	 {
		 IplImage* src;  
		 IplImage* dst;
		 src = cvLoadImage(strSave7.c_str(),1);  
		 if(!src)  
		 {
			 return 0;
		 }
		 cvSetImageROI(src,cvRect(0,0,nWidth, nHeight));  
		 dst = cvCreateImage(cvSize(nWidth, nHeight),  
			 IPL_DEPTH_8U,  
			 src->nChannels);  
		 cvCopy(src,dst,0);  
		 cvResetImageROI(src);  
		 //cvNamedWindow("操作後的影象",1);  
		 //cvShowImage("操作後的影象",dst); 
		 cvSaveImage(strSave8.c_str(), dst);
		 return 0;
	 }
	 // Step 6: black out everything outside the chosen region —
	 // the strip above it, the left/right margins of every row, and the
	 // strip below it.
	 RECT rectFirst = LineConnect[0].rectList[0];
	 RECT rectTop = {0,0,nWidth ,rectFirst.bottom};
	 SetRectColor(srcImg,rectTop, BLACK);
	 // Middle rows: clear to the left of the first cell and to the right of
	 // the last cell in each row.
	 for (int i = 0; i < LineConnect.size(); i ++)
	 {
		 CONNECT_ZON tempConnect = LineConnect[i];
		 RECT tempRect = tempConnect.rectList[0];
		 RECT leftRect = {0, tempRect.top, tempRect.right, tempRect.bottom};
		 SetRectColor(srcImg, leftRect,BLACK);
		 tempRect = tempConnect.rectList[tempConnect.rectList.size() - 1];
		 RECT rightRect = {tempRect.left, tempRect.top, nWidth, tempRect.bottom };
		 SetRectColor(srcImg, rightRect, BLACK);
	 }
	
     // Bottom strip.
	 RECT rectLast = LineConnect[LineConnect.size() - 1].rectList[0];
	 RECT rectBottom = {0, rectLast.bottom, nWidth, nHeight};
	 SetRectColor(srcImg, rectBottom,BLACK);
	 imwrite(strSave3.c_str(),srcImg);
	 // Erase the white cell borders painted in step 2.
	 DrowBoxColor(srcImg, maxConnect.rectList, BLACK);
	 imwrite(strSave4.c_str(),srcImg);
	 // Step 7: probe the remaining white pixels into candidate boxes.
	 int nMaxBlackNum = 10;
	 int nMinBoxWidth = 50;
	 int nBlack = 0;
	 vector<RECT > ResultBoxList;
	 int nBoxIndex = -1;
	 BOOL bStartBox = FALSE;
	 int nWhite = 0;
	 // NOTE: despite the names, `col` is the row (y) index and `row` is the
	 // column (x) index in this scan.
	 for (int col = 0; col < nHeight; col += 1)
	 {
		for (int row = 0; row < nWidth; row++)
		{
			int nPixel = (int)(*(srcImg.data + srcImg.step[1] * row + srcImg.step[0] * col));
			if (nPixel == BLACK && bStartBox == FALSE)
			{
				nBlack = 0;
				continue;
			}
			// First white pixel starts a new candidate box.
			else if ( nPixel == WHITE && bStartBox == FALSE)
			{
				// Seed a 10x10 box, clamped to the right/bottom image edges.
				RECT rectTemp = {row, col, min(row + nMaxBlackNum, nWidth), min(col + nMaxBlackNum, nHeight)};
				bStartBox = TRUE;
				ResultBoxList.push_back(rectTemp);
				nBoxIndex ++;
			}
			else if(nPixel == WHITE && bStartBox == TRUE)
			{
				// Still white: grow the box right by 1, or by the run of black
				// pixels just bridged.
				if (ResultBoxList[nBoxIndex].right < nWidth - 1)
				{
					if (nBlack == 0)
					{
						ResultBoxList[nBoxIndex].right += 1;
					}
					else
					{
						ResultBoxList[nBoxIndex].right += nBlack;
						nBlack = 0;
					}
					
				}
				
			}
			else if(nPixel == BLACK && bStartBox == TRUE)
			{
				// Black pixel while a box is open.
				nBlack ++;
				// Ten consecutive black pixels close the box.
				if (nBlack > nMaxBlackNum)
				{
					// (Disabled) boxes narrower than 50 were once discarded here.
// 					int nWidth = ResultBoxList[nBoxIndex].right - ResultBoxList[nBoxIndex].left;
// 					if (nWidth < nMinBoxWidth)
// 					{
// 						ResultBoxList.erase(ResultBoxList.end() - 1);
// 					}
					bStartBox = FALSE;
				}
			}
		}
	 }
	 // Step 8: merge the candidate boxes.
	 int nResultSize = ResultBoxList.size();
// 	 Mat Img5 = srcImg;
// 	 DrowBoxColor(Img5,ResultBoxList, WHITE);
// 	 imwrite(strSave5.c_str(),Img5);
	 // First merge pass: fold each box into the first existing box it
	 // intersects.
	 vector<RECT> mergResultList;
	 int nIndexOfResultList = -1;
	 for(int i = 0; i < nResultSize; i++)
	 {
		 RECT tempRect = ResultBoxList[i];
		 if (mergResultList.empty())
		 {
			 mergResultList.push_back(tempRect);
			 nIndexOfResultList ++;
		 }
		 else
		 {
			 BOOL bInList = FALSE;
			 for(int j = 0; j < mergResultList.size(); j ++)
			 {
				BOOL bIntersect = IsRectIntersect(mergResultList[j], tempRect);
				if (bIntersect)
				{
					// Intersecting: expand the existing box to cover both.
					mergResultList[j].left = min(tempRect.left, mergResultList[j].left);
					mergResultList[j].top = min(tempRect.top, mergResultList[j].top);
					mergResultList[j].right = max(tempRect.right, mergResultList[j].right);
					mergResultList[j].bottom = max(tempRect.bottom, mergResultList[j].bottom);
					bInList = TRUE;
				}
			 }
			 // Intersects nothing: keep it as a new box.
			 if (bInList == FALSE)
			 {
				 mergResultList.push_back(tempRect);
				 nIndexOfResultList ++;
			 }
		 }
	 }
	 // Second merge pass: merge the merged boxes with each other; absorbed
	 // boxes are zeroed out.
	 Mat Img6 = srcImg;
	// DrowBoxColor(Img6, mergResultList, WHITE);
	// imwrite(strSave6_0.c_str(),Img6);

	 for(int i = 0; i < mergResultList.size(); i ++)
	 {
		 if (mergResultList[i].left == 0 && mergResultList[i].right == 0)
		 {
			 continue;
		 }
		 for(int j = i + 1; j < mergResultList.size(); j ++)
		 {
			 BOOL bIntersect = IsRectIntersect(mergResultList[i], mergResultList[j]);
			 if (bIntersect)
			 {
				 // Intersecting: expand box i to cover both.
				 mergResultList[i].left = min(mergResultList[j].left, mergResultList[i].left);
				 mergResultList[i].top = min(mergResultList[j].top, mergResultList[i].top);
				 mergResultList[i].right = max(mergResultList[j].right, mergResultList[i].right);
				 mergResultList[i].bottom = max(mergResultList[j].bottom, mergResultList[i].bottom);
				 // Zero out the absorbed box.
				 mergResultList[j].left = 0;
				 mergResultList[j].top = 0;
				 mergResultList[j].right = 0;
				 mergResultList[j].bottom = 0;
			 }
		 }
		
	 }
	 DrowBoxColor(srcImg, mergResultList, WHITE);
	 imwrite(strSave6_1.c_str(),srcImg);
	 // Step 9: keep only boxes that are at least 80px on each side and
	 // roughly square (width/height differ by less than RATE of the smaller).
	 RECT destRect;
	 BOOL bHaveOne = FALSE;
	 for(int i = 0;i < mergResultList.size(); i ++)
	 {
		int nTempWidth = mergResultList[i].right - mergResultList[i].left;
		int nTempHeight = mergResultList[i].bottom - mergResultList[i].top;
		BOOL bRelative = abs(nTempWidth - nTempHeight) < RATE * min(nTempWidth,nTempHeight);
		if (nTempHeight < 80 || nTempWidth < 80 || !bRelative)
		{
			mergResultList[i].left = 0;
			mergResultList[i].right = 0;
			mergResultList[i].top = 0;
			mergResultList[i].bottom = 0;
		}
		else
		{
			// NOTE(review): if several boxes qualify, the last one wins.
			destRect = mergResultList[i];
			bHaveOne = TRUE;
		}
	 }
	 if (bHaveOne == FALSE)
	 {
		 cout<<"can not find one QRCode!";
		 return 0;
	 }
	 DrowBoxColor(srcImg, mergResultList, WHITE);
	imwrite(strSave6.c_str(),srcImg);
	// Step 10: crop the winning box out of the deskewed image.
	//Mat sourceImg = imread("imageText_D.bmp", CV_LOAD_IMAGE_GRAYSCALE);
	//Mat roi_img = sourceImg(Range(destRect.left,destRect.right),Range(destRect.top,destRect.bottom));
	//Rect rect(destRect.left, destRect.right, destRect.top, destRect.bottom);
	//Mat image_roi = sourceImg(rect);
	IplImage* src;  
	IplImage* dst;
	src = cvLoadImage(strSave7.c_str(),1);  
    if(!src)  
	{
		return 0;
	}
   // cvNamedWindow("源影象",1);  
    //cvShowImage("源影象",src);  
	// Expand the crop 10 pixels toward the top-left (clamped at 0).
	destRect.left -= 10;
	if (destRect.left < 0)
	{
		destRect.left = 0;
	}
	destRect.top -= 10;
	if (destRect.top < 0)
	{
		destRect.top = 0;
	}
    cvSetImageROI(src,cvRect(destRect.left,destRect.top ,destRect.right - destRect.left, destRect.bottom - destRect.top));  
    dst = cvCreateImage(cvSize(destRect.right - destRect.left, destRect.bottom - destRect.top),  
            IPL_DEPTH_8U,  
            src->nChannels);  
    cvCopy(src,dst,0);  
    cvResetImageROI(src);  
	//cvNamedWindow("操作後的影象",1);  
    //cvShowImage("操作後的影象",dst); 
	strSave8 = getFilePath("imageText_Clear4.jpg");
	cvSaveImage(strSave8.c_str(), dst);
	return 0;
}
// Skew correction (from John Hany's DFT-based text-rotation-correction):
// estimates the dominant orientation from the Fourier spectrum via Hough
// lines, then rotates the image back and writes the result to strSave1.
void imageCorrect()
{
	Mat srcImg = imread(strOrigin.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
	if(srcImg.empty())
		return ;
	//imshow("source", srcImg);

	Point center(srcImg.cols/2, srcImg.rows/2);

#ifdef DEGREE
	//Rotate source image
	Mat rotMatS = getRotationMatrix2D(center, DEGREE, 1.0);
	warpAffine(srcImg, srcImg, rotMatS, srcImg.size(), 1, 0, Scalar(255,255,255));
	//imshow("RotatedSrc", srcImg);
	//imwrite("imageText_R.jpg",srcImg);
#endif

	//Expand image to an optimal size, for faster processing speed
	//Set widths of borders in four directions
	//If borderType==BORDER_CONSTANT, fill the borders with (0,0,0)
	Mat padded;
	int opWidth = getOptimalDFTSize(srcImg.rows);
	int opHeight = getOptimalDFTSize(srcImg.cols);
	copyMakeBorder(srcImg, padded, 0, opWidth-srcImg.rows, 0, opHeight-srcImg.cols, BORDER_CONSTANT, Scalar::all(0));

	Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
	Mat comImg;
	//Merge into a double-channel image
	merge(planes,2,comImg);

	//Use the same image as input and output,
	//so that the results can fit in Mat well
	dft(comImg, comImg);

	//Compute the magnitude
	//planes[0]=Re(DFT(I)), planes[1]=Im(DFT(I))
	//magnitude=sqrt(Re^2+Im^2)
	split(comImg, planes);
	magnitude(planes[0], planes[1], planes[0]);

	//Switch to logarithmic scale, for better visual results
	//M2=log(1+M1)
	Mat magMat = planes[0];
	magMat += Scalar::all(1);
	log(magMat, magMat);

	//Crop the spectrum
	//Width and height of magMat should be even, so that they can be divided by 2
	//-2 is 11111110 in binary system, operator & make sure width and height are always even
	magMat = magMat(Rect(0, 0, magMat.cols & -2, magMat.rows & -2));

	//Rearrange the quadrants of Fourier image,
	//so that the origin is at the center of image,
	//and move the high frequency to the corners
	int cx = magMat.cols/2;
	int cy = magMat.rows/2;

	Mat q0(magMat, Rect(0, 0, cx, cy));
	Mat q1(magMat, Rect(0, cy, cx, cy));
	Mat q2(magMat, Rect(cx, cy, cx, cy));
	Mat q3(magMat, Rect(cx, 0, cx, cy));

	Mat tmp;
	q0.copyTo(tmp);
	q2.copyTo(q0);
	tmp.copyTo(q2);

	q1.copyTo(tmp);
	q3.copyTo(q1);
	tmp.copyTo(q3);

	//Normalize the magnitude to [0,1], then to[0,255]
	normalize(magMat, magMat, 0, 1, CV_MINMAX);
	Mat magImg(magMat.size(), CV_8UC1);
	magMat.convertTo(magImg,CV_8UC1,255,0);
	//imshow("magnitude", magImg);
	//imwrite("imageText_mag.jpg",magImg);

	//Turn into binary image
	threshold(magImg,magImg,GRAY_THRESH,255,CV_THRESH_BINARY);
	//imshow("mag_binary", magImg);
	//imwrite("imageText_bin.jpg",magImg);

	//Find lines with Hough Transformation
	vector<Vec2f> lines;
	float pi180 = (float)CV_PI/180;
	Mat linImg(magImg.size(),CV_8UC3);
	HoughLines(magImg,lines,1,pi180,HOUGH_VOTE,0,0);
	int numLines = lines.size();
	for(int l=0; l<numLines; l++)
	{
		float rho = lines[l][0], theta = lines[l][1];
		Point pt1, pt2;
		double a = cos(theta), b = sin(theta);
		double x0 = a*rho, y0 = b*rho;
		pt1.x = cvRound(x0 + 1000*(-b));
		pt1.y = cvRound(y0 + 1000*(a));
		pt2.x = cvRound(x0 - 1000*(-b));
		pt2.y = cvRound(y0 - 1000*(a));
		line(linImg,pt1,pt2,Scalar(255,0,0),3,8,0);
	}
	//imshow("lines",linImg);
	//imwrite("imageText_line.jpg",linImg);
	//if(lines.size() == 3){
	//	cout << "found three angels:" << endl;
	//	cout << lines[0][1]*180/CV_PI << endl << lines[1][1]*180/CV_PI << endl << lines[2][1]*180/CV_PI << endl << endl;
	//}

	//Find the proper angel from the three found angels
	//(skip lines that are nearly horizontal or vertical in the spectrum)
	float angel=0;
	float piThresh = (float)CV_PI/90;
	float pi2 = CV_PI/2;
	for(int l=0; l<numLines; l++)
	{
		float theta = lines[l][1];
		if(abs(theta) < piThresh || abs(theta-pi2) < piThresh)
			continue;
		else{
			angel = theta;
			break;
		}
	}

	//Calculate the rotation angel
	//The image has to be square,
	//so that the rotation angel can be calculate right
	angel = angel<pi2 ? angel : angel-CV_PI;
	if(angel != pi2){
		float angelT = srcImg.rows*tan(angel)/srcImg.cols;
		angel = atan(angelT);
	}
	float angelD = angel*180/(float)CV_PI;
	//cout << "the rotation angel to be applied:" << endl << angelD << endl << endl;

	//Rotate the image to recover
	Mat rotMat = getRotationMatrix2D(center,angelD,1.0);
	Mat dstImg = Mat::ones(srcImg.size(),CV_8UC3);
	warpAffine(srcImg,dstImg,rotMat,srcImg.size(),1,0,Scalar(255,255,255));
	//imshow("result",dstImg);
	imwrite(strSave1,dstImg);
}
// Entry point. argv[1] is the scanned image to process; all intermediate
// files are written to the current working directory.
int main(int argc, char **argv)
{
	if(argc < 2) return(1);
	// Resolve the working directory used by getFilePath().
	_getcwd(szCurrentPath,MAX_PATH);
	strOrigin = getFilePath("imageText.jpg");
	strSave1 = getFilePath("imageText_D.jpg");
	strSave2 = getFilePath("canny.jpg");
	strSave3 = getFilePath("imageText_Clear0.jpg");
	strSave4 = getFilePath("imageText_Clear1.jpg");
	strSave5 = getFilePath("imageText_Clear2.jpg");
	strSave6_0 = getFilePath("imageText_Clear3_0.jpg");
	strSave6_1 = getFilePath("imageText_Clear3_1.jpg");
	strSave6 = getFilePath("imageText_Clear3.jpg");
	// NOTE(review): strSave7 resolves to the same file as strSave1 (the
	// deskewed image that ClearEdge() reads back) — confirm this aliasing
	// is intentional.
	strSave7 = getFilePath("imageText_D.jpg");
	strSave8 = getFilePath("imageText_Clear4.jpg");
	
	// Pipeline: copy input -> deskew -> Canny edges -> clean & crop QR code.
	CopyFile(argv[1], strOrigin.c_str(), FALSE);
	imageCorrect();
	outLinePic2();
	ClearEdge();
	return 0;
	//ConnectDomain();
	//Read a single-channel image

}
</span>

需要注意的是,程式需要opencv的環境,需要自己先安裝和設定。需要opencv_imgproc2410d.lib opencv_core2410d.lib opencv_highgui2410d.lib三個lib。 中間的2410是對應的版本號,不同的版本應該也可以,另外別忘了對應的dll。

其中有一些函式並沒有用到,只是作為邊緣檢測效果實驗用。

最後儲存二維碼圖片時,不知道該用什麼函式來將一個RECT的影象複製出來,mat應該也有對應的函式吧。

標籤:des   演算法   class   style   log   com   http   it   si   

原文:http://blog.csdn.net/u200814342a/article/details/51324366