1. 程式人生 > >Canny邊緣檢測原理及C++實現

Canny邊緣檢測原理及C++實現

從11.21開始寫部落格,到今天一個多月了,寫了20多篇了,希望自己能堅持下去吧!

在這裡祝大家聖誕節快樂!

Canny邊緣檢測演算法是澳大利亞科學家John F. Canny在1986年提出來的,不得不提一下的是當年John Canny本人才28歲!到今天已經30年過去了,Canny演算法仍然是影象邊緣檢測演算法中最經典、有效的演算法之一。

一起目睹一下Canny大神的風采:


John Canny研究了最優邊緣檢測方法所需的特性,給出了評價邊緣檢測效能優劣的3個指標:

  • 1  好的信噪比,即將非邊緣點判定為邊緣點的概率要低,將邊緣點判為非邊緣點的概率要低;
  • 2 高的定位效能,即檢測出的邊緣點要儘可能在實際邊緣的中心;
  • 3 對單一邊緣僅有唯一響應,即單個邊緣產生多個響應的概率要低,並且虛假響應邊緣應該得到最大抑制;
Canny演算法就是基於滿足這3個指標的最優解實現的,在對影象中物體邊緣敏感性的同時,也可以抑制或消除噪聲的影響。

Canny運算元邊緣檢測的具體步驟如下:

1. 彩色影象轉換為灰度影象

2.對影象進行高斯模糊

3. 計算影象梯度,根據梯度計算影象邊緣幅值與角度

4. 非極大值抑制(邊緣細化)

5. 雙閾值處理

6.  雙閾值中間畫素處理及邊緣連結

下面詳解各部分的程式碼:

1.      彩色影象轉灰度影象

根據彩色影象RGB轉灰度公式:gray  =  R * 0.299 + G * 0.587 + B * 0.114

將彩色影象中每個RGB畫素轉為灰度值的程式碼如下:

C++程式碼實現起來也比較簡單,注意一般情況下影象處理中彩色影象各分量的排列順序是B、G、R
// Convert a 3-channel BGR image to a single-channel grayscale image:
// gray = 0.299*R + 0.587*G + 0.114*B (OpenCV stores pixels as B, G, R).
void ConvertRGB2GRAY(const Mat &image, Mat &imageGray)
{
	// Bail out silently on an empty or non-3-channel input.
	if (!image.data || image.channels() != 3)
	{
		return;
	}
	
	imageGray = Mat::zeros(image.size(), CV_8UC1);
	
	// Raw pixel pointers into both images.
	uchar *pointImage = image.data;
	uchar *pointImageGray = imageGray.data;
	
	// Bytes per row for each image (row stride, may include padding).
	size_t stepImage = image.step;
	size_t stepImageGray = imageGray.step;
	for (int i = 0; i < imageGray.rows; i++)
	{
		for (int j = 0; j < imageGray.cols; j++)
		{
			// Weighted sum of the B, G, R components of the pixel at (i, j).
			pointImageGray[i*stepImageGray + j] = (uchar)(0.114*pointImage[i*stepImage + 3 * j] + 0.587*pointImage[i*stepImage + 3 * j + 1] + 0.299*pointImage[i*stepImage + 3 * j + 2]);
		}
	}
}

2.對影象進行高斯模糊

關於高斯更加詳細的解釋看這裡:http://blog.csdn.net/linqianbi/article/details/78635941

//計算一維高斯的權值陣列
//************ Build a normalized 1-D Gaussian weight array ************
// size  : kernel length (expected odd); sigma : Gaussian standard deviation.
// Returns a heap array of `size` weights summing to 1; caller must delete[].
double *getOneGuassionArray(int size, double sigma)
{
	int kerR = size / 2;              // kernel radius (index of the center tap)
	double *arr = new double[size];
	double sum = 0.0;
	for (int i = 0; i < size; i++)
	{
		// Un-normalized Gaussian: exp(-d^2 / (2*sigma^2)), d = distance to center.
		arr[i] = exp(-((i - kerR)*(i - kerR)) / (2 * sigma*sigma));
		sum += arr[i];
	}
	// Normalize so the weights sum to 1.
	// Fix: removed leftover debug printing (cout) from this library routine.
	for (int i = 0; i < size; i++)
	{
		arr[i] /= sum;
	}
	return arr;
}

//*************** Separable Gaussian blur (sigma = 1) ***************
// srcImage : 1- or 3-channel 8-bit input image.
// dst      : blurred output, same size/type as the input.
// size     : odd kernel edge length; pixels within size/2 of the image
//            border are copied through unchanged.
// Fix 1: the original snippet was missing a closing brace after the
//        single-channel branch (syntax error).
// Fix 2: each pass now reads from an unmodified snapshot; the original
//        read and wrote `dst` in place, so pixels already blurred in the
//        current row/column contaminated the convolution.
void MyGaussianBlur(Mat &srcImage, Mat &dst, int size)
{
	CV_Assert(srcImage.channels() == 1 || srcImage.channels() == 3); // only 1- or 3-channel images
	int kerR = size / 2;                         // kernel radius
	dst = srcImage.clone();
	int channels = dst.channels();
	double *arr = getOneGuassionArray(size, 1);  // 1-D Gaussian weights

	// ---- Horizontal pass (row fixed, column varies) ----
	Mat snap = dst.clone();
	for (int i = kerR; i < dst.rows - kerR; i++)
	{
		for (int j = kerR; j < dst.cols - kerR; j++)
		{
			double GuassionSum[3] = { 0 };
			for (int k = -kerR; k <= kerR; k++)
			{
				if (channels == 1)       // single channel
				{
					GuassionSum[0] += arr[kerR + k] * snap.at<uchar>(i, j + k);
				}
				else if (channels == 3)  // three channels
				{
					Vec3b bgr = snap.at<Vec3b>(i, j + k);
					double a = arr[kerR + k];
					GuassionSum[0] += a*bgr[0];
					GuassionSum[1] += a*bgr[1];
					GuassionSum[2] += a*bgr[2];
				}
			}
			// Clamp to the valid 8-bit range.
			for (int k = 0; k < channels; k++)
			{
				if (GuassionSum[k] < 0)
					GuassionSum[k] = 0;
				else if (GuassionSum[k] > 255)
					GuassionSum[k] = 255;
			}
			if (channels == 1)
				dst.at<uchar>(i, j) = static_cast<uchar>(GuassionSum[0]);
			else if (channels == 3)
			{
				Vec3b bgr = { static_cast<uchar>(GuassionSum[0]), static_cast<uchar>(GuassionSum[1]), static_cast<uchar>(GuassionSum[2]) };
				dst.at<Vec3b>(i, j) = bgr;
			}
		}
	}

	// ---- Vertical pass (column fixed, row varies), again from a snapshot ----
	snap = dst.clone();
	for (int i = kerR; i < dst.rows - kerR; i++)
	{
		for (int j = kerR; j < dst.cols - kerR; j++)
		{
			double GuassionSum[3] = { 0 };
			for (int k = -kerR; k <= kerR; k++)
			{
				if (channels == 1)
				{
					GuassionSum[0] += arr[kerR + k] * snap.at<uchar>(i + k, j);
				}
				else if (channels == 3)
				{
					Vec3b bgr = snap.at<Vec3b>(i + k, j);
					double a = arr[kerR + k];
					GuassionSum[0] += a*bgr[0];
					GuassionSum[1] += a*bgr[1];
					GuassionSum[2] += a*bgr[2];
				}
			}
			for (int k = 0; k < channels; k++)
			{
				if (GuassionSum[k] < 0)
					GuassionSum[k] = 0;
				else if (GuassionSum[k] > 255)
					GuassionSum[k] = 255;
			}
			if (channels == 1)
				dst.at<uchar>(i, j) = static_cast<uchar>(GuassionSum[0]);
			else if (channels == 3)
			{
				Vec3b bgr = { static_cast<uchar>(GuassionSum[0]), static_cast<uchar>(GuassionSum[1]), static_cast<uchar>(GuassionSum[2]) };
				dst.at<Vec3b>(i, j) = bgr;
			}
		}
	}
	delete[] arr;
}

3. 計算影象梯度,根據梯度計算影象邊緣幅值與角度

關於Soble的詳細解釋看這裡:http://blog.csdn.net/linqianbi/article/details/78673903
//儲存梯度膜長與梯度角
//****** 3x3 Sobel: |Gx|, |Gy| and per-pixel gradient direction ******
// imageSource   : 8-bit grayscale input.
// imageSobelX/Y : output |Gx| / |Gy| images (8-bit after convertScaleAbs).
// pointDrection : output array of gradient angles in DEGREES shifted to
//                 [0,180]; (rows-2)*(cols-2) entries, allocated here —
//                 the caller must delete[] it.
void SobelGradDirction(Mat &imageSource, Mat &imageSobelX, Mat &imageSobelY, double *&pointDrection)
{
	pointDrection = new double[(imageSource.rows - 2)*(imageSource.cols - 2)];
	for (int i = 0; i < (imageSource.rows - 2)*(imageSource.cols - 2); i++)
	{
		pointDrection[i] = 0;
	}
	imageSobelX = Mat::zeros(imageSource.size(), CV_32SC1);
	imageSobelY = Mat::zeros(imageSource.size(), CV_32SC1);

	uchar *P = imageSource.data;
	// Fix: the gradient matrices hold 32-bit ints, so address them through
	// int* — the original wrote single bytes through uchar*, silently
	// truncating any gradient magnitude above 255 (max possible is 1020).
	int *PX = (int *)imageSobelX.data;
	int *PY = (int *)imageSobelY.data;

	int step = imageSource.step;                       // source bytes per row
	int stepXY = imageSobelX.step / sizeof(int);       // gradient ints per row

	int index = 0;  // running index into the direction array
	for (int i = 1; i < imageSource.rows - 1; ++i)
	{
		for (int j = 1; j < imageSource.cols - 1; ++j)
		{
			// Vertical Sobel kernel (responds to horizontal edges).
			double gradY = P[(i + 1)*step + j - 1] + P[(i + 1)*step + j] * 2 + P[(i + 1)*step + j + 1] - P[(i - 1)*step + j - 1] - P[(i - 1)*step + j] * 2 - P[(i - 1)*step + j + 1];
			PY[i*stepXY + j] = (int)fabs(gradY);

			// Horizontal Sobel kernel (responds to vertical edges).
			double gradX = P[(i - 1)*step + j + 1] + P[i*step + j + 1] * 2 + P[(i + 1)*step + j + 1] - P[(i - 1)*step + j - 1] - P[i*step + j - 1] * 2 - P[(i + 1)*step + j - 1];
			PX[i*stepXY + j] = (int)fabs(gradX);
			if (gradX == 0)
			{
				gradX = 0.00000000000000001;  // avoid division by zero
			}
			// atan yields [-90,90] degrees; shift to [0,180] for the NMS step.
			pointDrection[index] = (atan(gradY / gradX)*180)/CV_PI;
			pointDrection[index] += 90;
			index++;
		}
	}
	// Saturate the int gradients into displayable 8-bit images.
	convertScaleAbs(imageSobelX, imageSobelX);
	convertScaleAbs(imageSobelY, imageSobelY);
}

 求梯度圖的幅值

求得X、Y方向的梯度和梯度角之後再來計算X和Y方向融合的梯度幅值,計算公式為:

程式碼簡單的實現如下:
void SobelAmplitude(const Mat imageGradX, const Mat imageGradY, Mat &SobelAmpXY)
{
	SobelAmpXY = Mat::zeros(imageGradX.size(), CV_32FC1);
	for (int i = 0; i < SobelAmpXY.rows; i++)
	{
		for (int j = 0; j < SobelAmpXY.cols; j++)
		{
			SobelAmpXY.at<float>(i,j)= sqrt(imageGradX.at<uchar>(i, j)*imageGradX.at<uchar>(i, j) + imageGradY.at<uchar>(i, j)*imageGradY.at<uchar>(i, j));
		}
	}
	convertScaleAbs(SobelAmpXY, SobelAmpXY);
}
4. 非極大值抑制(邊緣細化) 求幅值影象進行非極大值抑制,可以進一步消除非邊緣的噪點,更重要的是,可以細化邊緣。 抑制邏輯是:沿著該點梯度方向,比較前後兩個點的幅值大小,若該點大於前後兩點,則保留,若該點小於前後兩點,則置為0 示意圖如下:
圖中四條虛線代表影象中每一點可能的梯度方向,沿著梯度方向與邊界的上下兩個交點,就是需要拿來與中心點點(X0,Y0)做比較的點。交點值的計算採用插值法計算,以黃色的虛線所代表的梯度角Θ為例,右上角交點處幅值為: (1-tanΘ)P(X0-1,Y0+1)+P(X0,Y0+1)*tanΘ=P(X0-1,Y0+1)+tanΘ*(P(X0,Y0+1)-P(X0-1,Y0+1)) 四種情況下需要分別計算,程式碼如下:
//非極大值抑制,採用插值法,計算插值點的畫素值
void LocalMaxValue(const Mat imageInput, Mat &imageOutput, double *pointDrection)
{
	//複製一張輸出的影象
	imageOutput = imageInput.clone();
	int k = 0;
	for (int i = 1; i < imageInput.rows - 1; i++)
	{
		for (int j = 1; j < imageInput.cols - 1; j++)
		{
			/*
			value00  value01  value02
			value10  value11  value12
			value20  value21  value22
			*/
			//求出每個點的畫素值
			int value00 = imageInput.at<uchar>(i - 1, j - 1);
			int value01 = imageInput.at<uchar>(i - 1, j);
			int value02 = imageInput.at<uchar>(i - 1, j + 1);
			int value10 = imageInput.at<uchar>(i , j - 1);
			int value11 = imageInput.at<uchar>(i , j);
			int value12 = imageInput.at<uchar>(i , j + 1);
			int value20 = imageInput.at<uchar>(i + 1, j - 1);
			int value21 = imageInput.at<uchar>(i + 1, j);
			int value22 = imageInput.at<uchar>(i + 1, j + 1);
			//如果梯度角在[0,45]度之間的話
			if (pointDrection[k] > 0 && pointDrection[k] <= 45)
			{
				if ((value11 <= (value12 + (value02 - value12)*tan(pointDrection[k]))) || (value11 <= (value10 + (value20 - value10)*tan(pointDrection[k]))))
				{
					imageOutput.at<uchar>(i, j) = 0;
				}
			}

			//如果梯度角在[45,90]度之間的話
			if (pointDrection[k] > 45 && pointDrection[k] <= 90)
			{
				if ((value11 <= (value01 + (value02 - value01)*tan(pointDrection[k]))) || (value11 <= (value21 + (value20 - value21)*tan(pointDrection[k]))))
				{
					imageOutput.at<uchar>(i, j) = 0;
				}
			}

			//如果梯度角在[90,135]度之間的話
			if (pointDrection[k] > 90 && pointDrection[k] <= 135)
			{
				if ((value11 <= (value01 + (value00 - value01)*tan(180-pointDrection[k]))) || (value11 <= (value21 + (value22 - value21)*tan(180-pointDrection[k]))))
				{
					imageOutput.at<uchar>(i, j) = 0;
				}
			}

			//如果梯度角在[135,180]度之間的話
			if (pointDrection[k] > 135 && pointDrection[k] <= 180)
			{
				if ((value11 <= (value10 + (value00 - value10)*tan(180 - pointDrection[k]))) || (value11 <= (value12 + (value22 - value12)*tan(180 - pointDrection[k]))))
				{
					imageOutput.at<uchar>(i, j) = 0;
				}
			}
			k++;
		}
	}
}

5. 雙閾值處理
雙閾值的機理是:

 指定一個低閾值A,一個高閾值B,一般取B為影象整體灰度級分佈的70%處的灰度值,且B為A的1.5到2倍大小;

灰度值大於B的,置為255,灰度值小於A的,置為0;

灰度值介於A和B之間的,考察該畫素點臨近的8畫素是否有灰度值為255的,若沒有255的,表示這是一個孤立的區域性極大值點,予以排除,置為0;若有255的,表示這是一個跟其他邊緣有“接壤”的可造之材,置為255,之後重複執行該步驟,直到考察完最後一個畫素點。

實現的程式碼如下:

//*************** Double thresholding ***************
// Pixels above highThreshold are forced to 255 (strong edge);
// pixels below lowThreshold are forced to 0; values in between
// are left untouched for the later linking stage.
void DoubleThreshold(Mat &imageIput, double lowThreshold, double highThreshold)
{
	int rows = imageIput.rows;
	int cols = imageIput.cols;
	for (int r = 0; r < rows; r++)
	{
		for (int c = 0; c < cols; c++)
		{
			uchar &px = imageIput.at<uchar>(r, c);
			if (px > highThreshold)
			{
				px = 255;
			}
			if (px < lowThreshold)
			{
				px = 0;
			}
		}
	}
}

6.  雙閾值中間畫素處理及邊緣連結
//*************** Edge linking for in-between pixels ***************
// Repeatedly promotes a "weak" pixel (value in (lowThreshold, 255)) to 255
// if any of its 8 neighbors is a strong edge; once a full sweep makes no
// change, the remaining weak pixels are isolated and suppressed to 0.
// highThreshold is unused here but kept for interface compatibility.
// Fix: replaces the original self-recursion, which restarted a whole-image
// scan for every promoted pixel — quadratic cost and stack-overflow risk.
void DoubleThresholdLink(Mat &imageInput, double lowThreshold, double highThreshold)
{
	bool changed = true;
	while (changed)
	{
		changed = false;
		for (int i = 1; i < imageInput.rows - 1; i++)
		{
			for (int j = 1; j < imageInput.cols - 1; j++)
			{
				// Only pixels between the thresholds are candidates.
				if (imageInput.at<uchar>(i, j) > lowThreshold && imageInput.at<uchar>(i, j) < 255)
				{
					if (imageInput.at<uchar>(i - 1, j - 1) == 255 || imageInput.at<uchar>(i - 1, j) == 255
						|| imageInput.at<uchar>(i - 1, j + 1) == 255 || imageInput.at<uchar>(i, j - 1) == 255
						|| imageInput.at<uchar>(i, j + 1) == 255 || imageInput.at<uchar>(i + 1, j - 1) == 255
						|| imageInput.at<uchar>(i + 1, j) == 255 || imageInput.at<uchar>(i + 1, j + 1) == 255)
					{
						imageInput.at<uchar>(i, j) = 255;
						changed = true;  // promotion may enable further links
					}
				}
			}
		}
	}
	// Suppress weak pixels that never connected to a strong edge.
	for (int i = 1; i < imageInput.rows - 1; i++)
	{
		for (int j = 1; j < imageInput.cols - 1; j++)
		{
			if (imageInput.at<uchar>(i, j) > lowThreshold && imageInput.at<uchar>(i, j) < 255)
			{
				imageInput.at<uchar>(i, j) = 0;
			}
		}
	}
}

經過這幾個步驟Canny邊緣檢測的程式碼就寫完了。

下面放上完整的C++程式碼:

#include "opencv2/imgproc/imgproc.hpp"  
#include "opencv2/highgui/highgui.hpp"  
#include <iostream>  
#include <cmath>
using namespace cv;
using namespace std;
/*
RGB轉換成灰度影象的一個常用公式是:
Gray = R*0.299 + G*0.587 + B*0.114
*/
//****************** Grayscale conversion ******************
//image     : input BGR color image (reference);
//imageGray : output grayscale image (reference);
//*******************************************************
void ConvertRGB2GRAY(const Mat &image, Mat &imageGray);


//*********** Build a 1-D Gaussian weight array ***********
//size  : kernel edge length
//sigma : Gaussian standard deviation
//*******************************************************
double *getOneGuassionArray(int size, double sigma);

//****************** Gaussian blur ******************
//srcImage : input source image
//dst      : output blurred image
//size     : kernel edge length
//*******************************************************
void MyGaussianBlur(Mat &srcImage, Mat &dst, int size);


//****** Sobel: X/Y gradients and gradient direction angles ******
//imageSource   : source grayscale image;
//imageSobelX   : X-direction gradient image;
//imageSobelY   : Y-direction gradient image;
//pointDrection : pointer to the gradient-direction array (allocated inside)
//*************************************************************
void SobelGradDirction(Mat &imageSource, Mat &imageSobelX, Mat &imageSobelY, double *&pointDrection);

//*********** Combined Sobel X/Y gradient magnitude ***********
//imageGradX : X-direction gradient image;
//imageGradY : Y-direction gradient image;
//SobelAmpXY : output combined X/Y gradient-magnitude image
//*************************************************************
void SobelAmplitude(const Mat imageGradX, const Mat imageGradY, Mat &SobelAmpXY);

//**************** Non-maximum suppression ****************
//imageInput    : input Sobel gradient image;
//imageOutput   : output suppressed (thinned) image;
//pointDrection : per-pixel gradient-direction array
//*************************************************************
void LocalMaxValue(const Mat imageInput, Mat &imageOutput, double *pointDrection);

//****************** Double thresholding ******************
//imageIput     : in/out gradient-magnitude image;
//lowThreshold  : low threshold
//highThreshold : high threshold
//******************************************************
void DoubleThreshold(Mat &imageIput, double lowThreshold, double highThreshold);

//********* Edge linking for pixels between thresholds *********
//imageInput    : in/out gradient-magnitude image;
//lowThreshold  : low threshold
//highThreshold : high threshold
//*************************************************************
void DoubleThresholdLink(Mat &imageInput, double lowThreshold, double highThreshold);
//*************** Demo driver: run the full Canny pipeline on "1.jpg" ***************
int main()
{
	const Mat srcImage = imread("1.jpg");
	if (!srcImage.data)
	{
		printf("could not load image...\n");
		return -1;
	}
	imshow("srcImage", srcImage);
	Mat srcGray;
	ConvertRGB2GRAY(srcImage, srcGray);
	Mat GaussianRes;
	MyGaussianBlur(srcGray, GaussianRes, 3);
	Mat imageSobelX;
	Mat imageSobelY;
	// SobelGradDirction allocates the direction array itself; the original
	// pre-allocated a buffer here that leaked when the function reassigned
	// the pointer, and the array was never freed at all.
	double *pointDirection = NULL;
	SobelGradDirction(GaussianRes, imageSobelX, imageSobelY, pointDirection);  // X/Y gradients and angles
	Mat imageSobleXY;
	SobelAmplitude(imageSobelX, imageSobelY, imageSobleXY);
	Mat localMaxImage;
	LocalMaxValue(imageSobleXY, localMaxImage, pointDirection);
	imshow("Non-Maximum Image", localMaxImage);
	DoubleThreshold(localMaxImage, 60, 100);
	imshow("DoubleThr", localMaxImage);
	DoubleThresholdLink(localMaxImage, 60, 100);
	imshow("Canny Image", localMaxImage);
	imshow("srcGray", srcGray);
	imshow("GaussianRes", GaussianRes);
	imshow("SobleX", imageSobelX);
	imshow("SobleY", imageSobelY);
	imshow("SobleXY", imageSobleXY);
	delete[] pointDirection;  // release the array allocated by SobelGradDirction
	waitKey(0);
	return 0;
}

void ConvertRGB2GRAY(const Mat &image, Mat &imageGray)
{
	if (!image.data || image.channels() != 3)
	{
		return;
	}
	
	imageGray = Mat::zeros(image.size(), CV_8UC1);
	
	uchar *pointImage = image.data;
	uchar *pointImageGray = imageGray.data;
	
	size_t stepImage = image.step;
	size_t stepImageGray = imageGray.step;
	for (int i = 0; i < imageGray.rows; i++)
	{
		for (int j = 0; j < imageGray.cols; j++)
		{
			pointImageGray[i*stepImageGray + j] = (uchar)(0.114*pointImage[i*stepImage + 3 * j] + 0.587*pointImage[i*stepImage + 3 * j + 1] + 0.299*pointImage[i*stepImage + 3 * j + 2]);
		}
	}
}





//************ Build a normalized 1-D Gaussian weight array ************
// size  : kernel length (expected odd); sigma : Gaussian standard deviation.
// Returns a heap array of `size` weights summing to 1; caller must delete[].
double *getOneGuassionArray(int size, double sigma)
{
	int kerR = size / 2;              // kernel radius (index of the center tap)
	double *arr = new double[size];
	double sum = 0.0;
	for (int i = 0; i < size; i++)
	{
		// Un-normalized Gaussian: exp(-d^2 / (2*sigma^2)), d = distance to center.
		arr[i] = exp(-((i - kerR)*(i - kerR)) / (2 * sigma*sigma));
		sum += arr[i];
	}
	// Normalize so the weights sum to 1.
	// Fix: removed leftover debug printing (cout) from this library routine.
	for (int i = 0; i < size; i++)
	{
		arr[i] /= sum;
	}
	return arr;
}

//*************** Separable Gaussian blur (sigma = 1) ***************
// srcImage : 1- or 3-channel 8-bit input image.
// dst      : blurred output, same size/type as the input.
// size     : odd kernel edge length; pixels within size/2 of the image
//            border are copied through unchanged.
// Fix: each pass now reads from an unmodified snapshot; the original read
// and wrote `dst` in place, so pixels already blurred in the current
// row/column contaminated the convolution.
void MyGaussianBlur(Mat &srcImage, Mat &dst, int size)
{
	CV_Assert(srcImage.channels() == 1 || srcImage.channels() == 3); // only 1- or 3-channel images
	int kerR = size / 2;                         // kernel radius
	dst = srcImage.clone();
	int channels = dst.channels();
	double *arr = getOneGuassionArray(size, 1);  // 1-D Gaussian weights

	// ---- Horizontal pass (row fixed, column varies) ----
	Mat snap = dst.clone();
	for (int i = kerR; i < dst.rows - kerR; i++)
	{
		for (int j = kerR; j < dst.cols - kerR; j++)
		{
			double GuassionSum[3] = { 0 };
			for (int k = -kerR; k <= kerR; k++)
			{
				if (channels == 1)       // single channel
				{
					GuassionSum[0] += arr[kerR + k] * snap.at<uchar>(i, j + k);
				}
				else if (channels == 3)  // three channels
				{
					Vec3b bgr = snap.at<Vec3b>(i, j + k);
					double a = arr[kerR + k];
					GuassionSum[0] += a*bgr[0];
					GuassionSum[1] += a*bgr[1];
					GuassionSum[2] += a*bgr[2];
				}
			}
			// Clamp to the valid 8-bit range.
			for (int k = 0; k < channels; k++)
			{
				if (GuassionSum[k] < 0)
					GuassionSum[k] = 0;
				else if (GuassionSum[k] > 255)
					GuassionSum[k] = 255;
			}
			if (channels == 1)
				dst.at<uchar>(i, j) = static_cast<uchar>(GuassionSum[0]);
			else if (channels == 3)
			{
				Vec3b bgr = { static_cast<uchar>(GuassionSum[0]), static_cast<uchar>(GuassionSum[1]), static_cast<uchar>(GuassionSum[2]) };
				dst.at<Vec3b>(i, j) = bgr;
			}
		}
	}

	// ---- Vertical pass (column fixed, row varies), again from a snapshot ----
	snap = dst.clone();
	for (int i = kerR; i < dst.rows - kerR; i++)
	{
		for (int j = kerR; j < dst.cols - kerR; j++)
		{
			double GuassionSum[3] = { 0 };
			for (int k = -kerR; k <= kerR; k++)
			{
				if (channels == 1)
				{
					GuassionSum[0] += arr[kerR + k] * snap.at<uchar>(i + k, j);
				}
				else if (channels == 3)
				{
					Vec3b bgr = snap.at<Vec3b>(i + k, j);
					double a = arr[kerR + k];
					GuassionSum[0] += a*bgr[0];
					GuassionSum[1] += a*bgr[1];
					GuassionSum[2] += a*bgr[2];
				}
			}
			for (int k = 0; k < channels; k++)
			{
				if (GuassionSum[k] < 0)
					GuassionSum[k] = 0;
				else if (GuassionSum[k] > 255)
					GuassionSum[k] = 255;
			}
			if (channels == 1)
				dst.at<uchar>(i, j) = static_cast<uchar>(GuassionSum[0]);
			else if (channels == 3)
			{
				Vec3b bgr = { static_cast<uchar>(GuassionSum[0]), static_cast<uchar>(GuassionSum[1]), static_cast<uchar>(GuassionSum[2]) };
				dst.at<Vec3b>(i, j) = bgr;
			}
		}
	}
	delete[] arr;
}



//****** 3x3 Sobel: |Gx|, |Gy| and per-pixel gradient direction ******
// imageSource   : 8-bit grayscale input.
// imageSobelX/Y : output |Gx| / |Gy| images (8-bit after convertScaleAbs).
// pointDrection : output array of gradient angles in DEGREES shifted to
//                 [0,180]; (rows-2)*(cols-2) entries, allocated here —
//                 the caller must delete[] it.
void SobelGradDirction(Mat &imageSource, Mat &imageSobelX, Mat &imageSobelY, double *&pointDrection)
{
	pointDrection = new double[(imageSource.rows - 2)*(imageSource.cols - 2)];
	for (int i = 0; i < (imageSource.rows - 2)*(imageSource.cols - 2); i++)
	{
		pointDrection[i] = 0;
	}
	imageSobelX = Mat::zeros(imageSource.size(), CV_32SC1);
	imageSobelY = Mat::zeros(imageSource.size(), CV_32SC1);

	uchar *P = imageSource.data;
	// Fix: the gradient matrices hold 32-bit ints, so address them through
	// int* — the original wrote single bytes through uchar*, silently
	// truncating any gradient magnitude above 255 (max possible is 1020).
	int *PX = (int *)imageSobelX.data;
	int *PY = (int *)imageSobelY.data;

	// Row strides: bytes for the 8-bit source, ints for the gradient mats.
	int step = imageSource.step;
	int stepXY = imageSobelX.step / sizeof(int);

	int index = 0;  // running index into the direction array
	for (int i = 1; i < imageSource.rows - 1; ++i)
	{
		for (int j = 1; j < imageSource.cols - 1; ++j)
		{
			// Vertical Sobel kernel (responds to horizontal edges).
			double gradY = P[(i + 1)*step + j - 1] + P[(i + 1)*step + j] * 2 + P[(i + 1)*step + j + 1] - P[(i - 1)*step + j - 1] - P[(i - 1)*step + j] * 2 - P[(i - 1)*step + j + 1];
			PY[i*stepXY + j] = (int)fabs(gradY);

			// Horizontal Sobel kernel (responds to vertical edges).
			double gradX = P[(i - 1)*step + j + 1] + P[i*step + j + 1] * 2 + P[(i + 1)*step + j + 1] - P[(i - 1)*step + j - 1] - P[i*step + j - 1] * 2 - P[(i + 1)*step + j - 1];
			PX[i*stepXY + j] = (int)fabs(gradX);
			if (gradX == 0)
			{
				gradX = 0.00000000000000001;  // avoid division by zero
			}
			// atan yields [-90,90] degrees; shift to [0,180] for the NMS step.
			pointDrection[index] = (atan(gradY / gradX)*180)/CV_PI;
			pointDrection[index] += 90;
			index++;
		}
	}
	// Saturate the int gradients into displayable 8-bit images.
	convertScaleAbs(imageSobelX, imageSobelX);
	convertScaleAbs(imageSobelY, imageSobelY);
}


void SobelAmplitude(const Mat imageGradX, const Mat imageGradY, Mat &SobelAmpXY)
{
	SobelAmpXY = Mat::zeros(imageGradX.size(), CV_32FC1);
	for (int i = 0; i < SobelAmpXY.rows; i++)
	{
		for (int j = 0; j < SobelAmpXY.cols; j++)
		{
			SobelAmpXY.at<float>(i,j)= sqrt(imageGradX.at<uchar>(i, j)*imageGradX.at<uchar>(i, j) + imageGradY.at<uchar>(i, j)*imageGradY.at<uchar>(i, j));
		}
	}
	convertScaleAbs(SobelAmpXY, SobelAmpXY);
}



void LocalMaxValue(const Mat imageInput, Mat &imageOutput, double *pointDrection)
{
	//複製一張輸出的影象
	imageOutput = imageInput.clone();
	int k = 0;
	for (int i = 1; i < imageInput.rows - 1; i++)
	{
		for (int j = 1; j < imageInput.cols - 1; j++)
		{
			/*
			value00  value01  value02
			value10  value11  value12
			value20  value21  value22
			*/
			//求出每個點的畫素值
			int value00 = imageInput.at<uchar>(i - 1, j - 1);
			int value01 = imageInput.at<uchar>(i - 1, j);
			int value02 = imageInput.at<uchar>(i - 1, j + 1);
			int value10 = imageInput.at<uchar>(i , j - 1);
			int value11 = imageInput.at<uchar>(i , j);
			int value12 = imageInput.at<uchar>(i , j + 1);
			int value20 = imageInput.at<uchar>(i + 1, j - 1);
			int value21 = imageInput.at<uchar>(i + 1, j);
			int value22 = imageInput.at<uchar>(i + 1, j + 1);
			//如果梯度角在[0,45]度之間的話
			if (pointDrection[k] > 0 && pointDrection[k] <= 45)
			{
				if ((value11 <= (value12 + (value02 - value12)*tan(pointDrection[k]))) || (value11 <= (value10 + (value20 - value10)*tan(pointDrection[k]))))
				{
					imageOutput.at<uchar>(i, j) = 0;
				}
			}

			//如果梯度角在[45,90]度之間的話
			if (pointDrection[k] > 45 && pointDrection[k] <= 90)
			{
				if ((value11 <= (value01 + (value02 - value01)*tan(pointDrection[k]))) || (value11 <= (value21 + (value20 - value21)*tan(pointDrection[k]))))
				{
					imageOutput.at<uchar>(i, j) = 0;
				}
			}

			//如果梯度角在[90,135]度之間的話
			if (pointDrection[k] > 90 && pointDrection[k] <= 135)
			{
				if ((value11 <= (value01 + (value00 - value01)*tan(180-pointDrection[k]))) || (value11 <= (value21 + (value22 - value21)*tan(180-pointDrection[k]))))
				{
					imageOutput.at<uchar>(i, j) = 0;
				}
			}

			//如果梯度角在[135,180]度之間的話
			if (pointDrection[k] > 135 && pointDrection[k] <= 180)
			{
				if ((value11 <= (value10 + (value00 - value10)*tan(180 - pointDrection[k]))) || (value11 <= (value12 + (value22 - value12)*tan(180 - pointDrection[k]))))
				{
					imageOutput.at<uchar>(i, j) = 0;
				}
			}
			k++;
		}
	}
}

//*************** Double thresholding ***************
// Pixels above highThreshold are forced to 255 (strong edge);
// pixels below lowThreshold are forced to 0; values in between
// are left untouched for the later linking stage.
void DoubleThreshold(Mat &imageIput, double lowThreshold, double highThreshold)
{
	int rows = imageIput.rows;
	int cols = imageIput.cols;
	for (int r = 0; r < rows; r++)
	{
		for (int c = 0; c < cols; c++)
		{
			uchar &px = imageIput.at<uchar>(r, c);
			if (px > highThreshold)
			{
				px = 255;
			}
			if (px < lowThreshold)
			{
				px = 0;
			}
		}
	}
}

 
//*************** Edge linking for in-between pixels ***************
// Repeatedly promotes a "weak" pixel (value in (lowThreshold, 255)) to 255
// if any of its 8 neighbors is a strong edge; once a full sweep makes no
// change, the remaining weak pixels are isolated and suppressed to 0.
// highThreshold is unused here but kept for interface compatibility.
// Fix: replaces the original self-recursion, which restarted a whole-image
// scan for every promoted pixel — quadratic cost and stack-overflow risk.
void DoubleThresholdLink(Mat &imageInput, double lowThreshold, double highThreshold)
{
	bool changed = true;
	while (changed)
	{
		changed = false;
		for (int i = 1; i < imageInput.rows - 1; i++)
		{
			for (int j = 1; j < imageInput.cols - 1; j++)
			{
				// Only pixels between the thresholds are candidates.
				if (imageInput.at<uchar>(i, j) > lowThreshold && imageInput.at<uchar>(i, j) < 255)
				{
					if (imageInput.at<uchar>(i - 1, j - 1) == 255 || imageInput.at<uchar>(i - 1, j) == 255
						|| imageInput.at<uchar>(i - 1, j + 1) == 255 || imageInput.at<uchar>(i, j - 1) == 255
						|| imageInput.at<uchar>(i, j + 1) == 255 || imageInput.at<uchar>(i + 1, j - 1) == 255
						|| imageInput.at<uchar>(i + 1, j) == 255 || imageInput.at<uchar>(i + 1, j + 1) == 255)
					{
						imageInput.at<uchar>(i, j) = 255;
						changed = true;  // promotion may enable further links
					}
				}
			}
		}
	}
	// Suppress weak pixels that never connected to a strong edge.
	for (int i = 1; i < imageInput.rows - 1; i++)
	{
		for (int j = 1; j < imageInput.cols - 1; j++)
		{
			if (imageInput.at<uchar>(i, j) > lowThreshold && imageInput.at<uchar>(i, j) < 255)
			{
				imageInput.at<uchar>(i, j) = 0;
			}
		}
	}
}

下面放上原圖和各個步驟的效果圖:

原圖:


原圖的灰度影象:


高斯模糊過的影象:


X方向的Soble圖:


Y方向的Soble圖:


XY方向的Soble圖:


非極大值抑制圖:


雙閾值處理圖:


最後的Canny效果圖: