
# OpenCV: Installation, Configuration, and Basic Usage

# Reading and writing images
```python
import cv2

# Read an image from disk
img = cv2.imread(r'E:\MachineLearning\image\img1.jpg')

# Print the image dimensions (height, width)
h, w = img.shape[:2]
print(h, w)

# Save the JPEG image again in PNG format
cv2.imwrite(r'E:\MachineLearning\image\img3.png', img)
```
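
One thing worth knowing about `cv2.imread`: it does not raise an exception when the path is wrong or the format is unsupported, it simply returns `None`, which then fails later with a confusing error. A minimal sketch of the check, reusing the example path from above:

```python
import cv2

# imread returns None instead of raising when the file cannot be read,
# so check the result before using the array.
path = r'E:\MachineLearning\image\img1.jpg'
img = cv2.imread(path)
if img is None:
    raise IOError('Could not read image: ' + path)

print(img.shape)  # (height, width, 3) for a colour image
print(img.dtype)  # uint8
```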
# Displaying images and results
```python
import cv2
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties

# Font used for the plot titles (SimSun, as in the original post)
font = FontProperties(fname=r"c:\windows\fonts\SimSun.ttc", size=14)

# Read an image
img = cv2.imread(r'E:\MachineLearning\image\img1.jpg')

# Convert the colour space to greyscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Show the greyscale image
fig = plt.figure()
plt.subplot(121)
plt.gray()
plt.imshow(gray)
plt.title(u'greyscale image', fontproperties=font)
plt.axis('off')

# Compute the integral image
intim = cv2.integral(gray)

# Normalize to the 0-255 range
intim = (255.0 * intim) / intim.max()

# Show the integral image
plt.subplot(122)
plt.gray()
plt.imshow(intim)
plt.title(u'integral image', fontproperties=font)
plt.axis('off')
plt.show()

# Display the image with OpenCV instead of matplotlib
# cv2.imshow("Image", gray)
# cv2.waitKey()
```
* 灰度圖![Image Text](https://raw.github.com/wangyufei1006/Java-Design-patterns/master/Image/40.png)* 積分圖![Image Text](https://raw.github.com/wangyufei1006/Java-Design-patterns/master/Image/43.png)
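
The integral image is mostly useful because the sum of any rectangular region can be read off with four lookups instead of summing its pixels. A small sketch of that lookup, using a stand-in greyscale array and arbitrarily chosen rectangle coordinates (not taken from the post):

```python
import cv2
import numpy as np

# Any greyscale image works; a small random one stands in for 'gray' here.
gray = np.random.randint(0, 256, (100, 120), dtype=np.uint8)

# cv2.integral returns an array one pixel larger in each dimension,
# with intim[y, x] = sum of gray[:y, :x].
intim = cv2.integral(gray)

# Sum over the rectangle (x0, y0) inclusive to (x1, y1) exclusive,
# using four lookups.
x0, y0, x1, y1 = 10, 20, 60, 80  # example coordinates
rect_sum = intim[y1, x1] - intim[y0, x1] - intim[y1, x0] + intim[y0, x0]

# Check against a direct sum.
print(rect_sum, gray[y0:y1, x0:x1].sum())
```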
# Flood filling from a seed pixel
```python
import cv2
import numpy
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties

# Font used for the plot titles (SimSun, as in the original post)
font = FontProperties(fname=r"c:\windows\fonts\SimSun.ttc", size=14)

# Read an image
img = cv2.imread(r'E:\MachineLearning\image\img1.jpg')

# Convert from BGR to RGB so matplotlib shows the colours correctly
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# Show the original image
fig = plt.figure()
plt.subplot(121)
plt.gray()
plt.imshow(rgb)
plt.title(u'original image', fontproperties=font)
plt.axis('off')

# Get the image dimensions
h, w = img.shape[:2]
print(h, w)

# Flood fill from the seed pixel (10, 10) with the fill colour (255, 255, 0);
# diff is the allowed lower/upper difference to the neighbouring pixels
diff = (6, 6, 6)
mask = numpy.zeros((h + 2, w + 2), numpy.uint8)
cv2.floodFill(img, mask, (10, 10), (255, 255, 0), diff, diff)

# Show the flood-filled result
plt.subplot(122)
plt.imshow(img)
plt.title(u'flood fill', fontproperties=font)
plt.axis('off')
plt.show()

# Show and save the result with OpenCV
cv2.namedWindow('flood fill', 0)
cv2.imshow('flood fill', img)
cv2.waitKey()
cv2.imwrite(r'E:\MachineLearning\image\img10.jpg', rgb)
cv2.imwrite(r'E:\MachineLearning\image\img11.jpg', img)
```
* Flood fill result ![Image Text](https://raw.github.com/wangyufei1006/Java-Design-patterns/master/Image/41.png)
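
`floodFill` can also be used to produce a binary mask of the connected region without modifying the image itself, by passing the `cv2.FLOODFILL_MASK_ONLY` flag. A minimal sketch, with the seed point and thresholds chosen arbitrarily rather than taken from the post:

```python
import cv2
import numpy as np

img = cv2.imread(r'E:\MachineLearning\image\img1.jpg')
h, w = img.shape[:2]

# The mask must be 2 pixels larger than the image in each dimension.
mask = np.zeros((h + 2, w + 2), np.uint8)

# 4-connectivity, write 255 into the mask, and leave the image untouched.
flags = 4 | cv2.FLOODFILL_MASK_ONLY | (255 << 8)
cv2.floodFill(img, mask, (10, 10), 0, (6, 6, 6), (6, 6, 6), flags)

# Strip the 1-pixel border to get a mask aligned with the original image.
region = mask[1:-1, 1:-1]
print(region.sum() // 255, 'pixels reached from the seed point')
```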
# Extracting SURF (Speeded-Up Robust Features) from an image
```python
import cv2
import numpy
from matplotlib import pyplot as plt

# Read an image
img = cv2.imread(r'E:\MachineLearning\image\img1.jpg')

# Downsample with pyrDown: im_lowres has half the width and height of the
# original image
im_lowres = cv2.pyrDown(img)

# Convert the downsampled image to greyscale
gray = cv2.cvtColor(im_lowres, cv2.COLOR_BGR2GRAY)

# Detect keypoints with the SURF detector (requires opencv-contrib)
s = cv2.xfeatures2d.SURF_create()
mask = numpy.uint8(numpy.ones(gray.shape))
keypoints = s.detect(gray, mask)

# Draw every tenth keypoint on top of the image
vis = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
for k in keypoints[::10]:
    cv2.circle(vis, (int(k.pt[0]), int(k.pt[1])), 2, (0, 255, 0), -1)
    cv2.circle(vis, (int(k.pt[0]), int(k.pt[1])), int(k.size), (0, 255, 0), 2)
print(vis.shape)
print(img.shape)
plt.imshow(vis)
plt.show()
# cv2.namedWindow('local descriptors', 0)
# cv2.imshow('local descriptors', vis)
# cv2.waitKey()

cv2.imwrite(r'E:\MachineLearning\image\img12.jpg', vis)
```
* Feature extraction result ![Image Text](https://raw.github.com/wangyufei1006/Java-Design-patterns/master/Image/42.png)
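
Note that `SURF_create` lives in the contrib `xfeatures2d` module and the algorithm is patent-encumbered, so it may be missing from a plain `opencv-python` install. ORB ships with the main module and gives keypoints that can be visualized the same way; a minimal sketch of that alternative (ORB is not what the post uses, just a freely available substitute):

```python
import cv2

img = cv2.imread(r'E:\MachineLearning\image\img1.jpg')
gray = cv2.cvtColor(cv2.pyrDown(img), cv2.COLOR_BGR2GRAY)

# ORB is part of the main OpenCV module, no contrib build needed.
orb = cv2.ORB_create(nfeatures=500)
keypoints = orb.detect(gray, None)

# drawKeypoints handles the circles that the SURF example drew by hand.
vis = cv2.drawKeypoints(gray, keypoints, None, color=(0, 255, 0))
cv2.imshow('ORB keypoints', vis)
cv2.waitKey()
cv2.destroyAllWindows()
```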
# Video processing
```python
import cv2

# Capture frames from the first camera and show them live
cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    # blur = cv2.GaussianBlur(img, (0, 0), 10)  # optional blur
    cv2.imshow('video test', img)
    key = cv2.waitKey(10)
    if key == 27:          # Esc quits
        break
    if key == ord(' '):    # space bar saves the current frame
        cv2.imwrite(r'E:\MachineLearning\image\vid_result.jpg', img)
```
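
Saving single frames with `imwrite` works, but for recording the whole stream `cv2.VideoWriter` is the usual tool. A minimal sketch; the output path and the XVID codec are just examples, not taken from the post:

```python
import cv2

cap = cv2.VideoCapture(0)

# Read one frame first to learn the frame size the writer needs.
ret, frame = cap.read()
h, w = frame.shape[:2]

# XVID into an .avi container is a widely supported combination.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(r'E:\MachineLearning\image\capture.avi', fourcc, 20.0, (w, h))

while ret:
    out.write(frame)
    cv2.imshow('recording', frame)
    if cv2.waitKey(10) == 27:  # Esc stops the recording
        break
    ret, frame = cap.read()

cap.release()
out.release()
cv2.destroyAllWindows()
```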
# Reading video into a NumPy array
```python
import cv2
from numpy import array

cap = cv2.VideoCapture(0)
frames = []

# Collect frames until Esc is pressed
while True:
    ret, img = cap.read()
    cv2.imshow('video', img)
    frames.append(img)
    if cv2.waitKey(10) == 27:
        break
frames = array(frames)
print(img.shape)
print(frames.shape)  # number of frames, height, width, 3
```
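
Once the frames are stacked into a single array, whole-clip operations become one-liners; averaging along the frame axis, for instance, gives a rough background or long-exposure image. A small sketch with stand-in data in place of the captured `frames` array:

```python
import numpy as np
import cv2

# Stand-in for the captured array, shape (n_frames, height, width, 3).
frames = np.random.randint(0, 256, (30, 240, 320, 3), dtype=np.uint8)

# Mean over the frame axis, converted back to uint8 for display.
background = frames.mean(axis=0).astype(np.uint8)
cv2.imshow('mean frame', background)
cv2.waitKey()
cv2.destroyAllWindows()
```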