Opencv Python實現兩幅圖像匹配
本文實例為大家分享了Opencv Python實現兩幅圖像匹配的具體代碼,供大家參考,具體內容如下
原圖
import cv2

# Load both images as grayscale and resize them to the same size so the
# SURF descriptors are computed at comparable scales.
img1 = cv2.imread('SURF_2.jpg', cv2.IMREAD_GRAYSCALE)
img1 = cv2.resize(img1, dsize=(600, 400))
img2 = cv2.imread('SURF_1.jpg', cv2.IMREAD_GRAYSCALE)
img2 = cv2.resize(img2, dsize=(600, 400))
image1 = img1.copy()
image2 = img2.copy()

# Create a SURF detector. The argument is the Hessian threshold: only
# keypoints whose Hessian response exceeds 25000 are kept (lower it to
# detect more, weaker keypoints).
surf = cv2.xfeatures2d.SURF_create(25000)

# detectAndCompute returns the keypoints and a descriptor vector for each
# keypoint.
keypoints1, descriptor1 = surf.detectAndCompute(image1, None)
keypoints2, descriptor2 = surf.detectAndCompute(image2, None)

# Draw the keypoints on each image; the RICH flag also draws keypoint size
# and orientation.
image1 = cv2.drawKeypoints(image=image1, keypoints=keypoints1,
                           outImage=image1, color=(255, 0, 255),
                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
image2 = cv2.drawKeypoints(image=image2, keypoints=keypoints2,
                           outImage=image2, color=(255, 0, 255),
                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('surf_keypoints1', image1)
cv2.imshow('surf_keypoints2', image2)
cv2.waitKey(20)

# Match the descriptor sets with a FLANN-based matcher (approximate
# nearest-neighbour search; fast for large descriptor sets).
matcher = cv2.FlannBasedMatcher()
matchePoints = matcher.match(descriptor1, descriptor2)

# Best (smallest) and worst (largest) match distances.
# BUG FIX: the original seeded minMatch with the arbitrary value 1 and
# maxMatch with 0, which gives a wrong minimum whenever every distance is
# greater than 1; min()/max() over the actual distances is both correct
# and idiomatic.
minMatch = min(m.distance for m in matchePoints)
maxMatch = max(m.distance for m in matchePoints)
print('最佳匹配值是:', minMatch)
print('最差匹配值是:', maxMatch)

# Keep only the strong matches: those within 1/16 of the distance range
# above the best match.
threshold = minMatch + (maxMatch - minMatch) / 16
goodMatchePoints = [m for m in matchePoints if m.distance < threshold]

# Draw the retained matches side by side and wait for a key press.
outImg = None
outImg = cv2.drawMatches(img1, keypoints1, img2, keypoints2,
                         goodMatchePoints, outImg,
                         matchColor=(0, 255, 0),
                         flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
cv2.imshow('matche', outImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
原圖
# coding=utf-8
import cv2
from matplotlib import pyplot as plt

# Load the image as grayscale (flag 0 == cv2.IMREAD_GRAYSCALE).
img = cv2.imread('xfeatures2d.SURF_create2.jpg', 0)

# The legacy API was cv2.SURF(400) with a Hessian threshold of 400, but
# that produced far too many keypoints, so a much larger threshold is used.
# BUG FIX: the original wrote cv2.cv2.xfeatures2d.SURF_create (a doubled
# module reference); the correct path is cv2.xfeatures2d.SURF_create.
surf = cv2.xfeatures2d.SURF_create(50000)  # Hessian threshold 50000
kp, des = surf.detectAndCompute(img, None)
leng = len(kp)
print(leng)

# Draw the keypoints (flag 4 == DRAW_RICH_KEYPOINTS: size + orientation).
img2 = cv2.drawKeypoints(img, kp, None, (255, 0, 0), 4)
plt.imshow(img2)
plt.show()

# U-SURF: all keypoints are assumed upright (no orientation computation),
# which speeds up detection.
# BUG FIX: objects created via xfeatures2d.SURF_create expose
# setUpright()/setExtended() setters; assigning surf.upright = True as the
# original did does not configure the detector.
surf.setUpright(True)
kp = surf.detect(img, None)
img3 = cv2.drawKeypoints(img, kp, None, (255, 0, 0), 4)
plt.imshow(img3)
plt.show()

# Extended descriptors: switch from 64-dimensional to 128-dimensional.
surf.setExtended(True)  # BUG FIX: was surf.extended = True (no effect)
kp, des = surf.detectAndCompute(img, None)
dem1 = surf.descriptorSize()
print(dem1)
# BUG FIX: ndarray.shape is an attribute, not a method — des.shape()
# raised TypeError in the original.
shp1 = des.shape
print(shp1)
效果圖
import cv2
from matplotlib import pyplot as plt

leftImage = cv2.imread('xfeatures2d.SURF_create_1.jpg')
rightImage = cv2.imread('xfeatures2d.SURF_create_2.jpg')

# Create a SIFT detector; detectAndCompute returns the keypoints and their
# descriptor vectors for each image.
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(leftImage, None)
kp2, des2 = sift.detectAndCompute(rightImage, None)

# FLANN matcher configured with a KD-tree index; 'checks' is the number of
# times the index trees are traversed (higher = more accurate, slower).
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)
flann = cv2.FlannBasedMatcher(indexParams, searchParams)

# For each left-image descriptor find its 2 nearest right-image neighbours.
matches = flann.knnMatch(des1, des2, k=2)

# One [draw-first, draw-second] mask entry per knn pair; start all hidden.
matchesMask = [[0, 0] for _ in range(len(matches))]
print("matches", matches[0])

# Lowe's ratio test: keep a match only when the best neighbour is clearly
# closer than the second-best.
# BUG FIX: the original compared against 0.07 * n.distance, which rejects
# essentially every match; the standard ratio from Lowe's SIFT paper is ~0.7.
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]

# flags=2 draws only the matched keypoints; flags=0 would draw all points.
drawParams = dict(matchColor=(0, 255, 0), singlePointColor=None,
                  matchesMask=matchesMask, flags=2)
resultImage = cv2.drawMatchesKnn(leftImage, kp1, rightImage, kp2, matches,
                                 None, **drawParams)
plt.imshow(resultImage)
plt.show()
以上就是本文的全部內容,希望對大家的學習有所幫助,也希望大家多多支持WalkonNet。
推薦閱讀:
- Python OpenCV中的drawMatches()關鍵匹配繪制方法
- OpenCV基於ORB算法實現角點檢測
- 基於Python和openCV實現圖像的全景拼接詳細步驟
- OpenCV-Python 實現兩張圖片自動拼接成全景圖
- Python OpenCV學習之特征點檢測與匹配詳解