Show only large differences between images of slightly different scale
I am trying to show differences between 2 images, mainly those within a "cloud" outline as shown in the new image below. The "cloud" outline represents changes that have been made to the reference image. Some info has been blanked out due to privacy reasons, do let me know if you require any more information!
Reference Image: reference
New Image: new
The scale of both images is slightly off, so I used cv2.findHomography but was unable to get them aligned properly
Aligned Image: aligned new
Here's the output image from my code: result
Image result I am trying to achieve: ideal result
To summarize, here are the problems I am trying to solve
- To align 2 images of slightly different scale
- To highlight differences, typically within a cloud outline
- Merge small differences of close proximity into a larger boundary
My code is based on the response of user fmw42 in this thread with some minor changes: Find Differences Between Images with OpenCV Python
Any help is greatly appreciated!
import cv2
import numpy as np
# Maximum number of ORB keypoints to detect in each image.
MAX_FEATURES = 500
# Fraction of the best (lowest-distance) matches kept for homography fitting.
GOOD_MATCH_PERCENT = 0.1
def alignImages(imNew, imRef):
    """Warp imNew into the coordinate frame of imRef via an ORB homography.

    Parameters
    ----------
    imNew : BGR image to be warped (query image).
    imRef : BGR reference image (train image).

    Returns
    -------
    (imReg, h) : the warped copy of imNew, sized like imRef, and the
        estimated 3x3 homography matrix.

    Raises
    ------
    ValueError : if too few features/matches exist to estimate a homography.

    Side effects: writes "matches.png" visualizing the retained matches.
    """
    # ORB operates on single-channel images, so convert to grayscale first.
    imNew_gray = cv2.cvtColor(imNew, cv2.COLOR_BGR2GRAY)
    imRef_gray = cv2.cvtColor(imRef, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(imNew_gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(imRef_gray, None)
    if descriptors1 is None or descriptors2 is None:
        # detectAndCompute returns None descriptors on featureless images;
        # matcher.match would otherwise fail with an opaque error.
        raise ValueError("No ORB features detected in one of the images")

    # Hamming distance is the appropriate metric for ORB's binary descriptors.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = list(matcher.match(descriptors1, descriptors2, None))

    # Sort by distance (ascending) and keep only the best fraction.
    matches.sort(key=lambda m: m.distance)
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]
    if len(matches) < 4:
        # cv2.findHomography needs at least 4 point correspondences.
        raise ValueError("Not enough good matches to estimate a homography")

    # Save a visualization of the retained matches for debugging.
    imMatches = cv2.drawMatches(imNew, keypoints1, imRef, keypoints2, matches, None)
    cv2.imwrite("matches.png", imMatches)

    # Extract matched point coordinates (queryIdx -> imNew, trainIdx -> imRef).
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # RANSAC rejects outlier correspondences while fitting the homography.
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    if h is None:
        raise ValueError("Homography estimation failed")

    # Warp imNew onto the reference image's size. shape[:2] (instead of
    # unpacking three values) also works if imRef is single-channel.
    height, width = imRef.shape[:2]
    imReg = cv2.warpPerspective(imNew, h, (width, height))
    return imReg, h
# ---- Script: align the new image to the reference and box the differences ----

# Read reference image; fail fast with a clear message if the path is wrong
# (cv2.imread returns None instead of raising).
refFilename = "SD5021-.png"
print("Reading reference image : ", refFilename)
imReference = cv2.imread(refFilename)
if imReference is None:
    raise FileNotFoundError("Could not read reference image: " + refFilename)
hh, ww = imReference.shape[:2]

# Read image to be aligned.
imFilename = "SD5021A.png"
print("Reading image to align : ", imFilename)
im = cv2.imread(imFilename)
if im is None:
    raise FileNotFoundError("Could not read image to align: " + imFilename)

# Aligned image in imReg; homography in its own name so the bounding-box
# loop variable below cannot shadow it.
imReg, homography = alignImages(im, imReference)
print("Estimated homography : \n", homography)

# Convert both images to grayscale for thresholding.
ref_gray = cv2.cvtColor(imReference, cv2.COLOR_BGR2GRAY)
im_gray = cv2.cvtColor(imReg, cv2.COLOR_BGR2GRAY)

# Otsu thresholding picks the binarization level automatically.
refThresh = cv2.threshold(ref_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
imThresh = cv2.threshold(im_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Morphological open then close: remove speckle noise, fill small holes.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
refThresh = cv2.morphologyEx(refThresh, cv2.MORPH_OPEN, kernel, iterations=1)
refThresh = cv2.morphologyEx(refThresh, cv2.MORPH_CLOSE, kernel, iterations=1)
imThresh = cv2.morphologyEx(imThresh, cv2.MORPH_OPEN, kernel, iterations=1)
imThresh = cv2.morphologyEx(imThresh, cv2.MORPH_CLOSE, kernel, iterations=1)

# Absolute difference of the two binary masks. cv2.absdiff works directly on
# uint8 and replaces the float64 round-trip of np.abs(cv2.add(a, -b)).
diff = cv2.absdiff(imThresh, refThresh)

# A larger open removes thin residue caused by slight residual misalignment.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
diff_cleaned = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel, iterations=1)

# Draw red bounding boxes around difference regions that do not touch the
# image border (border-touching blobs are warp artifacts, not real changes).
cnts = cv2.findContours(diff_cleaned, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # OpenCV 3 vs 4 return shape
result = im.copy()
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    if x > 0 and y > 0 and x + w < ww - 1 and y + h < hh - 1:
        cv2.rectangle(result, (x, y), (x + w, y + h), (0, 0, 255), 2)

# Save outputs.
cv2.imwrite('new_aligned.jpg', imReg)
cv2.imwrite('diff.png', diff_cleaned)
cv2.imwrite('results.png', result)
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow
| Solution | Source |
|---|---|
