Let's go — mandatory parts done, time for the fun part

main
Spagnolo Gasper 2022-11-27 16:46:25 +01:00
parent 6af7bbb4b8
commit 1742437304
4 changed files with 106 additions and 8 deletions

BIN
assignment4/datam/img3.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 983 KiB

BIN
assignment4/datam/img4.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 990 KiB

View File

@ -53,8 +53,8 @@ def one_b() -> None:
plt.show()
def ex2():
#two_a()
two_b()
two_a()
#two_b()
def two_a() -> None:
"""
@ -91,7 +91,7 @@ def two_a() -> None:
def two_b() -> None:
"""
jjjjj
also as two_c as it has improved method included
"""
#graph_a_small = uz_image.imread_gray("datam/img1.jpg", uz_image.ImageType.float64)
#graph_b_small = uz_image.imread_gray("datam/img2.jpg", uz_image.ImageType.float64)
@ -107,6 +107,7 @@ def two_b() -> None:
def ex3():
three_a()
#three_b()
def three_a() -> None:
"""
@ -115,6 +116,7 @@ def three_a() -> None:
keypoints_path = ["data/newyork/newyork.txt", "data/graf/graf.txt"]
images_a_path = ["data/newyork/newyork_a.jpg", "data/graf/graf_a.jpg"]
images_b_path = ["data/newyork/newyork_b.jpg", "data/graf/graf_b.jpg"]
def map_keypoints(keypoints):
# Map the keypoints
a_points =[]
@ -150,7 +152,41 @@ def three_a() -> None:
axs[i*2+1, 1].imshow(img_output, cmap="gray")
axs[i*2+1, 1].set_title("B transformed")
plt.show()
def three_b() -> None:
    """
    Exercise 3b: estimate a homography between two views with RANSAC.

    Detects matches between the two `graf` images, runs RANSAC to keep
    only the inlier correspondences, and displays them side by side.
    Returns nothing; the result is shown via `display_matches`.
    """
    image_a = uz_image.imread_gray("data/graf/graf_a.jpg", uz_image.ImageType.float64)
    image_b = uz_image.imread_gray("data/graf/graf_b.jpg", uz_image.ImageType.float64)
    # NOTE: does not work for the newyork dataset, because its keypoints
    # are not reciprocal (swap the paths above to try other image pairs).

    # Detect putative matches, then keep only RANSAC inliers.
    # The homography itself is not needed here, only the inlier set.
    keypoints_a, keypoints_b = uz_image.find_matches(image_a, image_b, sigma=3)
    _, best_inliers = uz_image.ransac(image_a, keypoints_a, image_b, keypoints_b)

    # Each inlier row is (xa, ya, xb, yb); split into per-image point arrays.
    inliers = np.asarray(best_inliers)
    a_points = inliers[:, :2]
    b_points = inliers[:, 2:]

    uz_image.display_matches(image_a, a_points, image_b, b_points)

# ######## #
# SOLUTION #
# ######## #

View File

@ -743,7 +743,6 @@ def hough_transform_a_circle(image: Union[npt.NDArray[np.float64] , npt.NDArray[
# Loop through all nonzero pixels above treshold
for i in tqdm(range(len(indices)), desc='Hough transform'):
for r in range(0, r_end - r_start):
print(r)
y, x = indices[i]
a = x - r * np.cos(ga[y, x])
b = y - r * np.sin(ga[y, x])
@ -1057,7 +1056,8 @@ def display_matches(I1, pts1, I2, pts2):
plt.show()
def find_matches(image_a: npt.NDArray[np.float64],
image_b: npt.NDArray[np.float64]):
image_b: npt.NDArray[np.float64],
sigma=6, treshold=1e-6):
"""
Finds matches between two images.
@ -1065,8 +1065,8 @@ def find_matches(image_a: npt.NDArray[np.float64],
"""
# Get the keypoints
_, image_a_keypoints = harris_detector(image_a, 3, treshold=1e-6)
_, image_b_keypoints = harris_detector(image_b, 3, treshold=1e-6)
_, image_a_keypoints = harris_detector(image_a, sigma=sigma, treshold=treshold)
_, image_b_keypoints = harris_detector(image_b, sigma=sigma, treshold=treshold)
print("[+] Keypoints detected")
@ -1160,4 +1160,66 @@ def estimate_homography(image_a: npt.NDArray[np.float64],
# Reshape to 3x3
H = h.reshape(3, 3)
return H
return H
def ransac(image_a: npt.NDArray[np.float64], correspondences_a: npt.NDArray[np.float64],
           image_b: npt.NDArray[np.float64], correspondences_b: npt.NDArray[np.float64],
           iterations: int = 10000,
           threshold: float = 1.5):
    """
    Robustly estimate a homography between two images with RANSAC.

    Repeatedly fits a homography to a random minimal sample of 4
    correspondences, re-fits it on all of that model's inliers, and keeps
    the model with the most inliers overall.

    Parameters
    ----------
    image_a, image_b : grayscale images (forwarded to `estimate_homography`).
    correspondences_a, correspondences_b : (N, 2) arrays of matched (x, y)
        points; row i of `correspondences_a` matches row i of `correspondences_b`.
    iterations : number of random samples to try.
    threshold : maximum reprojection distance (pixels) for an inlier.

    Returns
    -------
    (homography, keypoints) : the best 3x3 homography as float64 and an
        (M, 4) array of its inlier correspondences as (xa, ya, xb, yb) rows.

    Raises
    ------
    ValueError : if no sampled model ever produced more than 4 inliers.
    """
    def _inlier_indices(homography):
        # Indices of correspondences whose point in image A, projected by
        # `homography`, lands within `threshold` pixels of its match in image B.
        inliers = []
        for idx, ((x_r, y_r), (x_t, y_t)) in enumerate(zip(correspondences_a, correspondences_b)):
            projected = np.dot(homography, [x_r, y_r, 1])
            projected = projected / projected[-1]  # back to inhomogeneous coordinates
            x_p, y_p = projected[:2]
            if np.sqrt((x_t - x_p) ** 2 + (y_t - y_p) ** 2) < threshold:
                inliers.append(idx)
        return inliers

    best_inliers = []
    best_homography = None
    for _ in tqdm(range(iterations), desc='RANSAC'):
        # Minimal sample: 4 distinct correspondences, stacked as (xa, ya, xb, yb).
        sample = np.random.choice(correspondences_a.shape[0], 4, replace=False)
        keypoints = np.concatenate((correspondences_a[sample], correspondences_b[sample]), axis=1)
        homography = estimate_homography(image_a, image_b, keypoints)
        candidate_inliers = _inlier_indices(homography)
        if len(candidate_inliers) > 4:
            # Refine: re-fit on all inliers of the minimal-sample model,
            # then re-score the refined model.
            refined = estimate_homography(
                image_a, image_b,
                np.concatenate((correspondences_a[candidate_inliers],
                                correspondences_b[candidate_inliers]), axis=1))
            refined_inliers = _inlier_indices(refined)
            if len(refined_inliers) > len(best_inliers):
                best_inliers = refined_inliers
                best_homography = refined

    if best_homography is None:
        # Previously this path crashed with AttributeError on a bare list;
        # fail explicitly instead.
        raise ValueError("RANSAC found no homography with more than 4 inliers")

    best_keypoints = np.concatenate(
        (correspondences_a[best_inliers], correspondences_b[best_inliers]), axis=1)
    return best_homography.astype(np.float64), best_keypoints