# uz_assignments/assignment3/solution.py
import numpy as np
import numpy.typing as npt
from matplotlib import pyplot as plt
import cv2
import uz_framework.image as uz_image
import uz_framework.text as uz_text
####################################
# EXERCISE 1: Image derivatives    #
####################################
def ex1():
    """Run the exercise 1 demos; only the currently selected part is enabled."""
    # one_a()
    # one_b()
    # one_c()
    one_d()
def one_a() -> None:
    """
    Theoretical task (answered on paper, no code): starting from the given
    equations, derive the first and second derivatives with respect to y --
    Iy(x, y) and Iyy(x, y) -- as well as the mixed derivative Ixy(x, y).
    """
def one_b() -> None:
    """
    Print the derivative of a 1-D Gaussian kernel for several sigmas.

    gaussdx(sigma) works like gauss() from the previous assignment, but the
    derivative is an odd function, so a plain sum cannot normalize it;
    instead each value is divided by sum(abs(gx(x))) so that the sum of
    absolute values equals 1.
    """
    for sigma in (0.5, 1, 2):
        print(uz_image.gaussdx(sigma))
def one_c() -> None:
    """
    Analyze Gaussian (G) and Gaussian-derivative (D) filters via their
    impulse responses.

    A discrete Dirac impulse (all zeros except a high central value) is
    convolved with different kernel orderings:
      (a) G then GT, (b) G then DT, (c) D then GT,
      (d) GT then D,  (e) DT then G,
    and the results are shown side by side to illustrate that the order of
    (separable) convolutions does not matter.
    """
    impulse = uz_image.generate_dirac_impulse(50)
    gauss = np.array([uz_image.get_gaussian_kernel(3)])
    gaussdx = np.array([uz_image.gaussdx(3)])

    # cv2.filter2D computes correlation rather than convolution, so the
    # kernels are flipped beforehand to obtain true convolution.
    gauss = np.flip(gauss, axis=1)
    gaussdx = np.flip(gaussdx, axis=1)

    fig, axs = plt.subplots(2, 3)
    axs[0, 0].imshow(impulse, cmap='gray')
    axs[0, 0].set_title('Impulse')

    # (row, col, first kernel, second kernel, panel title)
    combinations = [
        (1, 0, gauss, gauss.T, 'impulse * G * GT'),
        (0, 1, gauss, gaussdx.T, 'impulse * G * DT'),
        (0, 2, gaussdx, gauss.T, 'impulse * D * GT'),
        (1, 1, gauss.T, gaussdx, 'impulse * GT * D'),
        (1, 2, gaussdx.T, gauss, 'impulse * DT * G'),
    ]
    for row, col, first, second, title in combinations:
        response = cv2.filter2D(impulse.copy(), cv2.CV_64F, first)
        response = cv2.filter2D(response, cv2.CV_64F, second)
        axs[row, col].imshow(response, cmap='gray')
        axs[row, col].set_title(title)

    plt.show()
def one_d() -> None:
    """
    Display first-order (Ix, Iy), second-order (Ixx, Ixy, Iyy) partial
    derivatives and the gradient magnitude/angle of museum.jpg in one window.

    Magnitude is m(x, y) = sqrt(Ix^2 + Iy^2); angles use np.arctan2 inside
    gradient_magnitude to avoid division by zero.
    """
    museum = uz_image.imread_gray('./images/museum.jpg', uz_image.ImageType.float64)
    i_x, i_y = uz_image.derive_image_first_order(museum, 1)
    (i_xx, i_xy), (_, i_yy) = uz_image.derive_image_second_order(museum, 1)
    magnitude, angle = uz_image.gradient_magnitude(museum, 1)

    fig, axs = plt.subplots(2, 4)
    fig.suptitle('Museum')

    # (row, col, image, panel title)
    panels = [
        (0, 0, museum, 'Original'),
        (0, 1, i_x, 'I_x'),
        (0, 2, i_y, 'I_y'),
        (0, 3, magnitude, 'I_mag'),
        (1, 0, i_xx, 'I_xx'),
        (1, 1, i_xy, 'I_xy'),
        (1, 2, i_yy, 'I_yy'),
        (1, 3, angle, 'I_dir'),
    ]
    for row, col, image, title in panels:
        axs[row, col].imshow(image, cmap='gray')
        axs[row, col].set_title(title)

    plt.show()
####################################
# EXERCISE 2: Edges in images      #
####################################
def ex2():
    """Run the exercise 2 demos; only the currently selected parts are enabled."""
    # two_a()
    two_b()
    two_c()
def two_a():
    """
    Primitive edge detection on museum.jpg by thresholding gradient magnitude.

    findedges keeps a binary map Ie with Ie(x, y) = 1 where
    Imag(x, y) >= theta and 0 otherwise; experiment with theta to see when
    all edges become clearly visible.
    """
    sigma, theta = 0.2, 0.16
    museum = uz_image.imread_gray('./images/museum.jpg', uz_image.ImageType.float64)
    edge_map = uz_image.find_edges_primitive(museum, sigma, theta)
    plt.imshow(edge_map, cmap='gray')
    plt.show()
def two_b():
    """
    Edge detection with non-maxima suppression on museum.jpg.

    Magnitude thresholding alone yields wide edges; NMS walks every pixel's
    8-neighborhood along the gradient direction and zeroes the pixel unless
    its derivative magnitude is the local maximum, thinning edges to one
    pixel width (no sub-pixel interpolation required).
    """
    sigma, theta = 1, 0.01
    museum = uz_image.imread_gray('./images/museum.jpg', uz_image.ImageType.float64)
    thinned_edges = uz_image.find_edges_nms(museum, sigma, theta)
    plt.imshow(thinned_edges, cmap='gray')
    plt.show()
def two_c():
    """
    Full Canny edge detection on museum.jpg: NMS plus hysteresis tracking.

    Hysteresis uses two thresholds t_low < t_high: pixels above t_high are
    kept, pixels below t_low are discarded, and in-between pixels survive
    only when connected to a pixel above t_high (e.g. via
    cv2.connectedComponentsWithStats inside the helper).
    """
    sigma, theta = 1, 0.02
    t_low, t_high = 0.04, 0.16
    museum = uz_image.imread_gray('./images/museum.jpg', uz_image.ImageType.float64)
    tracked_edges = uz_image.find_edges_canny(museum, sigma, theta, t_low, t_high)
    plt.imshow(tracked_edges, cmap='gray')
    plt.show()
##############################################
# EXERCISE 3: Detecting lines (Hough space)  #
##############################################
def ex3():
    """Run the exercise 3 demos; only the currently selected part is enabled."""
    # three_a()
    three_b()
    # three_c()
def three_a():
    """
    Show how single points map to sinusoids in the Hough (rho, theta) space.

    For each test point, a 300x300 accumulator is filled with the sinusoid
    of all lines passing through that point; moving the point shows how the
    sinusoid changes.
    """
    test_points = [(10, 10), (30, 60), (50, 20), (80, 90)]

    fig, axs = plt.subplots(2, 2)
    fig.suptitle('Trasformation of points into hugh space')
    for index, (x, y) in enumerate(test_points):
        accumulator = uz_image.hough_transform_a_point(x, y, 300)
        panel = axs[index // 2, index % 2]
        panel.imshow(accumulator)
        panel.set_title(f'x = {x}, y = {y}')
    plt.show()
def three_b():
    """
    Test hough_find_lines on a synthetic image and on real edge maps.

    hough_find_lines accepts a binary image, separate bin counts for theta
    and rho, and a threshold; theta spans [-pi/2, pi/2] and rho spans
    [-D, D] with D the image-diagonal length. It is run on a 100x100 image
    with two white pixels, and on Canny edge maps of oneline.png and
    rectangle.png; the accumulators are displayed in the bottom row.
    """
    sigma, theta = 1, 0.02
    t_low, t_high = 0.04, 0.16

    synthetic = np.zeros((100, 100))
    synthetic[10, 10] = 1
    synthetic[10, 20] = 1

    oneline = uz_image.imread_gray('./images/oneline.png', uz_image.ImageType.float64)
    rectangle = uz_image.imread_gray('./images/rectangle.png', uz_image.ImageType.float64)
    oneline_edges = uz_image.find_edges_canny(oneline, sigma, theta, t_low, t_high)
    rectangle_edges = uz_image.find_edges_canny(rectangle, sigma, theta, t_low, t_high)

    fig, axs = plt.subplots(3, 3)
    # Row 0: input images.
    axs[0, 0].imshow(synthetic, cmap='gray')
    axs[0, 0].set_title('Synthetic image')
    axs[0, 1].imshow(oneline, cmap='gray')
    axs[0, 1].set_title('Oneline image')
    axs[0, 2].imshow(rectangle, cmap='gray')
    axs[0, 2].set_title('Rectangle image')
    # Row 1: edge maps (the synthetic image is already binary, so its slot is hidden).
    axs[1, 0].set_visible(False)
    axs[1, 1].imshow(oneline_edges, cmap='gray')
    axs[1, 2].imshow(rectangle_edges, cmap='gray')
    # Row 2: Hough accumulators.
    axs[2, 0].imshow(uz_image.hough_find_lines(synthetic, 200, 200, 0.2))
    axs[2, 1].imshow(uz_image.hough_find_lines(oneline_edges, 200, 200, 0.2))
    axs[2, 2].imshow(uz_image.hough_find_lines(rectangle_edges, 200, 200, 0.2))
    plt.show()
# ######## #
# SOLUTION #
# ######## #
def main():
    """Entry point: run whichever exercise is currently uncommented."""
    # ex1()
    # ex2()
    ex3()


if __name__ == '__main__':
    main()