Well something is something

main
Gasper Spagnolo 2022-11-13 15:13:43 +01:00
parent 182432917f
commit 3a899115a6
4 changed files with 591 additions and 0 deletions

82
assignment3/solution.py Normal file

@@ -0,0 +1,82 @@
import numpy as np
import numpy.typing as npt
from matplotlib import pyplot as plt
import cv2
import uz_framework.image as uz_image
import uz_framework.text as uz_text
#################################################################
# EXERCISE 1: Global approach to image description              #
#################################################################
def ex1():
#one_a()
#two_b()
two_c()
def one_a() -> None:
"""
Follow the equations above and derive the equations used to compute first and
second derivatives with respect to y: Iy(x, y), Iyy(x, y), as well as the mixed derivative
Ixy(x, y)
"""
def two_b() -> None:
"""
Implement a function that computes the derivative of a 1-D Gaussian kernel
Implement the function gaussdx(sigma) that works the same as function gauss
from the previous assignment. Don't forget to normalize the kernel. Be careful as
the derivative is an odd function, so a simple sum will not do. Instead normalize the
kernel by dividing the values such that the sum of absolute values is 1. Effectively,
you have to divide each value by sum(abs(gx(x))).
"""
sigma = 1
kernel = uz_image.gaussdx(sigma)
print(kernel)
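    # Quick sanity check of the normalization described above: the derivative
    # kernel should sum to roughly 0, while its absolute values should sum to 1.
    print(np.sum(kernel), np.sum(np.abs(kernel)))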
def two_c() -> None:
"""
The properties of the filter can be analyzed by using an impulse response function.
This is performed as a convolution of the filter with a Dirac delta function. The
discrete version of the Dirac function is constructed as a finite image that has all
elements set to 0 except the central element, which is set to a high value (e.g. 1).
Generate a 1-D Gaussian kernel G and a Gaussian derivative kernel D.
What happens if you apply the following operations to the impulse image?
(a) First convolution with G and then convolution with GT
(b) First convolution with G and then convolution with DT
(c) First convolution with D and then convolution with GT
(d) First convolution with GT and then convolution with D.
(e) First convolution with DT and then convolution with G.
Is the order of operations important? Display the images of the impulse responses
for different combinations of operations.
"""
impulse = uz_image.generate_dirac_impulse(50)
gauss = np.array([uz_image.get_gaussian_kernel(3)])
gaussdx = np.array([uz_image.gaussdx(3)])
fig, axs = plt.subplots(2, 3)
# Plot impulse only
axs[0, 0].imshow(impulse, cmap='gray')
axs[0, 0].set_title('Impulse')
# Plot impulse after convolution with G and GT
g_gt_impulse = impulse.copy()
g_gt_impulse = cv2.filter2D(g_gt_impulse, cv2.CV_64F, gauss)
g_gt_impulse = cv2.filter2D(g_gt_impulse, cv2.CV_64F, gauss.T)
axs[0, 1].imshow(g_gt_impulse, cmap='gray')
axs[0, 1].set_title('impulse * G * GT')
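    # A sketch of the remaining combinations listed in the docstring; the panel
    # layout below is an assumption, only the order of the kernels matters.
    # (b) first G, then DT
    g_dt_impulse = cv2.filter2D(cv2.filter2D(impulse.copy(), cv2.CV_64F, gauss), cv2.CV_64F, gaussdx.T)
    axs[0, 2].imshow(g_dt_impulse, cmap='gray')
    axs[0, 2].set_title('impulse * G * DT')
    # (c) first D, then GT
    d_gt_impulse = cv2.filter2D(cv2.filter2D(impulse.copy(), cv2.CV_64F, gaussdx), cv2.CV_64F, gauss.T)
    axs[1, 0].imshow(d_gt_impulse, cmap='gray')
    axs[1, 0].set_title('impulse * D * GT')
    # (d) first GT, then D
    gt_d_impulse = cv2.filter2D(cv2.filter2D(impulse.copy(), cv2.CV_64F, gauss.T), cv2.CV_64F, gaussdx)
    axs[1, 1].imshow(gt_d_impulse, cmap='gray')
    axs[1, 1].set_title('impulse * GT * D')
    # (e) first DT, then G
    dt_g_impulse = cv2.filter2D(cv2.filter2D(impulse.copy(), cv2.CV_64F, gaussdx.T), cv2.CV_64F, gauss)
    axs[1, 2].imshow(dt_g_impulse, cmap='gray')
    axs[1, 2].set_title('impulse * DT * G')
    # Convolution is commutative and associative, so swapping the order of the two
    # 1-D passes should not change the final response (up to numerical error).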
plt.show()
# ######## #
# SOLUTION #
# ######## #
def main():
ex1()
#ex2()
#ex3()
if __name__ == '__main__':
main()

500
uz_framework/image.py Normal file

@@ -0,0 +1,500 @@
import numpy as np
import cv2
from matplotlib import pyplot as plt
from PIL import Image
from typing import Union
import numpy.typing as npt
import enum
class ImageType(enum.Enum):
uint8 = 0
float64 = 1
class DistanceMeasure(enum.Enum):
euclidian_distance = 0
chi_square_distance = 1
intersection_distance = 2
hellinger_distance = 3
def imread(path: str, type: ImageType) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Reads an image in RGB order. Image type is transformed from uint8 to float, and
range of values is reduced from [0, 255] to [0, 1].
"""
I = Image.open(path).convert('RGB') # PIL image.
I = np.asarray(I) # Converting to Numpy array.
if type == ImageType.float64:
I = I.astype(np.float64) / 255
return I
elif type == ImageType.uint8:
return I
raise Exception(f"Unrecognized image format! {type}")
def imread_gray(path: str, type: ImageType) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Reads an image in gray. Image type is transformed from uint8 to float, and
range of values is reduced from [0, 255] to [0, 1].
"""
I = Image.open(path).convert('L') # PIL image opening and converting to gray.
I = np.asarray(I) # Converting to Numpy array.
if type == ImageType.float64:
I = I.astype(np.float64) / 255
return I
elif type == ImageType.uint8:
return I
raise Exception("Unrecognized image format!")
def signal_show(*signals):
"""
Plots all given 1D signals in the same plot.
Signals can be Python lists or 1D numpy array.
"""
for s in signals:
if type(s) == np.ndarray:
s = s.squeeze()
plt.plot(s)
plt.show()
def convolve(I: np.ndarray, *ks):
"""
Convolves input image I with all given kernels.
:param I: Image, should be of type float64 and scaled from 0 to 1.
:param ks: 2D Kernels
:return: Image convolved with all kernels.
"""
for k in ks:
k = np.flip(k) # filter2D performs correlation, so flipping is necessary
I = cv2.filter2D(I, cv2.CV_64F, k)
return I
def transform_coloured_image_to_grayscale(image: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
"""
Accepts float64 picture with three colour channels and returns float64 grayscale image
with one channel.
"""
grayscale_image = np.zeros(image.shape[:2])
for i in range(image.shape[0]):
for j in range(image.shape[1]):
grayscale_image[i, j] = (image[i, j, 0] + image[i,j, 1] + image[i, j, 2]) / 3
return grayscale_image
def invert_coloured_image_part(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], startx: int, endx: int, starty: int, endy: int) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Accepts an image, plus start and end positions for the x & y axes. Returns the whole image with the selected part inverted.
"""
inverted_image = image.copy()
if image.dtype.type == np.float64:
for i in range(startx, endx):
for j in range(starty, endy):
inverted_image[i, j, 0] = 1 - image[i, j, 0]
inverted_image[i, j, 1] = 1 - image[i, j, 1]
inverted_image[i, j, 2] = 1 - image[i, j, 2]
return inverted_image
elif image.dtype.type == np.uint8:
for i in range(startx, endx):
for j in range(starty, endy):
inverted_image[i, j, 0] = 255 - image[i, j, 0]
inverted_image[i, j, 1] = 255 - image[i, j, 1]
inverted_image[i, j, 2] = 255 - image[i, j, 2]
return inverted_image
raise Exception("Unrecognized image format!")
def invert_coloured_image(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Accepts image and inverts it
"""
return invert_coloured_image_part(image, 0, image.shape[0], 0, image.shape[1])
def calculate_best_treshold_using_otsu_method(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]) -> int:
"""
Accepts an image and returns the best threshold computed with Otsu's method
"""
if image.dtype.type == np.float64:
im = image.copy()
im = im * (255.0/im.max())
elif image.dtype.type == np.uint8:
im = image.copy()
else:
raise Exception("Unrecognized image format!")
treshold_range = np.arange(np.max(im) + 1)
criterias = []
for treshold in treshold_range:
# create the thresholded image
thresholded_im = np.zeros(im.shape)
thresholded_im[im >= treshold] = 1
# compute weights
nb_pixels = im.size
nb_pixels1 = np.count_nonzero(thresholded_im)
weight1 = nb_pixels1 / nb_pixels
weight0 = 1 - weight1
# if one of the classes is empty, e.g. all pixels are below or above the threshold, that threshold will not be considered
# in the search for the best threshold
if weight1 == 0 or weight0 == 0:
continue
# find all pixels belonging to each class
val_pixels1 = im[thresholded_im == 1]
val_pixels0 = im[thresholded_im == 0]
# compute variance of these classes
var0 = np.var(val_pixels0) if len(val_pixels0) > 0 else 0
var1 = np.var(val_pixels1) if len(val_pixels1) > 0 else 0
criterias.append( weight0 * var0 + weight1 * var1)
best_threshold = treshold_range[np.argmin(criterias)]
return best_threshold
def get_image_bins_for_loop(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], number_of_bins: int) -> npt.NDArray[np.float64]:
"""
Accepts image in the float64 format or uint8, returns normalized
image bins, histogram
"""
if image.dtype.type == np.float64 or image.dtype.type == np.uint8:
bin_restrictions = np.linspace(np.min(image), np.max(image), num=number_of_bins)
else:
raise Exception("Unrecognized image format!")
bins = np.zeros(number_of_bins).astype(np.float64)
for pixel in image.reshape(-1):
# https://stackoverflow.com/a/16244044
bins[np.argmax(bin_restrictions > pixel)] += 1
return bins / np.sum(bins)
# Much faster implementation than for loop
def get_image_bins(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], number_of_bins: int) -> npt.NDArray[np.float64]:
"""
Accepts image in the float64 format or uint8, returns normalized
image bins, histogram
"""
if image.dtype.type == np.float64 or image.dtype.type == np.uint8:
bins = np.linspace(np.min(image), np.max(image), num=number_of_bins)
else:
raise Exception("Unrecognized image format!")
# Put pixels into classes
# ex. binsize = 10 then 0.4 would map into 4
binarray = np.digitize(image.reshape(-1), bins).astype(np.uint8)
# Now count those values
binarray = np.unique(binarray, return_counts=True)
counts = binarray[1].astype(np.float64) # Get the counts out of tuple
# Check if there is any empty bin
empty_bins = []
bins = binarray[0]
for i in range(1, number_of_bins + 1):
if i not in bins:
empty_bins.append(i)
# Add empty bins with zeros
if empty_bins != []:
for i in empty_bins:
counts = np.insert(counts, i - 1, 0)
    return counts / np.sum(counts)  # normalize so the bins sum to 1, as the docstring states
def get_image_bins_ND(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], number_of_bins: int) -> npt.NDArray[np.float64]:
"""
Accepts image in the float64 format or uint8 and number of bins
Returns normalized image histogram bins
"""
bs = []
hist = np.zeros((number_of_bins, number_of_bins, number_of_bins))
bins = np.linspace(np.min(image), np.max(image), num=number_of_bins)
for i in range(image.shape[2]):
v = image[:, :, i].reshape(-1)
bs.append(np.digitize(v, bins).astype(np.uint32))
for i in range(len(bs[0])):
hist[bs[2][i] -1, bs[1][i] -1, bs[0][i] - 1] += 1
return hist / np.sum(hist)
def compare_two_histograms(h1: npt.NDArray[np.float64], h2: npt.NDArray[np.float64], method: DistanceMeasure) -> float:
"""
Accepts two histograms and method of comparison
Returns distance between them
"""
if method == DistanceMeasure.euclidian_distance:
d = np.sqrt(np.sum(np.square(h1 - h2)))
elif method == DistanceMeasure.chi_square_distance:
d = 0.5 * np.sum(np.square(h1 - h2) / (h1 + h2 + np.finfo(float).eps))
elif method == DistanceMeasure.intersection_distance:
d = 1 - np.sum(np.minimum(h1, h2))
elif method == DistanceMeasure.hellinger_distance:
d = np.sqrt(0.5 * np.sum(np.square(np.sqrt(h1) - np.sqrt(h2))))
else:
raise Exception('Unsupported method chosen!')
return d.astype(float)
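# A quick illustrative check (not part of the framework API): for a normalized
# histogram h, every measure above reports a distance of 0 against itself,
# e.g. compare_two_histograms(h, h, DistanceMeasure.hellinger_distance) == 0.0.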
def apply_mask_on_image(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], mask: npt.NDArray[np.uint8]) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Accepts an image and applies the mask to it
"""
image = image.copy()
mask = np.expand_dims(mask, axis=2)
image = mask * image
return image
def simple_convolution(signal: npt.NDArray[np.float64], kernel: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
"""
Accepts: signal & kernel
Returns: convolved signal with a kernel
"""
    N = int(np.ceil(len(kernel) / 2 - 1))  # half-width of the (odd) kernel
    n_conv = signal.size - kernel.size + 1  # number of 'valid' output samples
    convolved_signal = np.zeros(len(signal))
    rev_kernel = kernel[::-1].copy()
    for i in range(n_conv):
        # Writing to index i keeps the valid part of the result left-aligned;
        # writing to index i + N would center it within the output signal.
        convolved_signal[i] = np.dot(signal[i: i + kernel.size], rev_kernel)
    return convolved_signal
def simple_convolution_improved(signal: npt.NDArray[np.float64], kernel: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
"""
Accepts: signal & kernel
Returns: convolved signal with a kernel
Improved method replicates the edges of the signal
"""
    signal_len = signal.size
    kernel_len = kernel.size
    signal = signal.copy()
    # Replicate the edge values so that every output sample has a full window
    EDGE_FRONT = signal[0]
    EDGE_BACK = signal[-1]
    EXTEND_RANGE = int(np.floor(kernel.size / 2))
    # Prepend and append the replicated edges
    front = [EDGE_FRONT for _ in range(EXTEND_RANGE)]
    back = [EDGE_BACK for _ in range(EXTEND_RANGE)]
    signal = np.insert(signal, 0, front, axis=0)
    signal = np.append(signal, back, axis=0)
    convolved_signal = np.zeros(signal_len)
    rev_kernel = kernel[::-1].copy()
    # With an odd kernel the padded signal yields exactly signal_len valid positions
    for i in range(signal_len):
        convolved_signal[i] = np.dot(signal[i: i + kernel.size], rev_kernel)
    return convolved_signal
def get_gaussian_kernel(sigma: float) -> npt.NDArray[np.float64]:
"""
Accepts sigma
Returns gaussian kernel
"""
# https://github.com/mikepound/convolve/blob/master/run.gaussian.py
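    # Samples G(x) = 1 / (sqrt(2*pi) * sigma) * exp(-x^2 / (2 * sigma^2)) on the
    # interval [-ceil(3*sigma), ceil(3*sigma)] and normalizes the weights to sum to 1.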
kernel_size = int(2 * np.ceil(3*sigma) + 1)
k_min_max = np.ceil(3*sigma)
k_interval = np.arange(-k_min_max, k_min_max + 1.)
result = (1. / (np.sqrt(2. * np.pi )* sigma)) * np.exp(- (np.square(k_interval)) / (2.*np.square(sigma)))
assert(kernel_size == len(result))
return result / np.sum(result)
def sharpen_image(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], sharpen_factor=1.0) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Accepts: image & sharpen factor
Returns: sharpened image
    https://blog.demofox.org/2022/02/26/image-sharpening-convolution-kernels/
    <-- good explanation of the sharpening kernel (also covered in the lecture slides)
"""
sharpened_image = image.copy()
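    # The kernel below equals 2 * identity - (1/9) * box filter, i.e. the original
    # image plus the detail removed by box blurring (unsharp masking). Note that
    # sharpen_factor scales the whole kernel, so values other than 1 also scale
    # the overall brightness.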
KERNEL = np.array([[-1, -1, -1],
[-1, 17, -1],
[-1, -1,-1]]) * 1./9. * sharpen_factor
if image.dtype.type == np.float64:
sharpened_image = cv2.filter2D(sharpened_image, cv2.CV_64F, KERNEL)
elif image.dtype.type == np.uint8:
sharpened_image = cv2.filter2D(sharpened_image, cv2.CV_8U, KERNEL)
return sharpened_image
def gaussfilter2D(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], sigma: float) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Accepts: image, sigma
Applies a Gaussian filter (smoothing) to the image
returns: filtered_image
"""
filtered_image = image.copy()
kernel = np.array([get_gaussian_kernel(sigma)])
filtered_image = cv2.filter2D(filtered_image, cv2.CV_64F, kernel)
filtered_image = cv2.filter2D(filtered_image, cv2.CV_64F, kernel.T)
return filtered_image
def gaussdx(sigma: float) -> npt.NDArray[np.float64]:
"""
Accepts sigma
Returns the derivative of a 1-D Gaussian kernel
"""
kernel_size = int(2 * np.ceil(3*sigma) + 1)
k_min_max = np.ceil(3*sigma)
k_interval = np.arange(-k_min_max, k_min_max + 1.)
result = - (1. / (np.sqrt(2. * np.pi )* np.power(sigma, 3))) * k_interval* np.exp(- (np.square(k_interval)) / (2.*np.square(sigma)))
assert(kernel_size == len(result))
return result / np.sum(np.abs(result))
def simple_median(signal: npt.NDArray[np.float64], width: int) -> npt.NDArray[np.float64]:
"""
Accepts: signal & width
returns signal improved using median filter
"""
    if width % 2 == 0:
        raise Exception('Median filter width must be odd')
    original = signal.copy()
    filtered = signal.copy()
    # Read from the unmodified signal so already-filtered samples do not leak
    # into later windows
    for i in range(len(signal) - width + 1):
        middle_element = i + int(np.floor(width / 2))
        filtered[middle_element] = np.median(original[i:i + width])
    return filtered
def apply_median_method_2D(image:Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], width: int) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Accepts: image & filter width
returns: image with median filter applied
"""
    if width % 2 == 0:
        raise Exception('Median filter width must be odd')
    image = image.copy()
    W_HALF = int(np.floor(width / 2))
    padded_image = np.pad(image, W_HALF, mode='edge')
    IMAGE_HEIGHT = image.shape[0]  # y
    IMAGE_WIDTH = image.shape[1]  # x
    for y in range(IMAGE_HEIGHT):
        for x in range(IMAGE_WIDTH):
            # The window centred on image[y, x] starts at (y, x) in the padded image
            window = padded_image[y:y + width, x:x + width]
            if image.dtype.type == np.uint8:
                image[y][x] = int(np.median(window))
            else:
                image[y][x] = np.median(window)
    return image
def filter_laplace(image:Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], sigma: float) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Accepts: image & sigma
returns: image with laplace filter applied
"""
# Prepare unit impulse and gauss kernel
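    # The difference (unit impulse - Gaussian) is a high-pass kernel: it keeps
    # exactly the detail that Gaussian smoothing removes, which is the Laplacian
    # approximation applied separably below.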
unit_impulse = np.zeros((1, 2 * int(np.ceil(3*sigma)) + 1))
unit_impulse[0][int(np.ceil(unit_impulse.size /2)) - 1]= 1
gauss_kernel = np.array([get_gaussian_kernel(sigma)])
assert(len(gauss_kernel[0]) == len(unit_impulse[0]))
laplacian_filter = unit_impulse - gauss_kernel[0]
# Now apply laplacian filter
applied_by_x = cv2.filter2D(image, -1, laplacian_filter)
applied_by_y = cv2.filter2D(applied_by_x, -1, laplacian_filter.T)
return applied_by_y
def gauss_noise(I, magnitude=.1) -> npt.NDArray[np.float64]:
"""
Accepts: image & magnitude
Returns: image with gaussian noise applied
"""
# input: image, magnitude of noise
# output: modified image
I = I.copy()
return I + np.random.normal(size=I.shape) * magnitude
def sp_noise(I, percent=.1) -> npt.NDArray[np.float64]:
"""
Accepts: image & percent
Returns: image with salt and pepper noise applied
"""
# input: image, percent of corrupted pixels
# output: modified image
res = I.copy()
res[np.random.rand(I.shape[0], I.shape[1]) < percent / 2] = 1
res[np.random.rand(I.shape[0], I.shape[1]) < percent / 2] = 0
return res
def sp_noise1D(signal, percent=.1) -> npt.NDArray[np.float64]:
"""
Accepts: signal & percent
Returns: signal with salt and pepper noise applied
"""
signal = signal.copy()
signal[np.random.rand(signal.shape[0]) < percent / 2] = 2
signal[np.random.rand(signal.shape[0]) < percent / 2] = 1
signal[np.random.rand(signal.shape[0]) < percent / 2] = 4
signal[np.random.rand(signal.shape[0]) < percent / 2] = 0.4
return signal
def sum_two_grayscale_images(image_a: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], image_b :Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
"""
Accepts: image_a, image_b
Returns: the average of image_a and image_b
"""
# Merge image_a and image_b
return (image_a + image_b)/ 2
def generate_dirac_impulse(size: int) -> npt.NDArray[np.float64]:
"""
Accepts: size
Returns: a Dirac impulse image of the given size
"""
dirac_impulse = np.zeros((size, size))
dirac_impulse[int(size/2), int(size/2)] = 1
return dirac_impulse

9
uz_framework/text.py Normal file

@@ -0,0 +1,9 @@
import numpy as np
def read_data(filename: str):
# reads a numpy array from a text file
with open(filename) as f:
s = f.read()
return np.fromstring(s, sep=' ')
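# Example usage (hypothetical file name): read_data('data/signal.txt') returns a
# 1-D float64 array with one entry per whitespace-separated number in the file.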