import numpy as np
import cv2
from matplotlib import pyplot as plt
from PIL import Image
from typing import Union
import numpy.typing as npt
import enum


class ImageType(enum.Enum):
    uint8 = 0
    float64 = 1


class DistanceMeasure(enum.Enum):
    euclidian_distance = 0
    chi_square_distance = 1
    intersection_distance = 2
    hellinger_distance = 3


def imread(path: str, type: ImageType) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
    """
    Reads an image in RGB order. If float64 is requested, the values are converted
    from uint8 to float64 and rescaled from [0, 255] to [0, 1].
    """
    I = Image.open(path).convert('RGB')  # PIL image.
    I = np.asarray(I)  # Converting to Numpy array.
    if type == ImageType.float64:
        I = I.astype(np.float64) / 255
        return I
    elif type == ImageType.uint8:
        return I
    raise Exception(f"Unrecognized image format! {type}")


def imread_gray(path: str, type: ImageType) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
    """
    Reads an image in grayscale. If float64 is requested, the values are converted
    from uint8 to float64 and rescaled from [0, 255] to [0, 1].
    """
    I = Image.open(path).convert('L')  # PIL image, opened and converted to grayscale.
    I = np.asarray(I)  # Converting to Numpy array.
    if type == ImageType.float64:
        I = I.astype(np.float64) / 255
        return I
    elif type == ImageType.uint8:
        return I
    raise Exception("Unrecognized image format!")


def signal_show(*signals):
    """
    Plots all given 1D signals in the same plot.
    Signals can be Python lists or 1D numpy arrays.
    """
    for s in signals:
        if type(s) == np.ndarray:
            s = s.squeeze()
        plt.plot(s)
    plt.show()


def convolve(I: np.ndarray, *ks):
    """
    Convolves input image I with all given kernels.

    :param I: Image, should be of type float64 and scaled from 0 to 1.
    :param ks: 2D kernels
    :return: Image convolved with all kernels.
    """
    for k in ks:
        k = np.flip(k)  # filter2D performs correlation, so flipping is necessary
        I = cv2.filter2D(I, cv2.CV_64F, k)
    return I


def transform_coloured_image_to_grayscale(image: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
    """
    Accepts a float64 picture with three colour channels and returns a float64
    grayscale image with one channel.
    """
    grayscale_image = np.zeros(image.shape[:2])
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            grayscale_image[i, j] = (image[i, j, 0] + image[i, j, 1] + image[i, j, 2]) / 3
    return grayscale_image


def invert_coloured_image_part(
        image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]],
        startx: int, endx: int, starty: int, endy: int
) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
    """
    Accepts an image and start and end positions for the x & y axes.
    Returns the whole image with the selected part inverted.
""" inverted_image = image.copy() if image.dtype.type == np.float64: for i in range(startx, endx): for j in range(starty, endy): inverted_image[i, j, 0] = 1 - image[i, j, 0] inverted_image[i, j, 1] = 1 - image[i, j, 1] inverted_image[i, j, 2] = 1 - image[i, j, 2] return inverted_image elif image.dtype.type == np.uint8: for i in range(startx, endx): for j in range(starty, endy): inverted_image[i, j, 0] = 255 - image[i, j, 0] inverted_image[i, j, 1] = 255 - image[i, j, 1] inverted_image[i, j, 2] = 255 - image[i, j, 2] return inverted_image raise Exception("Unrecognized image format!") def invert_coloured_image(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]) -> Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]: """ Accepts image and inverts it """ return invert_coloured_image_part(image, 0, image.shape[0], 0, image.shape[1]) def calculate_best_treshold_using_otsu_method(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]]) -> int: """ Accepts image and returns best treshold using otsu method """ if image.dtype.type == np.float64: im = image.copy() im = im * (255.0/im.max()) elif image.dtype.type == np.uint8: im = image.copy() else: raise Exception("Unrecognized image format!") treshold_range = np.arange(np.max(im) + 1) criterias = [] for treshold in treshold_range: # create the thresholded image thresholded_im = np.zeros(im.shape) thresholded_im[im >= treshold] = 1 # compute weights nb_pixels = im.size nb_pixels1 = np.count_nonzero(thresholded_im) weight1 = nb_pixels1 / nb_pixels weight0 = 1 - weight1 # if one the classes is empty, eg all pixels are below or above the threshold, that threshold will not be considered # in the search for the best threshold if weight1 == 0 or weight0 == 0: continue # find all pixels belonging to each class val_pixels1 = im[thresholded_im == 1] val_pixels0 = im[thresholded_im == 0] # compute variance of these classes var0 = np.var(val_pixels0) if len(val_pixels0) > 0 else 0 var1 = np.var(val_pixels1) if len(val_pixels1) > 0 else 0 criterias.append( weight0 * var0 + weight1 * var1) best_threshold = treshold_range[np.argmin(criterias)] return best_threshold def get_image_bins_for_loop(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], number_of_bins: int) -> npt.NDArray[np.float64]: """ Accepts image in the float64 format or uint8, returns normailzed image bins, histogram """ if image.dtype.type == np.float64 or image.dtype.type == np.uint8: bin_restrictions = np.linspace(np.min(image), np.max(image), num=number_of_bins) else: raise Exception("Unrecognized image format!") bins = np.zeros(number_of_bins).astype(np.float64) for pixel in image.reshape(-1): # https://stackoverflow.com/a/16244044 bins[np.argmax(bin_restrictions > pixel)] += 1 return bins / np.sum(bins) # Much faster implementation than for loop def get_image_bins(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], number_of_bins: int) -> npt.NDArray[np.float64]: """ Accepts image in the float64 format or uint8, returns normailzed image bins, histogram """ if image.dtype.type == np.float64 or image.dtype.type == np.uint8: bins = np.linspace(np.min(image), np.max(image), num=number_of_bins) else: raise Exception("Unrecognized image format!") # Put pixels into classes # ex. 
    binarray = np.digitize(image.reshape(-1), bins).astype(np.uint8)

    # Now count those values
    binarray = np.unique(binarray, return_counts=True)

    counts = binarray[1].astype(np.float64)  # Get the counts out of the tuple

    # Check if there is any empty bin
    empty_bins = []
    bins = binarray[0]
    for i in range(1, number_of_bins + 1):
        if i not in bins:
            empty_bins.append(i)

    # Add the empty bins with zero counts
    if empty_bins != []:
        for i in empty_bins:
            counts = np.insert(counts, i - 1, 0)

    return counts / np.sum(counts)


def get_image_bins_ND(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], number_of_bins: int) -> npt.NDArray[np.float64]:
    """
    Accepts a three channel image and returns its normalized 3D colour histogram
    with number_of_bins bins per channel.
    """
    bs = []
    hist = np.zeros((number_of_bins, number_of_bins, number_of_bins))
    if image.dtype.type == np.uint8:
        bins = np.linspace(0, 255, num=number_of_bins)
    elif image.dtype.type == np.float64:
        bins = np.linspace(0, 1, num=number_of_bins)
    else:
        raise Exception('Unsupported datatype!')

    for i in range(image.shape[2]):
        v = image[:, :, i].reshape(-1)
        bs.append(np.digitize(v, bins).astype(np.uint32))

    for i in range(len(bs[0])):
        hist[bs[2][i] - 1, bs[1][i] - 1, bs[0][i] - 1] += 1

    return hist / np.sum(hist)


def compare_two_histograms(h1: npt.NDArray[np.float64], h2: npt.NDArray[np.float64], method: DistanceMeasure) -> float:
    """
    Compares two histograms with the selected distance measure and returns the distance.
    """
    if method == DistanceMeasure.euclidian_distance:
        d = np.sqrt(np.sum(np.square(h1 - h2)))
    elif method == DistanceMeasure.chi_square_distance:
        d = 0.5 * np.sum(np.square(h1 - h2) / (h1 + h2 + np.finfo(float).eps))
    elif method == DistanceMeasure.intersection_distance:
        d = 1 - np.sum(np.minimum(h1, h2))
    elif method == DistanceMeasure.hellinger_distance:
        d = np.sqrt(0.5 * np.sum(np.square(np.sqrt(h1) - np.sqrt(h2))))
    else:
        raise Exception('Unsupported method chosen!')
    return float(d)


def apply_mask_on_image(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], mask: npt.NDArray[np.uint8]):
    """
    Accepts an image and applies the given binary mask to it.
    """
    image = image.copy()
    mask = np.expand_dims(mask, axis=2)
    image = mask * image
    return image


def simple_convolution(signal: npt.NDArray[np.float64], kernel: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
    """
    Convolves a 1D signal with a 1D kernel. Only positions where the kernel fully
    overlaps the signal are computed; the remaining output values stay zero.
    """
    N = int(np.ceil(len(kernel) / 2 - 1))
    n_conv = signal.size - kernel.size + 1
    print(N, signal.size - N, n_conv)
    convolved_signal = np.zeros(len(signal))
    rev_kernel = kernel[::-1].copy()
    for i in range(n_conv):
        convolved_signal[i] = np.dot(signal[i: i + kernel.size], rev_kernel)
    return convolved_signal


def get_gaussian_kernel(sigma: float):
    """
    Returns a normalized 1D Gaussian kernel for the given sigma.
    """
    # https://github.com/mikepound/convolve/blob/master/run.gaussian.py
    kernel_size = int(2 * np.ceil(3 * sigma) + 1)
    k_min_max = np.ceil(3 * sigma)
    k_interval = np.arange(-k_min_max, k_min_max + 1.)
    result = (1. / (np.sqrt(2. * np.pi) * sigma)) * np.exp(-np.square(k_interval) / (2. * np.square(sigma)))
    assert kernel_size == len(result)
    return result / np.sum(result)


def gaussfilter2D(image: Union[npt.NDArray[np.float64], npt.NDArray[np.uint8]], sigma: float):
    """
    Filters the image with a Gaussian kernel of the given sigma. The 2D filtering
    is applied separably: once along the rows and once along the columns.
    """
    kernel = get_gaussian_kernel(sigma).reshape(1, -1)
    filtered = cv2.filter2D(image, cv2.CV_64F, kernel)
    filtered = cv2.filter2D(filtered, cv2.CV_64F, kernel.T)
    return filtered


def gauss_noise(I, magnitude=.1):
    # input: image, magnitude of noise
    # output: modified image
    I = I.copy()
    return I + np.random.normal(size=I.shape) * magnitude


def sp_noise(I, percent=.1):
    # input: image, percent of corrupted pixels
    # output: modified image
    res = I.copy()
    res[np.random.rand(I.shape[0], I.shape[1]) < percent / 2] = 1
    res[np.random.rand(I.shape[0], I.shape[1]) < percent / 2] = 0
    return res
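

# A minimal usage sketch, run only when this module is executed directly.
# Assumption: no test image ships with the module, so a synthetic random image
# stands in for a real photo; the variable names below are illustrative only.
if __name__ == "__main__":
    # Random float64 "image" in [0, 1] with three colour channels.
    demo_image = np.random.rand(64, 64, 3).astype(np.float64)

    # Grayscale conversion, Otsu threshold and separable Gaussian smoothing.
    gray = transform_coloured_image_to_grayscale(demo_image)
    threshold = calculate_best_treshold_using_otsu_method(gray)
    smoothed = gaussfilter2D(gray, sigma=1.0)

    # 3D colour histograms of the clean and noisy image, compared with the Hellinger distance.
    h1 = get_image_bins_ND(demo_image, number_of_bins=8)
    h2 = get_image_bins_ND(gauss_noise(demo_image, magnitude=0.05), number_of_bins=8)
    distance = compare_two_histograms(h1, h2, DistanceMeasure.hellinger_distance)

    print(f"Otsu threshold: {threshold}")
    print(f"Smoothed image shape: {smoothed.shape}")
    print(f"Hellinger distance between clean and noisy histograms: {distance:.4f}")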