├── Exercise 1-Image Manipulation.
│   ├── bears.jpg
│   ├── ex1_image_manipulation.py
│   ├── ex2_histogram.py
│   └── tree.jpg
├── Exercise 10-Windowed fourier and wavelet transformation
│   ├── IPPtools.py
│   ├── IPPtools.pyc
│   ├── ex1_windowed_FT_solution.py
│   ├── ex2_wavelet_transformation_solution.py
│   ├── script_used_in_tutorial
│   │   └── ex1_windowed_FT_tutoring.py
│   └── tree.jpg
├── Exercise 11-Least Square
│   ├── IPPtools.py
│   ├── IPPtools.pyc
│   ├── ex1_windowed_FT_solution.py
│   ├── ex2_wavelet_transformation_solution.py
│   ├── script_used_in_tutorial
│   │   └── ex1_windowed_FT_tutoring.py
│   └── tree.jpg
├── Exercise 2-Convolution And Filtering
│   ├── ex1_convolution.py
│   ├── ex2_laplace_filtering.py
│   ├── tree.jpg
│   └── venice.jpg
├── Exercise 3-Interpolation
│   ├── ex1_Interpolation.py
│   ├── ex2_spline_order.py
│   └── tree.jpg
├── Exercise 4-Segmentation
│   ├── ex1_segmentation.py
│   └── stars.jpg
├── Exercise 5-Wave Propagation
│   ├── ex1_fesnel_propagation.py
│   ├── ex2_fraunhofer_propagation.py
│   ├── tum.png
│   └── wavefront.txt
├── Exercise 6-Phase Retrieval
│   ├── ex1_paganin_phase_retrieval.py
│   ├── ex2_iterative_phase_retrieval.py
│   └── proj.npy
├── Exercise 7-Resolution and Noise
│   ├── 2018_06_07_Resolution_and_Noise_filled.pdf
│   ├── ex1_correlation.py
│   ├── ex2_deconvolution.py
│   ├── tree.jpg
│   ├── worldA.jpg
│   └── worldB.jpg
├── Exercise 8 - Tomography
│   ├── Head_CT_scan.jpg
│   ├── backup_filtered_sinogram.npy
│   ├── backup_sinogram.npy
│   └── ex1_tomography.py
├── Exercise 9- Grating based phase contrast
│   ├── data
│   │   ├── data_stepping_0000.npy
│   │   ├── data_stepping_0001.npy
│   │   ├── data_stepping_0002.npy
│   │   ├── data_stepping_0003.npy
│   │   ├── data_stepping_0004.npy
│   │   ├── data_stepping_0005.npy
│   │   ├── data_stepping_0006.npy
│   │   ├── data_stepping_0007.npy
│   │   ├── data_stepping_0008.npy
│   │   ├── data_stepping_0009.npy
│   │   ├── data_stepping_0010.npy
│   │   ├── flat_stepping_0000.npy
│   │   ├── flat_stepping_0001.npy
│   │   ├── flat_stepping_0002.npy
│   │   ├── flat_stepping_0003.npy
│   │   ├── flat_stepping_0004.npy
│   │   ├── flat_stepping_0005.npy
│   │   ├── flat_stepping_0006.npy
│   │   ├── flat_stepping_0007.npy
│   │   ├── flat_stepping_0008.npy
│   │   ├── flat_stepping_0009.npy
│   │   └── flat_stepping_0010.npy
│   └── ex1_interferometry_solution.py
└── README.md

/Exercise 1-Image Manipulation./bears.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 1-Image Manipulation./bears.jpg
--------------------------------------------------------------------------------
/Exercise 1-Image Manipulation./ex1_image_manipulation.py:
--------------------------------------------------------------------------------
1 | """
2 | 19.04.2018
3 | Image Processing Physics TU Muenchen
4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang)
5 |
6 | ex1_image_manipulation.py
7 |
8 | Using numpy, matplotlib and scipy
9 | The goal of this exercise is for you to become familiar with the important
10 | packages numpy (for n-dimensional array manipulations) and matplotlib (for
11 | matlab-like plotting and visualization).
12 | Your task is to load a provided image into a numpy array and do some basic
13 | manipulations.
14 |
15 | You need to replace the ??? in the code with the required commands
16 | """
17 | import numpy as np
18 | import matplotlib.pyplot as plt
19 | import scipy.ndimage as nd
20 |
21 | # use plt.imread to read in 'tree.jpg' as numpy array, select
22 | # only the red channel. Check the image dimensions with img.shape.
23 | # img_red should contain a 2-dimensional array 24 | # Please note that this is actually a gray scale image and all 3 25 | # image channels are the same. 26 | 27 | img = plt.imread('tree.jpg') 28 | 29 | # Choose the red color channel. For exploring the shape of your image type 30 | # img.shape into your interpreter to know which axis of the array to take. 31 | # Your img has the shape (640, 640, 3) for 640x640 pixels and 3 color channels. 32 | # To leave the for example the first axis untouched type img[:, something] 33 | # You can always check the result by img_red.shape -> should be (640, 640) 34 | 35 | img_red = img[:, :, 0] 36 | 37 | # show img_red with plt.imshow 38 | 39 | plt.figure(1) 40 | plt.imshow(img_red, cmap='gray') 41 | 42 | # Using imread, the image values returns unsigned integers between 0 and 255 43 | # Add a colorbar to verify the range of values. 44 | 45 | plt.colorbar() 46 | 47 | # Create a new numpy array that is the subarray containing only the tree in 48 | # the image. Then invert the intensity values of the small subimage and call 49 | # the resulting array img_crop_inv 50 | 51 | # Use slicing to select the tree (get the coordinates by looking at the image) 52 | 53 | img_crop = img_red[360:490, 360:490].copy() 54 | 55 | # Invert the image by subtracting from its maximum. You can find the maximum by 56 | # calling .max() 57 | 58 | img_crop_inv = img_crop.max() - img_crop 59 | 60 | plt.figure(2) 61 | plt.imshow(img_crop_inv, cmap='gray') 62 | plt.colorbar() 63 | 64 | # apply a threshold to img_red to make a binary image separating the 65 | # tree from its background 66 | 67 | # Define a threshold. You can check by looking at the image colorbar, if your 68 | # threshold is appropriate 69 | 70 | threshold = 60 71 | 72 | # Thresholding is possible by a simple "<" or ">" sign and the threshold value. 73 | # You do not have to explicitly loop over the image 74 | 75 | img_binary = img_red < threshold 76 | 77 | plt.figure(3) 78 | plt.imshow(img_binary, cmap='gray') 79 | 80 | # Plot a vertical profile line through the tree of img_red 81 | # Select a column of the image via slicing. Your result line_tree should be a 82 | # 1D array of the shape 640. 83 | 84 | line_tree = img_red[:, 425] 85 | 86 | plt.figure(4) 87 | plt.plot(line_tree) 88 | 89 | # Generate a matrix that consists only of zeros of dimension 400 x 400 pixels 90 | 91 | img_seg = np.zeros((400, 400)) 92 | cs = img_crop.shape 93 | ss = img_seg.shape 94 | 95 | # Place the subarray containing just the tree (img_crop) in the center of 96 | # img_seg 97 | 98 | img_seg[(ss[0]//2 - cs[0]//2):(ss[0]//2 + cs[0]//2), 99 | (ss[1]//2 - cs[1]//2):(ss[1]//2 + cs[1]//2)] = img_crop 100 | 101 | # Have a look at img_seg with imshow 102 | 103 | plt.figure(5) 104 | plt.imshow(img_seg, cmap='gray') 105 | 106 | # Use the function nd.rotate to rotate img_seg by 45 degrees 107 | # Use nd.rotate? 
to see the function definition in ipython interpreter or 108 | # help(nd.rotate) in python interpreter or look it up in spyder (recommended) 109 | 110 | img_rot = nd.rotate(img_seg, 45, reshape = True) 111 | 112 | # check if the shape of img_rot is the same as for img_seg, if not look at 113 | # the additional paramters of the nd.rotate functions 114 | 115 | plt.figure(6) 116 | plt.imshow(img_rot, cmap='gray') 117 | -------------------------------------------------------------------------------- /Exercise 1-Image Manipulation./ex2_histogram.py: -------------------------------------------------------------------------------- 1 | """ 2 | 19.04.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex2_histogram.py 7 | 8 | Using numpy, matplotlib and scipy 9 | The goal of this exercise is for you to become familiar with the important 10 | packages numpy, matplotlib and scipy. 11 | Here you will load an image, add noise and look at the histograms of the 12 | different color channels 13 | 14 | You need to replace the ??? in the code with the required commands 15 | """ 16 | import numpy as np 17 | import matplotlib.pyplot as plt 18 | import scipy.ndimage as nd 19 | 20 | # use plt.imread to read in 'bears.jpg' and save the color channels 21 | # into separate numpy arrays. 22 | # Check the image dimensions before and after splitting the colors 23 | # with with the shape attribute of img, red, green, and blue 24 | # img should be a 3-dimensional array and the colors a 2d array, respectively. 25 | 26 | img = plt.imread('bears.jpg') / 255. # Division to norm to an interval [0, 1] 27 | sh = img.shape 28 | 29 | # Select red, green, and blue channel 30 | 31 | red = img[:,:,0] 32 | green = img[:,:,1] 33 | blue = img[:,:,2] 34 | print(sh, red.shape, green.shape, blue.shape) 35 | 36 | # Display the original and the three color channels in an array of subplots. 37 | # Therefore, open a figure with plt.figure() and use plt.subplot(...) to plot 38 | # them in a 2x2 array. To use the function correctly, look up the help by 39 | # typing plt.subplot? in the ipython console or help(plt.subplot) in python 40 | 41 | plt.figure(1) 42 | plt.subplot(2, 2, 1) 43 | plt.imshow(img) 44 | plt.title('color') 45 | plt.subplot(2, 2, 2) 46 | plt.imshow(red, cmap='gray') 47 | plt.title('red channel') 48 | plt.subplot(2, 2, 3) 49 | plt.imshow(green, cmap='gray') 50 | plt.title('green channel') 51 | plt.subplot(2, 2, 4) 52 | plt.imshow(blue, cmap='gray') 53 | plt.title('blue channel') 54 | 55 | # Create the histograms of the three color channels separately 56 | # using the np.histogram function. Use 50 bis and a range of (0, 1) 57 | # Afterwards plot them into one histogram line plot. Keep in mind that 58 | # np.histograms returns left and right bin margins. Therefore, you will need 59 | # to create the central bin positions by yourself 60 | 61 | red_hist = np.histogram(red, bins=50, range=(0,1)) 62 | green_hist = np.histogram(green, bins=50, range=(0,1)) 63 | blue_hist = np.histogram(blue, bins=50, range=(0,1)) 64 | 65 | # In case you do not know how to do the last part look at the lower parts of 66 | # the script. The lines before will appear again in a similar fashion. 67 | 68 | red_bins = red_hist[1] 69 | central_bins = (red_bins[1:] + red_bins[:-1]) / 2. 70 | '''green_bins = green_hist[1] 71 | central_bins_g = (green_bins[1:] + green_bins[:-1]) / 2. 
72 | blue_bins = blue_hist[1] 73 | central_bins_b = (blue_bins[1:] + blue_bins[:-1]) / 2.''' 74 | 75 | plt.figure(2) 76 | plt.title('histograms of 3 color channels') 77 | plt.plot(central_bins, blue_hist[0], label='blue') 78 | plt.plot(central_bins, green_hist[0], label='green') 79 | plt.plot(central_bins, red_hist[0], label='red') 80 | plt.grid() 81 | plt.legend() 82 | 83 | # Now, add Gaussian noise to the image with the function 84 | # np.random.standard_normal with a standard deviation of 0.1 85 | 86 | img_noisy = img + 0.1*np.random.standard_normal(sh) 87 | 88 | # Note, that values below 0. and above 1. wrap around on the color scale 89 | # Therefore, they have to be set back to 0. or 1. respectively 90 | # Hint: The coordinates to index the array can also be a boolean array of the 91 | # same shape. So, if you want to select all pixels with a value smaller 92 | # than 0, you can use img_noisy < 0. 93 | 94 | img_noisy[img_noisy < 0.] = 0. 95 | img_noisy[img_noisy > 1.] = 1. 96 | 97 | plt.figure(3) 98 | plt.title('noisy image') 99 | plt.imshow(img_noisy, cmap='gray', vmin=0, vmax=1.) 100 | 101 | red_hist_noisy = np.histogram(img_noisy[..., 0], bins=50, range=(0, 1)) 102 | green_hist_noisy = np.histogram(img_noisy[..., 1], bins=50, range=(0, 1)) 103 | blue_hist_noisy = np.histogram(img_noisy[..., 2], bins=50, range=(0, 1)) 104 | 105 | plt.figure(4) 106 | plt.title('histograms of 3 noisy color channels') 107 | plt.plot(central_bins, blue_hist_noisy[0], label='blue') 108 | plt.plot(central_bins, green_hist_noisy[0], label='green') 109 | plt.plot(central_bins, red_hist_noisy[0], label='red') 110 | plt.grid() 111 | plt.legend() 112 | 113 | # After adding noise, we want to remove it again by Gaussian filtering. 114 | # Therefore, the function gaussian_filter of the nd.filter module can be used. 115 | # Apply the filter with a filter kernel size of sigma=1. 116 | # You can either filter each image band separately or give a list of sigmas 117 | # (one for each dimension) and make sure that you do not filter across color 118 | # channels with a zero at the right place. 119 | 120 | # The 0 in the last axis mean that we do not filter 121 | # across color channels of the image 122 | 123 | sigma = (1, 1, 0) 124 | img_filtered = nd.filters.gaussian_filter(img_noisy, sigma=sigma) 125 | 126 | plt.figure(5) 127 | plt.title('filtered image') 128 | plt.imshow(img_filtered, cmap='gray', vmin=0, vmax=1.) 
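# --- Editorial sketch, not part of the original exercise: a quick check that
# the Gaussian filter actually reduced the noise, assuming the arrays img,
# img_noisy and img_filtered defined above are still in memory.
rms_noisy = np.sqrt(np.mean((img_noisy - img)**2))
rms_filtered = np.sqrt(np.mean((img_filtered - img)**2))
print('RMS error vs. clean image: noisy %.4f, filtered %.4f'
      % (rms_noisy, rms_filtered))
# The filtered value should come out smaller, since sigma=(1, 1, 0) averages
# away much of the added noise (std 0.1) without mixing the colour channels.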
129 | 130 | red_hist_filtered = np.histogram(img_filtered[..., 0], bins=50, range=(0, 1)) 131 | green_hist_filtered = np.histogram(img_filtered[..., 1], bins=50, range=(0, 1)) 132 | blue_hist_filtered = np.histogram(img_filtered[..., 2], bins=50, range=(0, 1)) 133 | 134 | plt.figure(6) 135 | plt.title('histograms of 3 filtered color channels') 136 | plt.plot(central_bins, blue_hist_filtered[0], label='blue') 137 | plt.plot(central_bins, green_hist_filtered[0], label='green') 138 | plt.plot(central_bins, red_hist_filtered[0], label='red') 139 | plt.grid() 140 | plt.legend() 141 | -------------------------------------------------------------------------------- /Exercise 1-Image Manipulation./tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 1-Image Manipulation./tree.jpg -------------------------------------------------------------------------------- /Exercise 10-Windowed fourier and wavelet transformation/IPPtools.py: -------------------------------------------------------------------------------- 1 | """ 2 | 05.07.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | Image Processing in Physics Toolbox for the wavelet transformation exercise. 7 | """ 8 | import numpy as np 9 | 10 | import pywt # can be downloaded from http://www.pybytes.com/pywavelets/ 11 | 12 | __all__ = ['dwt_multiscale', 'idwt_multiscale', 'tile_dwt', 'rescale'] 13 | 14 | 15 | def dwt_multiscale(image, nLevel=3, wavelet='db1', mode='cpd'): 16 | """Calculate a multilevel 2D discrete wavelet transform""" 17 | A = np.array([]) 18 | H = np.array([]) 19 | V = np.array([]) 20 | D = np.array([]) 21 | coeffs = [] 22 | # initialize image variable with input image 23 | im = image 24 | # perform multilevel decomposition 25 | for iLevel in range(nLevel): 26 | # perform wavelet transform of image variable 27 | appr, (hori, vert, diag) = pywt.dwt2(im, wavelet, mode) 28 | # save coefficient results 29 | A = np.hstack([A, appr.ravel()]) 30 | H = np.hstack([H, hori.ravel()]) 31 | V = np.hstack([V, vert.ravel()]) 32 | D = np.hstack([D, diag.ravel()]) 33 | coeffs.append((appr, hori, vert, diag)) 34 | # save approximation aa at level iLevel in image variable 35 | im = appr 36 | return coeffs, (A, H, V, D) 37 | 38 | 39 | def idwt_multiscale(coeffs, wavelet='db1', mode='cpd'): 40 | """Calculate a multilevel 2D inverse discrete wavelet transform""" 41 | recons = coeffs[-1][0] 42 | nLevel = len(coeffs) 43 | for iLevel in reversed(range(nLevel)): 44 | recons = pywt.idwt2((recons, tuple(coeffs[iLevel][1:])), wavelet, mode) 45 | return recons 46 | 47 | 48 | def tile_dwt(coeffs, shape): 49 | """Tile 2D-wavelet coefficients into the standard shape""" 50 | tiled_image = np.zeros(shape) 51 | 52 | # add n-th level approximation into corner of tiled image 53 | A0 = coeffs[-1][0] 54 | tiled_image[0:A0.shape[0], 0:A0.shape[1]] = rescale(A0, (0, 1)) 55 | 56 | nLevel = len(coeffs) 57 | for iLevel in reversed(range(nLevel)): 58 | # read coefficients at level iLevel 59 | appr, hori, vert, diag = coeffs[iLevel] 60 | Vert = rescale(abs(vert), (0, 1)) 61 | Hori = rescale(abs(hori), (0, 1)) 62 | Diag = rescale(abs(diag), (0, 1)) 63 | # determine shape parameters at level iLevel 64 | i0 = int(np.floor(shape[0] * .5**(iLevel+1))) 65 | j0 = int(np.floor(shape[1] * .5**(iLevel+1))) 66 | ir, jr = appr.shape 67 | # tile subimages 
at level iLevel 68 | tiled_image[i0:(i0+ir), 0:jr] = Vert 69 | tiled_image[0:ir, j0:(j0+jr)] = Hori 70 | tiled_image[i0:(i0+ir), j0:(j0+jr)] = Diag 71 | return tiled_image 72 | 73 | 74 | def rescale(a, bounds): 75 | """ linear rescaling of the input onto a interval given by "bounds" """ 76 | b0, b1 = (min(bounds), max(bounds)) 77 | a_scaled = b0 + (b1-b0)*(a.astype('float')-a.min()) / (a.max()-a.min()) 78 | return a_scaled 79 | -------------------------------------------------------------------------------- /Exercise 10-Windowed fourier and wavelet transformation/IPPtools.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 10-Windowed fourier and wavelet transformation/IPPtools.pyc -------------------------------------------------------------------------------- /Exercise 10-Windowed fourier and wavelet transformation/ex1_windowed_FT_solution.py: -------------------------------------------------------------------------------- 1 | """ 2 | 05.07.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_windowed_FT.py 7 | 8 | Using numpy, matplotlib 9 | 10 | This is a script which calculates the spectrogram of a signal by calling 11 | the function calc_spectrogram. 12 | 13 | Try to replace the missing information (there are 6 incomplete lines as 14 | indicated by the ??? and generate the spectrogram plot. 15 | """ 16 | 17 | import numpy as np 18 | import matplotlib.pyplot as plt 19 | 20 | 21 | def calc_spectrogram(signal, width, sigma): 22 | """ 23 | Calculate the spectrogram of a 1D-signal via the 1D Windowed Fourier 24 | Transform. Smoothen the rectangular window of width "width" with a 25 | Gaussian of width "sigma" 26 | 27 | Parameters are: - the signal 28 | - the width of the rect window 29 | - the width of the apodizing gaussian 30 | 31 | Returns: spectrogram 32 | """ 33 | 34 | # append zeros to both sides of the signal, so that there is no 35 | # wrap-around when windowing 36 | signal = np.hstack([np.zeros(width//2), signal, np.zeros(width//2)]) 37 | 38 | # create spectrogram data container 39 | spectrogram = np.zeros((len(signal), len(signal))) 40 | 41 | for position in np.arange(width//2, len(signal) - width//2): 42 | # extract rectangular region of width "width" from signal about 43 | # position 44 | windowed_signal = signal[position - width//2:position + width//2] 45 | 46 | # apodize edges of rect-window by gaussian 47 | gaussian = np.exp( 48 | -((np.linspace(0, width, width) - width//2)**2) / (2. * sigma**2)) 49 | windowed_signal = windowed_signal * gaussian 50 | 51 | # zero-pad resulting windowed signal 52 | padded_window = np.zeros((len(signal))) 53 | 54 | padded_window[0:width] = windowed_signal 55 | 56 | # fourier transform the padded windowed signal 57 | # hint you need an fft shift here 58 | local_spectrum = np.fft.fftshift(np.fft.fft(padded_window)) 59 | 60 | # calculate the spectrogram from the WFT 61 | spectrogram[:, position] = np.abs(local_spectrum)**2 62 | 63 | return spectrogram[:, width//2:len(signal) - width//2] 64 | 65 | # Generate a vector x of 1000 points between 0 and 1 66 | x = np.linspace(0, 1., 1000) 67 | x2 = np.linspace(0, 4., 4000) 68 | p1 = 1. / 80 69 | p2 = 1. 
/ 160 70 | signal1 = np.cos(2 * np.pi / p1 * x) 71 | signal2 = np.cos(2 * np.pi / p2 * x) 72 | signal3 = (signal1 + signal2) / 2 73 | signal4 = np.cos(2 * np.pi / p1 * x**2) 74 | 75 | signal = np.hstack([signal1, signal2, signal3, signal4]) 76 | 77 | # Call the spectrogram function choosing an appropriate width: 78 | width = 50 79 | sigma = width / 5. 80 | spec = calc_spectrogram(signal, width, sigma) 81 | 82 | # Truncate the spectrogram to just take the positive frequencies: 83 | spec = spec[0:spec.shape[0]//2, :] 84 | 85 | # Plot the original signal 86 | plt.figure(1, figsize=(14, 4)) 87 | plt.plot(x2, signal) 88 | 89 | # Plot the spectrogram 90 | plt.figure(2, figsize=(14, 4)) 91 | plt.imshow(spec, aspect='auto', extent=(0, 1, 0, 500), cmap='gray') 92 | plt.title('spectrogram') 93 | plt.ylabel('frequency') 94 | plt.xlabel('time/space') 95 | plt.show() 96 | -------------------------------------------------------------------------------- /Exercise 10-Windowed fourier and wavelet transformation/ex2_wavelet_transformation_solution.py: -------------------------------------------------------------------------------- 1 | """ 2 | 05.07.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex2_wavelet_transformation.py 7 | 8 | Using numpy, matplotlib and scipy 9 | 10 | This is a script which calculates the wavelet transform of an image, 11 | thresholds the wavelet coefficients, performs the inverse wavelet transform, 12 | in order to compress the image. The script plots the original image, the 13 | wavelet tiling, the thresholded tiling, and the compressed image. 14 | 15 | See IPPtools for details on the functions that have been added for this 16 | exercise: 17 | dwt_multiscale 18 | idwt_multiscale 19 | tile_dwt 20 | rescale 21 | 22 | Try to replace the missing information (there are 6 incomplete lines as 23 | indicated by the ??? and hence generate a successfully compressed image. 24 | """ 25 | import numpy as np 26 | import matplotlib.pyplot as plt 27 | import IPPtools as IPPT 28 | 29 | # Load in the image tree.jpg and choose the red channel 30 | img = plt.imread('tree.jpg')[:, :, 0] / 255. 31 | 32 | # Choose compression level for hard threshold 33 | compression_level = 0.1 # Between 0 and 1 34 | 35 | # Wavelet decomposition parameters 36 | # Choose your favourite wavelet type. 
See the pywt wavelet object homepage for 37 | # a list http://www.pybytes.com/pywavelets/regression/wavelet.html 38 | # Choose the number of levels of decomposition 39 | 40 | nLevel = 3 # Number of decompositions 41 | wavelet = 'haar' # mother wavelet 42 | mode = 'per' # zero padding mode 43 | 44 | # Decomposition with IPPT.dwt_multiscale 45 | coeffs, (A, H, V, D) = IPPT.dwt_multiscale( 46 | img, nLevel=nLevel, mode=mode, wavelet=wavelet) 47 | 48 | # Extract the approximation image of last decomposition level 49 | A0 = coeffs[-1][0] 50 | 51 | # Group all coefficients to search for the right threshold 52 | allcoeffs = np.hstack([A0.ravel(), H, V, D])**2 53 | 54 | # Number of coefficients that have to be set to zeros 55 | Nzeros = int((1 - compression_level) * len(allcoeffs)) 56 | 57 | # Sort coefficients by size, give back a sorted list of indices 58 | iarg = allcoeffs.argsort() 59 | 60 | # Find lowest allowed power 61 | lowest_power = allcoeffs[iarg[Nzeros]] 62 | 63 | # Threshold the coefficients 64 | newcoeffs = [ 65 | [iCoeffs*(iCoeffs**2 >= lowest_power) for iCoeffs in iLevels] 66 | for iLevels in coeffs 67 | ] 68 | 69 | # reconstruct new coefficients 70 | rec = IPPT.idwt_multiscale(newcoeffs, mode=mode, wavelet=wavelet) 71 | 72 | # Total power before thresholding 73 | power0 = allcoeffs.sum() 74 | 75 | # Total power after thresholding 76 | power1 = allcoeffs[iarg[Nzeros:]].sum() 77 | 78 | print( 79 | 'compression by %3.1f%% leads to %3.1f%% relative error' % 80 | (100-100*compression_level, 100*(1-power1/power0)) 81 | ) 82 | 83 | plt.figure(1, figsize=(12, 12)) 84 | plt.subplot(2, 2, 1) 85 | plt.imshow(img, cmap='gray') 86 | plt.title('Original') 87 | plt.subplot(2, 2, 2) 88 | plt.imshow(rec, cmap='gray') 89 | plt.title('Compression by %3.1f%%' % (100 - 100 * compression_level)) 90 | plt.subplot(2, 2, 3) 91 | plt.imshow(IPPT.tile_dwt(coeffs, img.shape)**(1 / 4.), cmap='gray') 92 | plt.title('Wavelet decomposition (gamma 0.25)') 93 | plt.subplot(2, 2, 4) 94 | plt.imshow(IPPT.tile_dwt(newcoeffs, img.shape)**(1 / 4.), cmap='gray') 95 | plt.title('Wavelet thresholded (gamma 0.25)') 96 | plt.show() 97 | -------------------------------------------------------------------------------- /Exercise 10-Windowed fourier and wavelet transformation/script_used_in_tutorial/ex1_windowed_FT_tutoring.py: -------------------------------------------------------------------------------- 1 | """ 2 | This code plots a interactive figure 3 | Juanjuan Huang 4 | """ 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | 9 | #%% 10 | def calc_spectrogram(signal, width, sigma, apply_gaussian = True): 11 | """ 12 | Calculate the spectrogram of a 1D-signal via the 1D Windowed Fourier 13 | Transform. Smoothen the rectangular window of width "width" with a 14 | Gaussian of width "sigma" 15 | 16 | Parameters are: - the signal 17 | - the width of the rect window 18 | - the width of the apodizing gaussian 19 | 20 | Returns: spectrogram 21 | """ 22 | 23 | signal = np.hstack([np.zeros(width//2), signal, np.zeros(width//2)]) 24 | print(signal.shape) 25 | spectrogram = np.zeros((len(signal), len(signal))) 26 | check = np.arange(width, len(signal), width) 27 | counter = 0 28 | 29 | for position in np.arange(width//2, len(signal) - width//2): 30 | c = float(counter)/len(check) 31 | windowed_signal = signal[position-width//2:position + width//2] 32 | if apply_gaussian == True: 33 | gaussian = np.exp(-((np.linspace(0, width, width) - width//2)**2) / (2. 
* sigma**2)) 34 | else: 35 | gaussian = 1 36 | 37 | if position in check: 38 | plt.figure(5) 39 | plt.clf() 40 | plt.subplot(2,1,1) 41 | plt.plot(windowed_signal, label = 'windowed_signal', color = (1-c, c, c**2)) 42 | #plt.plot(gaussian, color = 'orange',linewidth = 3) 43 | plt.legend(loc = 'center') 44 | counter += 1 45 | 46 | windowed_signal = windowed_signal * gaussian 47 | 48 | if position in check: 49 | plt.figure(5) 50 | plt.subplot(2,1,2) 51 | plt.plot(windowed_signal, label = 'windowed_signal * gaussian',color = (1-c, c, c**2)) 52 | plt.plot(gaussian, color = 'orange', linestyle = '--', linewidth = 3) 53 | plt.legend(loc = 'center') 54 | plt.pause(.1) 55 | 56 | padded_window = np.zeros((len(signal))) 57 | 58 | padded_window[position-width//2:position + width//2] = windowed_signal 59 | local_spectrum = np.fft.fftshift(np.fft.fft(padded_window)) 60 | 61 | if position in check: 62 | plt.figure(7) 63 | plt.clf() 64 | plt.subplot(3,1,2) 65 | plt.plot(padded_window, linewidth = 0.5, color= (1-c, c, c**2), label = 'padded') 66 | 67 | plt.subplot(3,1,1) 68 | plt.plot(signal, color = 'k', linewidth = 0.5, alpha = 0.5) 69 | plt.axvspan(position - width//2, 70 | position + width //2, 71 | #facecolor = plt.cm.Accent_r, 72 | facecolor= (1-c, c, c**2), 73 | alpha=0.9) 74 | plt.xticks([]) 75 | plt.title('signal') 76 | 77 | plt.subplot(3,1,3) 78 | plt.plot(np.abs(local_spectrum) ** 2, color = (1-c, c, c**2)) 79 | plt.xticks([]) 80 | plt.pause(0.01) 81 | 82 | spectrogram[:, position] = np.abs(local_spectrum) ** 2 83 | 84 | if position in check: 85 | plt.figure(8) 86 | plt.clf() 87 | plt.imshow(spectrogram, 88 | vmin = 0, vmax = 300, 89 | aspect='auto', cmap='gray') 90 | plt.title('spectrogram') 91 | plt.ylabel('frequency') 92 | plt.xlabel('time/space') 93 | plt.pause(0.01) 94 | return spectrogram[:, width//2:len(signal) - width//2] 95 | 96 | #%% 97 | # our signal 98 | x = np.linspace(0, 1., 1000) 99 | x2 = np.linspace(0, 4., 4000) 100 | p1 = 1. / 80 101 | p2 = 1. / 160 102 | signal1 = np.cos(2 * np.pi / p1 * x) 103 | signal2 = np.cos(2 * np.pi / p2 * x) 104 | signal3 = (signal1 + signal2) / 2 105 | signal4 = np.cos(2 * np.pi / p1 * x**2) 106 | 107 | signal = np.hstack([signal1, signal2, signal3, signal4]) 108 | 109 | # In[1]: 110 | 111 | 112 | # adjust width & sigma to see the diffrence of the results 113 | # and the uncertainty principle -- a trade of frequency resolution & time resolution 114 | 115 | # Adjust width values 116 | #width = 50 117 | width = 200 118 | #width = 1000 119 | #width = 250 120 | #width = 400 121 | sigma = width / 5. 122 | spec = calc_spectrogram(signal, width, sigma) 123 | 124 | #%% 125 | # Adjust sigma values 126 | # sigma too small 127 | sigma = width / 20. 
128 | # sigma too big 129 | sigma = width 130 | 131 | spec = calc_spectrogram(signal, width, sigma) 132 | 133 | # In[2]: 134 | # without applying Gaussian 135 | width = 100 136 | 137 | spec = calc_spectrogram(signal, width, sigma, apply_gaussian= False) 138 | -------------------------------------------------------------------------------- /Exercise 10-Windowed fourier and wavelet transformation/tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 10-Windowed fourier and wavelet transformation/tree.jpg -------------------------------------------------------------------------------- /Exercise 11-Least Square/IPPtools.py: -------------------------------------------------------------------------------- 1 | """ 2 | 05.07.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | Image Processing in Physics Toolbox for the wavelet transformation exercise. 7 | """ 8 | import numpy as np 9 | 10 | import pywt # can be downloaded from http://www.pybytes.com/pywavelets/ 11 | 12 | __all__ = ['dwt_multiscale', 'idwt_multiscale', 'tile_dwt', 'rescale'] 13 | 14 | 15 | def dwt_multiscale(image, nLevel=3, wavelet='db1', mode='cpd'): 16 | """Calculate a multilevel 2D discrete wavelet transform""" 17 | A = np.array([]) 18 | H = np.array([]) 19 | V = np.array([]) 20 | D = np.array([]) 21 | coeffs = [] 22 | # initialize image variable with input image 23 | im = image 24 | # perform multilevel decomposition 25 | for iLevel in range(nLevel): 26 | # perform wavelet transform of image variable 27 | appr, (hori, vert, diag) = pywt.dwt2(im, wavelet, mode) 28 | # save coefficient results 29 | A = np.hstack([A, appr.ravel()]) 30 | H = np.hstack([H, hori.ravel()]) 31 | V = np.hstack([V, vert.ravel()]) 32 | D = np.hstack([D, diag.ravel()]) 33 | coeffs.append((appr, hori, vert, diag)) 34 | # save approximation aa at level iLevel in image variable 35 | im = appr 36 | return coeffs, (A, H, V, D) 37 | 38 | 39 | def idwt_multiscale(coeffs, wavelet='db1', mode='cpd'): 40 | """Calculate a multilevel 2D inverse discrete wavelet transform""" 41 | recons = coeffs[-1][0] 42 | nLevel = len(coeffs) 43 | for iLevel in reversed(range(nLevel)): 44 | recons = pywt.idwt2((recons, tuple(coeffs[iLevel][1:])), wavelet, mode) 45 | return recons 46 | 47 | 48 | def tile_dwt(coeffs, shape): 49 | """Tile 2D-wavelet coefficients into the standard shape""" 50 | tiled_image = np.zeros(shape) 51 | 52 | # add n-th level approximation into corner of tiled image 53 | A0 = coeffs[-1][0] 54 | tiled_image[0:A0.shape[0], 0:A0.shape[1]] = rescale(A0, (0, 1)) 55 | 56 | nLevel = len(coeffs) 57 | for iLevel in reversed(range(nLevel)): 58 | # read coefficients at level iLevel 59 | appr, hori, vert, diag = coeffs[iLevel] 60 | Vert = rescale(abs(vert), (0, 1)) 61 | Hori = rescale(abs(hori), (0, 1)) 62 | Diag = rescale(abs(diag), (0, 1)) 63 | # determine shape parameters at level iLevel 64 | i0 = int(np.floor(shape[0] * .5**(iLevel+1))) 65 | j0 = int(np.floor(shape[1] * .5**(iLevel+1))) 66 | ir, jr = appr.shape 67 | # tile subimages at level iLevel 68 | tiled_image[i0:(i0+ir), 0:jr] = Vert 69 | tiled_image[0:ir, j0:(j0+jr)] = Hori 70 | tiled_image[i0:(i0+ir), j0:(j0+jr)] = Diag 71 | return tiled_image 72 | 73 | 74 | def rescale(a, bounds): 75 | """ linear rescaling of the input onto a interval given by "bounds" """ 76 | b0, b1 = 
(min(bounds), max(bounds)) 77 | a_scaled = b0 + (b1-b0)*(a.astype('float')-a.min()) / (a.max()-a.min()) 78 | return a_scaled 79 | -------------------------------------------------------------------------------- /Exercise 11-Least Square/IPPtools.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 11-Least Square/IPPtools.pyc -------------------------------------------------------------------------------- /Exercise 11-Least Square/ex1_windowed_FT_solution.py: -------------------------------------------------------------------------------- 1 | """ 2 | 05.07.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_windowed_FT.py 7 | 8 | Using numpy, matplotlib 9 | 10 | This is a script which calculates the spectrogram of a signal by calling 11 | the function calc_spectrogram. 12 | 13 | Try to replace the missing information (there are 6 incomplete lines as 14 | indicated by the ??? and generate the spectrogram plot. 15 | """ 16 | 17 | import numpy as np 18 | import matplotlib.pyplot as plt 19 | 20 | 21 | def calc_spectrogram(signal, width, sigma): 22 | """ 23 | Calculate the spectrogram of a 1D-signal via the 1D Windowed Fourier 24 | Transform. Smoothen the rectangular window of width "width" with a 25 | Gaussian of width "sigma" 26 | 27 | Parameters are: - the signal 28 | - the width of the rect window 29 | - the width of the apodizing gaussian 30 | 31 | Returns: spectrogram 32 | """ 33 | 34 | # append zeros to both sides of the signal, so that there is no 35 | # wrap-around when windowing 36 | signal = np.hstack([np.zeros(width//2), signal, np.zeros(width//2)]) 37 | 38 | # create spectrogram data container 39 | spectrogram = np.zeros((len(signal), len(signal))) 40 | 41 | for position in np.arange(width//2, len(signal) - width//2): 42 | # extract rectangular region of width "width" from signal about 43 | # position 44 | windowed_signal = signal[position - width//2:position + width//2] 45 | 46 | # apodize edges of rect-window by gaussian 47 | gaussian = np.exp( 48 | -((np.linspace(0, width, width) - width//2)**2) / (2. * sigma**2)) 49 | windowed_signal = windowed_signal * gaussian 50 | 51 | # zero-pad resulting windowed signal 52 | padded_window = np.zeros((len(signal))) 53 | 54 | padded_window[0:width] = windowed_signal 55 | 56 | # fourier transform the padded windowed signal 57 | # hint you need an fft shift here 58 | local_spectrum = np.fft.fftshift(np.fft.fft(padded_window)) 59 | 60 | # calculate the spectrogram from the WFT 61 | spectrogram[:, position] = np.abs(local_spectrum)**2 62 | 63 | return spectrogram[:, width//2:len(signal) - width//2] 64 | 65 | # Generate a vector x of 1000 points between 0 and 1 66 | x = np.linspace(0, 1., 1000) 67 | x2 = np.linspace(0, 4., 4000) 68 | p1 = 1. / 80 69 | p2 = 1. / 160 70 | signal1 = np.cos(2 * np.pi / p1 * x) 71 | signal2 = np.cos(2 * np.pi / p2 * x) 72 | signal3 = (signal1 + signal2) / 2 73 | signal4 = np.cos(2 * np.pi / p1 * x**2) 74 | 75 | signal = np.hstack([signal1, signal2, signal3, signal4]) 76 | 77 | # Call the spectrogram function choosing an appropriate width: 78 | width = 50 79 | sigma = width / 5. 
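# (Editorial note: the window width sets the usual time-frequency trade-off of
# the windowed Fourier transform -- a wider window resolves frequencies more
# finely but localises them more poorly in time, and vice versa; width = 50
# with sigma = width/5 is simply one reasonable compromise for this signal.)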
80 | spec = calc_spectrogram(signal, width, sigma) 81 | 82 | # Truncate the spectrogram to just take the positive frequencies: 83 | spec = spec[0:spec.shape[0]//2, :] 84 | 85 | # Plot the original signal 86 | plt.figure(1, figsize=(14, 4)) 87 | plt.plot(x2, signal) 88 | 89 | # Plot the spectrogram 90 | plt.figure(2, figsize=(14, 4)) 91 | plt.imshow(spec, aspect='auto', extent=(0, 1, 0, 500), cmap='gray') 92 | plt.title('spectrogram') 93 | plt.ylabel('frequency') 94 | plt.xlabel('time/space') 95 | plt.show() 96 | -------------------------------------------------------------------------------- /Exercise 11-Least Square/ex2_wavelet_transformation_solution.py: -------------------------------------------------------------------------------- 1 | """ 2 | 05.07.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex2_wavelet_transformation.py 7 | 8 | Using numpy, matplotlib and scipy 9 | 10 | This is a script which calculates the wavelet transform of an image, 11 | thresholds the wavelet coefficients, performs the inverse wavelet transform, 12 | in order to compress the image. The script plots the original image, the 13 | wavelet tiling, the thresholded tiling, and the compressed image. 14 | 15 | See IPPtools for details on the functions that have been added for this 16 | exercise: 17 | dwt_multiscale 18 | idwt_multiscale 19 | tile_dwt 20 | rescale 21 | 22 | Try to replace the missing information (there are 6 incomplete lines as 23 | indicated by the ??? and hence generate a successfully compressed image. 24 | """ 25 | import numpy as np 26 | import matplotlib.pyplot as plt 27 | import IPPtools as IPPT 28 | 29 | # Load in the image tree.jpg and choose the red channel 30 | img = plt.imread('tree.jpg')[:, :, 0] / 255. 31 | 32 | # Choose compression level for hard threshold 33 | compression_level = 0.1 # Between 0 and 1 34 | 35 | # Wavelet decomposition parameters 36 | # Choose your favourite wavelet type. 
See the pywt wavelet object homepage for 37 | # a list http://www.pybytes.com/pywavelets/regression/wavelet.html 38 | # Choose the number of levels of decomposition 39 | 40 | nLevel = 3 # Number of decompositions 41 | wavelet = 'haar' # mother wavelet 42 | mode = 'per' # zero padding mode 43 | 44 | # Decomposition with IPPT.dwt_multiscale 45 | coeffs, (A, H, V, D) = IPPT.dwt_multiscale( 46 | img, nLevel=nLevel, mode=mode, wavelet=wavelet) 47 | 48 | # Extract the approximation image of last decomposition level 49 | A0 = coeffs[-1][0] 50 | 51 | # Group all coefficients to search for the right threshold 52 | allcoeffs = np.hstack([A0.ravel(), H, V, D])**2 53 | 54 | # Number of coefficients that have to be set to zeros 55 | Nzeros = int((1 - compression_level) * len(allcoeffs)) 56 | 57 | # Sort coefficients by size, give back a sorted list of indices 58 | iarg = allcoeffs.argsort() 59 | 60 | # Find lowest allowed power 61 | lowest_power = allcoeffs[iarg[Nzeros]] 62 | 63 | # Threshold the coefficients 64 | newcoeffs = [ 65 | [iCoeffs*(iCoeffs**2 >= lowest_power) for iCoeffs in iLevels] 66 | for iLevels in coeffs 67 | ] 68 | 69 | # reconstruct new coefficients 70 | rec = IPPT.idwt_multiscale(newcoeffs, mode=mode, wavelet=wavelet) 71 | 72 | # Total power before thresholding 73 | power0 = allcoeffs.sum() 74 | 75 | # Total power after thresholding 76 | power1 = allcoeffs[iarg[Nzeros:]].sum() 77 | 78 | print( 79 | 'compression by %3.1f%% leads to %3.1f%% relative error' % 80 | (100-100*compression_level, 100*(1-power1/power0)) 81 | ) 82 | 83 | plt.figure(1, figsize=(12, 12)) 84 | plt.subplot(2, 2, 1) 85 | plt.imshow(img, cmap='gray') 86 | plt.title('Original') 87 | plt.subplot(2, 2, 2) 88 | plt.imshow(rec, cmap='gray') 89 | plt.title('Compression by %3.1f%%' % (100 - 100 * compression_level)) 90 | plt.subplot(2, 2, 3) 91 | plt.imshow(IPPT.tile_dwt(coeffs, img.shape)**(1 / 4.), cmap='gray') 92 | plt.title('Wavelet decomposition (gamma 0.25)') 93 | plt.subplot(2, 2, 4) 94 | plt.imshow(IPPT.tile_dwt(newcoeffs, img.shape)**(1 / 4.), cmap='gray') 95 | plt.title('Wavelet thresholded (gamma 0.25)') 96 | plt.show() 97 | -------------------------------------------------------------------------------- /Exercise 11-Least Square/script_used_in_tutorial/ex1_windowed_FT_tutoring.py: -------------------------------------------------------------------------------- 1 | """ 2 | This code plots a interactive figure 3 | Juanjuan Huang 4 | """ 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | 9 | #%% 10 | def calc_spectrogram(signal, width, sigma, apply_gaussian = True): 11 | """ 12 | Calculate the spectrogram of a 1D-signal via the 1D Windowed Fourier 13 | Transform. Smoothen the rectangular window of width "width" with a 14 | Gaussian of width "sigma" 15 | 16 | Parameters are: - the signal 17 | - the width of the rect window 18 | - the width of the apodizing gaussian 19 | 20 | Returns: spectrogram 21 | """ 22 | 23 | signal = np.hstack([np.zeros(width//2), signal, np.zeros(width//2)]) 24 | print(signal.shape) 25 | spectrogram = np.zeros((len(signal), len(signal))) 26 | check = np.arange(width, len(signal), width) 27 | counter = 0 28 | 29 | for position in np.arange(width//2, len(signal) - width//2): 30 | c = float(counter)/len(check) 31 | windowed_signal = signal[position-width//2:position + width//2] 32 | if apply_gaussian == True: 33 | gaussian = np.exp(-((np.linspace(0, width, width) - width//2)**2) / (2. 
* sigma**2)) 34 | else: 35 | gaussian = 1 36 | 37 | if position in check: 38 | plt.figure(5) 39 | plt.clf() 40 | plt.subplot(2,1,1) 41 | plt.plot(windowed_signal, label = 'windowed_signal', color = (1-c, c, c**2)) 42 | #plt.plot(gaussian, color = 'orange',linewidth = 3) 43 | plt.legend(loc = 'center') 44 | counter += 1 45 | 46 | windowed_signal = windowed_signal * gaussian 47 | 48 | if position in check: 49 | plt.figure(5) 50 | plt.subplot(2,1,2) 51 | plt.plot(windowed_signal, label = 'windowed_signal * gaussian',color = (1-c, c, c**2)) 52 | plt.plot(gaussian, color = 'orange', linestyle = '--', linewidth = 3) 53 | plt.legend(loc = 'center') 54 | plt.pause(.1) 55 | 56 | padded_window = np.zeros((len(signal))) 57 | 58 | padded_window[position-width//2:position + width//2] = windowed_signal 59 | local_spectrum = np.fft.fftshift(np.fft.fft(padded_window)) 60 | 61 | if position in check: 62 | plt.figure(7) 63 | plt.clf() 64 | plt.subplot(3,1,2) 65 | plt.plot(padded_window, linewidth = 0.5, color= (1-c, c, c**2), label = 'padded') 66 | 67 | plt.subplot(3,1,1) 68 | plt.plot(signal, color = 'k', linewidth = 0.5, alpha = 0.5) 69 | plt.axvspan(position - width//2, 70 | position + width //2, 71 | #facecolor = plt.cm.Accent_r, 72 | facecolor= (1-c, c, c**2), 73 | alpha=0.9) 74 | plt.xticks([]) 75 | plt.title('signal') 76 | 77 | plt.subplot(3,1,3) 78 | plt.plot(np.abs(local_spectrum) ** 2, color = (1-c, c, c**2)) 79 | plt.xticks([]) 80 | plt.pause(0.01) 81 | 82 | spectrogram[:, position] = np.abs(local_spectrum) ** 2 83 | 84 | if position in check: 85 | plt.figure(8) 86 | plt.clf() 87 | plt.imshow(spectrogram, 88 | vmin = 0, vmax = 300, 89 | aspect='auto', cmap='gray') 90 | plt.title('spectrogram') 91 | plt.ylabel('frequency') 92 | plt.xlabel('time/space') 93 | plt.pause(0.01) 94 | return spectrogram[:, width//2:len(signal) - width//2] 95 | 96 | #%% 97 | # our signal 98 | x = np.linspace(0, 1., 1000) 99 | x2 = np.linspace(0, 4., 4000) 100 | p1 = 1. / 80 101 | p2 = 1. / 160 102 | signal1 = np.cos(2 * np.pi / p1 * x) 103 | signal2 = np.cos(2 * np.pi / p2 * x) 104 | signal3 = (signal1 + signal2) / 2 105 | signal4 = np.cos(2 * np.pi / p1 * x**2) 106 | 107 | signal = np.hstack([signal1, signal2, signal3, signal4]) 108 | 109 | # In[1]: 110 | 111 | 112 | # adjust width & sigma to see the diffrence of the results 113 | # and the uncertainty principle -- a trade of frequency resolution & time resolution 114 | 115 | # Adjust width values 116 | #width = 50 117 | width = 200 118 | #width = 1000 119 | #width = 250 120 | #width = 400 121 | sigma = width / 5. 122 | spec = calc_spectrogram(signal, width, sigma) 123 | 124 | #%% 125 | # Adjust sigma values 126 | # sigma too small 127 | sigma = width / 20. 
128 | # sigma too big 129 | sigma = width 130 | 131 | spec = calc_spectrogram(signal, width, sigma) 132 | 133 | # In[2]: 134 | # without applying Gaussian 135 | width = 100 136 | 137 | spec = calc_spectrogram(signal, width, sigma, apply_gaussian= False) 138 | -------------------------------------------------------------------------------- /Exercise 11-Least Square/tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 11-Least Square/tree.jpg -------------------------------------------------------------------------------- /Exercise 2-Convolution And Filtering/ex1_convolution.py: -------------------------------------------------------------------------------- 1 | """ 2 | 26.04.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_convolution.py 7 | 8 | Using numpy, matplotlib and scipy 9 | Application of Fourier transforms and convolution 10 | Your task in this exercise is to try out different implementations of a simple 11 | convolution: 12 | 1. using a built-in function 13 | 2. implement your own spatial domain convolution 14 | 3. use FFTs to implement a frequency domain convolution 15 | 16 | The goal is to apply a diagonal camera shake/motion blur to a photo. 17 | 18 | You need to replace the ??? in the code with the required commands 19 | """ 20 | 21 | 22 | import numpy as np 23 | import scipy.ndimage as nd 24 | import matplotlib.pyplot as plt 25 | 26 | # Load the image tree.jpg using the function imread from matplotlib.pyplot 27 | tree = plt.imread('tree.jpg') 28 | 29 | # Extract the red channel only from tree and call it img 30 | img = tree[:,:,0] 31 | 32 | plt.figure(1) 33 | plt.imshow(img, cmap='gray') 34 | plt.colorbar() 35 | 36 | # Create a 5x5 numpy array containing .2 on the main diagonal, 0. otherwise. 37 | # This will be the convolution kernel. 38 | # You can use either a loop approach, the diag function from numpy, or the eye 39 | # function also from the numpy module to do this 40 | # (Check the functions documentations to learn what they do.) 41 | kernel = np.eye(5)*0.2 42 | 43 | # 2D convolution using a scipy function 44 | # Use the function convolve from scipy.ndimage (already imported as nd) in 45 | # 'wrap' mode to calculate the convolution of img and kernel. If you are 46 | # unsure how to use the function, look at its documentation. 47 | result_function = nd.convolve(img, kernel, mode = 'wrap') 48 | 49 | plt.figure(2) 50 | plt.imshow(result_function, cmap='gray') 51 | plt.colorbar() 52 | 53 | # 2D convolution using explicit python code. You will now calculate the 54 | # convolution of img and kernel by using only basic python/numpy operations. 55 | 56 | # initialize the array where the result is stored. 57 | result_explicit = np.zeros_like(img) 58 | 59 | # store height and width of image and kernel 60 | h, w = img.shape 61 | kh, kw = kernel.shape 62 | 63 | # Calculate the discrete two-dimensional convolution integral. Loop over all 64 | # pixels in img. Note this is very slow because it has four for loops (not 65 | # generally a good idea) 66 | for y in range(h): 67 | for x in range(w): 68 | # initialize result for pixel at y, x 69 | val = 0. 
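        # (Editorial note: the two inner loops below evaluate the wrapped,
        #  discrete 2D convolution sum
        #  result[y, x] = sum over j, i of
        #      kernel[j, i] * img[(y + kh//2 - j) % h, (x + kw//2 - i) % w],
        #  i.e. the kernel is flipped and centred on pixel (y, x).)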
70 | # loop over all pixels in kernel 71 | for j in range(kh): 72 | for i in range(kw): 73 | # "The location imageY and imageX is calculated so that for the 74 | # center element of the filter it'll be y, x, but for the other 75 | # elements it'll be a pixel from the image to the left, right, 76 | # top or bottom of y, x. It's modulo divided through the 77 | # width (w) or height (h) of the image so that pixels outside 78 | # the image will be wrapped around. Before modulo dividing it, 79 | # h or w are also added to it, because this modulo division 80 | # doesn't work correctly for negative values. Now, 81 | # pixel (-1, -1) will correctly become pixel (h-1, w-1)." 82 | # (source: http://lodev.org/cgtutor/filtering.html) 83 | imageY = (y + kh // 2 - j) % h 84 | imageX = (x + kw // 2 - i) % w 85 | val += img[imageY, imageX] * kernel[j, i] 86 | # assign result to pixel at y, x 87 | result_explicit[y, x] = val 88 | 89 | plt.figure(3) 90 | plt.imshow(result_explicit, cmap='gray') 91 | plt.colorbar() 92 | 93 | # 2D convolution using Fourier theorem 94 | # Remember that a convolution in real space is equivalent to a multiplication 95 | # in Fourier space. 96 | # You should be aware that you have to zero-pad the 5x5 kernel array to the 97 | # size of the image prior to taking the Fourier transform. The best result 98 | # is achieved, when you zero-pad in a way that the center pixel of the 99 | # kernel becomes the top-left most pixel in the padded array. 100 | 101 | # The functions for fft and ifft can be found in np.fft.fft2 and np.fft.ifft2 102 | 103 | # take the Fourier transform of the image 104 | img_ft = np.fft.fft2(img) 105 | 106 | # Zero-pad the kernel so same size as img 107 | kernel_pad = np.zeros_like(img, dtype=float) 108 | kernel_pad[h//2-kh//2:h//2+kh//2+1, w//2-kw//2:w//2+kw//2+1] = kernel 109 | kernel_pad = np.fft.ifftshift(kernel_pad) 110 | 111 | # Take the Fourier transform of the zero-padded kernel 112 | kernel_ft = np.fft.fft2(kernel_pad) 113 | 114 | # Take the inverse Fourier transform of the product of the FTs of the image 115 | # and kernel. You might discard the imaginary part 116 | result_fourier = np.real(np.fft.ifft2(img_ft * kernel_ft)) 117 | 118 | plt.figure(4) 119 | plt.imshow(result_fourier, cmap='gray') 120 | plt.colorbar() 121 | -------------------------------------------------------------------------------- /Exercise 2-Convolution And Filtering/ex2_laplace_filtering.py: -------------------------------------------------------------------------------- 1 | """ 2 | 26.04.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex2_laplace_filtering.py 7 | 8 | Laplace filter in frequency domain 9 | 10 | Your task in this exercise is to create your own implementation of a 11 | Laplace filter in Fourier space and apply it to an image. 12 | The formula for the Laplacian in the Fourier domain is: 13 | H(u,v) = -4*pi^2*(u^2+v^2) # source: (Gonzalez, chapter 4, p286) 14 | 15 | You need to replace the ??? 
in the code with the required commands 16 | """ 17 | 18 | import numpy as np 19 | import matplotlib.pyplot as plt 20 | venice = plt.imread('venice.jpg')/255 21 | # Load venice.jpg using imread, normalize it to (0, 1) 22 | # and take the red channel again 23 | img = venice[:,:,0] 24 | 25 | # Plot the image before applying the filter 26 | plt.figure(1) 27 | plt.imshow(img, cmap='gray') 28 | plt.colorbar() 29 | 30 | # Generate a coordinate systems with the discrete Fourier transform sample 31 | # frequencies v and u. You can use the numpy function linspace to do it 32 | # manually or fftfreq. Look up the documentation to get familiar with the 33 | # parameters of these functions. 34 | v = np.fft.fftfreq(img.shape[0]) 35 | u = np.fft.fftfreq(img.shape[1]) 36 | 37 | # the function np.meshgrid creates coordinate arrays for the v and the u 38 | # coordinates and writes them into vv and uu 39 | # you can display them with plt.figure(); plt.imshow(uu); colorbar() if you 40 | # want to have a look at them 41 | vv, uu = np.meshgrid(v, u, indexing='ij') 42 | 43 | # Caluclate the filter function H(v, u) 44 | # If you want to do this in one line use vv and uu, as they are both of the 45 | # image shape. The formula is given in the very top documentation of this 46 | # script. Check if H has the same shape as the image. 47 | H = -4*(np.pi)**2*(uu**2 + vv**2) 48 | 49 | # Calculate the Fourier transform of the image 50 | # You can use the numpy function fft2 included in np.fft 51 | img_ft = np.fft.fft2(img) 52 | 53 | # Multiply the Fourier transform of the image by the filter function 54 | # Take care (if neccessary) to center the potential function H around the top 55 | # left corner of the image, because a Fourier transform in python always has 56 | # the central frequencies in the top left corner. Therefore, play with the 57 | # function fftshift and ifftshift to see what it does. Check out the looks of 58 | # the shifted and unshifted potential function H. 59 | 60 | # Take the inverse Fourier transform of the product to get the filtered image 61 | # and select the real part of it, as we do not want to have the imaginary part 62 | # of real images. 
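# (Editorial note: because u and v were generated with np.fft.fftfreq, H is
# already in the same "unshifted" frequency ordering as np.fft.fft2(img), so it
# can be multiplied with img_ft directly; fftshift is only needed if you want
# to display H with the zero frequency in the centre, e.g.
# plt.figure(); plt.imshow(np.fft.fftshift(H)); plt.colorbar()
# which is purely for inspection, not required for the filtering below.)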
63 | img_filtered = np.real(np.fft.ifft2(img_ft*H)) 64 | 65 | plt.figure(2) 66 | plt.imshow(img_filtered, cmap='gray') 67 | plt.colorbar() 68 | -------------------------------------------------------------------------------- /Exercise 2-Convolution And Filtering/tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 2-Convolution And Filtering/tree.jpg -------------------------------------------------------------------------------- /Exercise 2-Convolution And Filtering/venice.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 2-Convolution And Filtering/venice.jpg -------------------------------------------------------------------------------- /Exercise 3-Interpolation/ex1_Interpolation.py: -------------------------------------------------------------------------------- 1 | """ 2 | 03.05.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_interpolation.py 7 | 8 | Using numpy, matplotlib and scipy 9 | 10 | The goal of this exercise is for you to try a simple image interpolation 11 | using interpolation kernels. For this you should first downsample an image 12 | and then try to increase its sampling again by using an appropriate 13 | interpolation method. 14 | 15 | You need to replace the ??? in the code with the required commands 16 | """ 17 | 18 | import numpy as np 19 | import matplotlib.pyplot as plt 20 | import scipy.ndimage as nd 21 | 22 | # read an image using matplotlib tools and normalize it to [0, 1] 23 | 24 | img = plt.imread('tree.jpg')[:, :, 0] / 255. 25 | sh = np.shape(img) 26 | 27 | # Subsample the original image by a certain factor by keeping only every k-th 28 | # Row and column. Hint: remember array indexing, a[i:j:k] 29 | img_sub = np.zeros(sh) 30 | factor = 5 31 | img_sub = img[::factor, ::factor] 32 | 33 | # OPTIONAL TASK: Rebin the old image via averaging. 34 | # Therefore, patches of 4 times 4 pixels are averaged and written into the 35 | # according position of the subarray. You can do this explicitely or 36 | # by reshaping the 2d-array into a 4d array, where the two new axes contain 37 | # the values that are to be averaged 38 | 39 | img_sub = np.mean(np.reshape(img, (sh[0]//factor, factor, sh[1]//factor, 40 | factor)), axis=(1,3)) 41 | 42 | # prepare the subsampled image for interpolation by inserting zeros between all 43 | # pixel values in img_sub. img_up should be the same size as the original (img) 44 | # To fill the upscaled image with a sparse matrix, remember stepping in slicing 45 | 46 | img_up = np.zeros(sh) 47 | img_up[factor//2::factor, factor//2::factor] = img_sub 48 | # Define the interpolation kernel for nearest neighbour interpolation 49 | # Hint: the pixels are separated by 5 distance units, 50 | # So how wide must the kernel be? 
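# (Editorial note: with one retained sample every `factor` pixels, a box kernel
# of width `factor` spreads each sample over exactly its own factor-by-factor
# neighbourhood, which is precisely nearest-neighbour interpolation -- hence
# the factor x factor kernel of ones defined below.)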
51 | 52 | kernel_nearest = np.ones((factor, factor)) 53 | 54 | # Perform nearest-neighbor interpolate using either convolution (easier) or fft 55 | # You can use nd.convolve for the convolution with mode='wrap' 56 | 57 | img_nearest = nd.convolve(img_up, kernel_nearest, mode = 'wrap') 58 | 59 | # define the interpolation kernel for linear interpolation 60 | # Hint: the linear kernel can be obtained by a convolution 61 | # of two rectangular kernels centered in a larger kernel 62 | # Make sure, that the kernel is wide enough 63 | 64 | kernel_rect = np.zeros((2*factor - factor % 2, 2*factor - factor % 2)) 65 | kernel_rect[factor//2:3*factor//2, factor//2:3*factor//2] = 1 66 | kernel_linear = nd.convolve(kernel_rect, kernel_rect) 67 | kernel_linear /= factor**2 # normalization 68 | 69 | # Perform linear interpolation using either convolution (easier) or fft 70 | # Check if the images are normalized correctly and have a look if the filtered 71 | # and unfiltered images are correctly aligned 72 | 73 | img_linear = nd.convolve(img_up, kernel_linear, mode='wrap') 74 | 75 | # Perform sinc interpolation using the convolution theorem and fft 76 | # Hint: the sinc kernel is easier to define in Fourier domain: 77 | # In Fourier domain, the sinc is given by a rectangular function. 78 | # Its width is given by the width of the subsampled image, sh/factor/2. 79 | 80 | kernel_sinc = np.zeros(sh) 81 | w = sh[0]//2//factor 82 | kernel_sinc[sh[0]//2-w:sh[0]//2+w, sh[1]//2-w:sh[1]//2+w] = 1 83 | kernel_sinc = np.fft.ifftshift(kernel_sinc) 84 | img_sinc = np.real(np.fft.ifft2(np.fft.fft2(img_up) * kernel_sinc)) 85 | 86 | # Plot results 87 | 88 | plt.figure(1) 89 | plt.subplot(2, 3, 1) 90 | plt.title('original') 91 | plt.imshow(img, cmap='gray', interpolation='none') 92 | plt.subplot(2, 3, 2) 93 | plt.imshow(img_sub, cmap='gray', interpolation='none') 94 | plt.title('downsampled') 95 | plt.subplot(2, 3, 3) 96 | plt.imshow(img_up, cmap='gray', interpolation='none') 97 | plt.title('upsampled again') 98 | plt.subplot(2, 3, 4) 99 | plt.imshow(img_nearest, cmap='gray', interpolation='none') 100 | plt.title('nearest interpolated') 101 | plt.subplot(2, 3, 5) 102 | plt.imshow(img_linear, cmap='gray', interpolation='none') 103 | plt.title('linear interpolated') 104 | plt.subplot(2, 3, 6) 105 | plt.imshow(img_sinc, cmap='gray', interpolation='none') 106 | plt.title('sinc interpolated') 107 | -------------------------------------------------------------------------------- /Exercise 3-Interpolation/ex2_spline_order.py: -------------------------------------------------------------------------------- 1 | """ 2 | 03.05.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex2_spline_order.py 7 | 8 | Using numpy, matplotlib and scipy 9 | 10 | The goal of this exercise is to get an idea of the frequency performance of 11 | different interpolation algorithms. For simplicity, we will use the 12 | scipy.ndimage.rotate function to rotate an image multiple times. The 13 | function uses a special case of affine transform, where the re-gridding and 14 | interpolation step is performed automatically. 15 | The image should be rotated multiple times over 360 degrees. At each 16 | rotation step an interpolation is necessary, and the cumulative effect of these 17 | interpolations deteriorates the final result. 18 | 19 | You need to replace the ??? 
in the code with the required commands 20 | """ 21 | 22 | import numpy as np 23 | import matplotlib.pyplot as plt 24 | import scipy.ndimage as nd 25 | 26 | # read an image using our matplotlib tools 27 | 28 | img = plt.imread('tree.jpg') / 255. 29 | img = np.mean(img, axis=2) 30 | sh = np.shape(img) 31 | 32 | # Define a full rotation, split into N seperate rotations 33 | 34 | step = 1 35 | Nsteps = 20 36 | angle = 360 / Nsteps 37 | 38 | # Crop the image to speed up the program 39 | # eg 300x300 pixels with the tree in the centre 40 | 41 | img_cropped = img[300:600, 300:600] 42 | 43 | # In all cases start with the cropped image 44 | 45 | img_order0 = img_cropped 46 | img_order1 = img_cropped 47 | img_order2 = img_cropped 48 | img_order3 = img_cropped 49 | img_order5 = img_cropped 50 | 51 | # Creates a figure instance that will be updated in the loop. Change therefore 52 | # the Graphics backend of the Ipython console in the preferences of Spyder from 53 | # inline to automatic. For those who use ipython directly in the console, use 54 | # the matplotlib command plt.ion() before plotting the figure or start ipython 55 | # with an additional flag "python --pylab" for interactive plotting. 56 | 57 | plt.figure(1) 58 | 59 | while step <= Nsteps: 60 | 61 | # If you are using python 3, add parentheses around the arguments 62 | 63 | print('rotation No ' + str(step) + ' angle ' + str(step * angle)) 64 | 65 | # Use ndi.rotate to rotate the image. Interpolation is done using splines 66 | # of certain order which can be passed as a variable. Please use order 67 | # 0 (nearest neighbor), 1 (bilinear), 2 (biquadratic), 3 (bicubic), and 5 68 | # also use the option reshape=False 69 | 70 | img_order0 = nd.interpolation.rotate(img_order0, angle=angle, order=0, reshape = False) 71 | img_order1 = nd.interpolation.rotate(img_order1, angle=angle, order=1, reshape = False) 72 | img_order2 = nd.interpolation.rotate(img_order2, angle=angle, order=2, reshape = False) 73 | img_order3 = nd.interpolation.rotate(img_order3, angle=angle, order=3, reshape = False) 74 | img_order5 = nd.interpolation.rotate(img_order5, angle=angle, order=5, reshape = False) 75 | 76 | # Plot the resulting images at the current step 77 | 78 | plt.subplot(231) 79 | plt.imshow(img_order0, cmap='gray', interpolation='none') 80 | plt.title('nearest neighbour') 81 | plt.subplot(232) 82 | plt.imshow(img_order1, cmap='gray', interpolation='none') 83 | plt.title('bilinear') 84 | plt.subplot(233) 85 | plt.imshow(img_order2, cmap='gray', interpolation='none') 86 | plt.title('biquadratic') 87 | plt.subplot(234) 88 | plt.imshow(img_order3, cmap='gray', interpolation='none') 89 | plt.title('bicubic') 90 | plt.subplot(235) 91 | plt.imshow(img_order5, cmap='gray', interpolation='none') 92 | plt.title('5th order') 93 | 94 | # You can use the function plt.pause to update your figure 95 | # During the calculation 96 | 97 | plt.pause(.1) 98 | 99 | # Increment the counter 100 | 101 | step = step+1 102 | 103 | # Plot final results 104 | 105 | plt.subplot(236) 106 | plt.imshow(img_cropped, cmap='gray', interpolation='none') 107 | plt.title('original') 108 | -------------------------------------------------------------------------------- /Exercise 3-Interpolation/tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 3-Interpolation/tree.jpg 
-------------------------------------------------------------------------------- /Exercise 4-Segmentation/ex1_segmentation.py: -------------------------------------------------------------------------------- 1 | """ 2 | 17.05.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_segmentation.py 7 | 8 | This exercise is all about counting stars. 9 | The goal is to know how many stars are in the image and what sizes they are. 10 | 11 | As per usual, replace the ???s with the appropriate command(s). 12 | """ 13 | 14 | import numpy as np 15 | import matplotlib.pyplot as plt 16 | import scipy.ndimage as nd 17 | 18 | # Load the respective image 19 | 20 | img = plt.imread('stars.jpg') 21 | 22 | # Sum up all color channels to get a grayscale image. 23 | # use numpy function sum and sum along axis 2, be careful with the datatypes 24 | # rescale the finale image to [0.0, 1.0] 25 | 26 | img = img.sum(axis = 2)/255.0 27 | 28 | # Now look at your image using imshow. Use vmin and vmax parameters in imshow 29 | 30 | plt.figure(1) 31 | plt.title('img') 32 | plt.imshow(img, cmap='gray', interpolation='none', vmin=0.0, vmax=1.0) 33 | plt.colorbar() 34 | 35 | # You can set thresholds to cut the background noise 36 | # Once you are sure you have all stars included use a binary threshold. 37 | # (Tip: a threshold of 0.1 seemed to be good, but pick your own) 38 | 39 | threshold = 0.15 40 | img_bin = img > threshold 41 | 42 | plt.figure(2) 43 | plt.title('img_bin') 44 | plt.imshow(img_bin, cmap='gray', interpolation='none') 45 | 46 | # Now with the binary image use the opening and closing to bring the star 47 | # to compacter format. Take care that no star connects to another 48 | 49 | s1 = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) 50 | img_bin1 = nd.binary_closing(img_bin, structure=s1) 51 | 52 | plt.figure(3) 53 | plt.title('img_bin1') 54 | plt.imshow(img_bin1, cmap='gray', interpolation='none') 55 | 56 | # Remove isolated pixels around the moon with closing by a 2 pixel structure 57 | 58 | s2 = np.array([[0, 0, 0], [0, 1, 1], [0, 0, 0]]) 59 | img_bin2 = nd.binary_opening(img_bin1, structure=s2) 60 | 61 | plt.figure(4) 62 | plt.title('img_bin2') 63 | plt.imshow(img_bin2, cmap='gray', interpolation='none') 64 | 65 | # play with all the morphological options in ndimage package to increase the 66 | # quality if still needed 67 | 68 | #s3 = np.array([[0, 0, 0], [0, 1, 1], [0, 0, 0]]) 69 | #img_bin3 = nd.binary_dilation(img_bin2, structure=s3) # optional 70 | img_bin3 = img_bin2 71 | plt.figure(5) 72 | plt.title('img_bin3') 73 | plt.imshow(img_bin3, cmap='gray', interpolation='none') 74 | 75 | # plotting the sum of all your binary images can help identify if you loose 76 | # stars. In principal every star is present in every binary image, so real 77 | # stars have always at least one pixel maximum 78 | 79 | plt.figure(6) 80 | plt.imshow(img_bin.astype(int) + img_bin1.astype(int) + img_bin2.astype(int) + 81 | img_bin3.astype(int), cmap='jet', interpolation='none') 82 | plt.colorbar() 83 | 84 | # Once you're done, label your image with nd.label 85 | 86 | img_lbld, num_stars = nd.label(img_bin3) 87 | 88 | plt.figure(7) 89 | plt.imshow(img_lbld, cmap='jet', interpolation='none') 90 | plt.colorbar() 91 | 92 | # Use nd.find_objects to return a list of slices through the image for each 93 | # star 94 | 95 | slice_list = nd.find_objects(img_lbld) 96 | 97 | # You can have a look now at the individual stars. 
Just apply the slice to your 98 | # labelled array 99 | 100 | starnum = 150 101 | 102 | plt.figure(8) 103 | plt.title("star %i" % starnum) 104 | plt.imshow(img_lbld[slice_list[starnum-1]], cmap='gray', interpolation='none') 105 | 106 | # Remaining task: Sum up each individual star to get a list of star sizes and 107 | # make a detailed histogram (>100 bins). Take care to exclude the moon! This 108 | # can be done by sorting the star sizes list and removing the last element 109 | 110 | # Remember: img_lbld[slice_list[]] selects one star. Create a list of 111 | # boolian star images (star_list). Afterwards, sum their extent up (take 112 | # care about the datatypes) to get their sizes and sort the list 113 | 114 | star_list = [img_lbld[slc] > 0 for slc in slice_list] 115 | mass_list = [np.sum(star) for star in star_list] 116 | mass_list_sorted = np.sort(mass_list) 117 | mass_list_sorted = mass_list_sorted[0:len (mass_list_sorted) - 2] 118 | 119 | plt.figure(9) 120 | plt.title("sizes of stars") 121 | plt.hist(mass_list_sorted, bins = 200, range = (0, 200), align = 'left') 122 | -------------------------------------------------------------------------------- /Exercise 4-Segmentation/stars.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 4-Segmentation/stars.jpg -------------------------------------------------------------------------------- /Exercise 5-Wave Propagation/ex1_fesnel_propagation.py: -------------------------------------------------------------------------------- 1 | """ 2 | 24.05.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_fresnel_propagation.py 7 | 8 | Using numpy, matplotlib, scipy 9 | 10 | Script to perform Fresnel (near-field) wavefront propagation. 11 | Check your figures against the lecture notes. 12 | You need to replace the ??? in the code with the required commands. 13 | """ 14 | 15 | import numpy as np 16 | import matplotlib.pyplot as plt 17 | 18 | # Parameters 19 | 20 | psize = 1e-5 # Detector pixelsize 21 | wlen = 6e-7 # Wavelength (600nm = visible light) 22 | prop_dist = 3e-3 # Propagation distance 23 | 24 | # Read in test wavefield from image 25 | 26 | img = plt.imread('tum.png') 27 | 28 | # Sum up all channels 29 | 30 | img = np.sum(img, axis=-1, dtype=float) 31 | 32 | # Scale such that max value is 1 33 | 34 | img = img / img.max() 35 | 36 | # Generate a pure phase wavefield spanning from zero to np.pi 37 | # from img 38 | 39 | w = np.exp(1j * np.pi * img) #it's a pure phase wavefield so amplitude is 1 40 | 41 | plt.figure(1) 42 | plt.imshow(np.angle(w), cmap='jet', interpolation='none') 43 | plt.title('Wavefront phase') 44 | plt.colorbar() 45 | 46 | # Generate the grids 47 | 48 | u = 2. * np.pi * np.fft.fftfreq(img.shape[1], psize) 49 | v = 2. 
* np.pi * np.fft.fftfreq(img.shape[0], psize) 50 | 51 | uu, vv = np.meshgrid(u, v, indexing='xy') 52 | 53 | # Generate wave number 54 | 55 | k = 2 * np.pi/wlen 56 | 57 | # Generate the kernel 58 | 59 | kernel = np.exp(-.5j * (prop_dist/k) * (uu**2 + vv**2)) 60 | 61 | # Generate the propagated wave array 62 | 63 | out = np.fft.ifft2(np.fft.fft2(w) * kernel) 64 | 65 | # Plot the phase of the kernel, maybe the function np.angle helps ;) 66 | 67 | plt.figure(2) 68 | plt.imshow(np.fft.fftshift(np.angle(kernel)), cmap = 'jet', interpolation='none') 69 | plt.title('Fresnel kernel') 70 | plt.colorbar() 71 | 72 | # Calculate the intensity from the propagated wave array 73 | 74 | I = np.abs(out)**2 75 | 76 | # Plot the propagated intensity (with zoom to centre of image) 77 | 78 | plt.figure(3) 79 | plt.imshow(I[int(img.shape[0]/2-256):int(img.shape[0]/2+256), 80 | int(img.shape[1]/2-256):int(img.shape[1]/2+256)], 81 | cmap='gray', interpolation='none') 82 | plt.title('Intensity') 83 | plt.colorbar() 84 | -------------------------------------------------------------------------------- /Exercise 5-Wave Propagation/ex2_fraunhofer_propagation.py: -------------------------------------------------------------------------------- 1 | """ 2 | 24.05.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_fraunhofer_propagation.py 7 | 8 | Using numpy, matplotlib 9 | 10 | Script to generate a speckle pattern from an atmospheric phase screen 11 | using Fraunhofer propagation. 12 | You need to replace the ??? in the code with the required commands. 13 | """ 14 | 15 | import numpy as np 16 | import matplotlib.pylab as plt 17 | 18 | N = 1024 # Square dimension of phase screen 19 | 20 | radius = 128 # Radius of the circular aperture in pixels 21 | 22 | # Generate an NxN array of zeros for the aperture 23 | 24 | aperture = np.zeros([N, N]) 25 | 26 | # Calculate the aperture magnitude: 1's of radius given above centered 27 | # in the NxN array of zeros 28 | # Functions of interest include np.meshgrid, np.linspace, range ... 29 | # Hint eq of circle x^2 + y^2 = r^2 30 | xx, yy =np.meshgrid(np.linspace(-N/2, N/2, N), np.linspace(-N/2, N/2, N)) 31 | circle = xx**2 + yy**2 32 | aperture[circle < radius**2] = 1 33 | 34 | # Plot your aperture function 35 | 36 | plt.figure(1) 37 | plt.imshow(aperture, cmap='gray', interpolation='none') 38 | plt.colorbar() 39 | 40 | # Load in the wavefront phase screen and plot it. 41 | 42 | screen = np.loadtxt('wavefront.txt') 43 | plt.figure(2) 44 | plt.imshow(screen, cmap='jet', interpolation='none') 45 | plt.colorbar() 46 | 47 | # Propagate the phase screen from the aperture to the focal plane using 48 | # Fraunhofer propagation. 49 | # Hints - aperture is the magnitude, and screen is the phase 50 | # Fraunhofer propagation - wave at focal plane is FT of wave at aperture plane 51 | # You may need to use an fftshift here! 
52 | # Intensity is the absolute value of field at the focal plane squared 53 | 54 | speckle = np.abs(np.fft.fftshift(np.fft.fft2(aperture * np.exp(screen * -1j))))**2 55 | 56 | # Plot the speckle image (zoomed in to show the centre) 57 | 58 | plt.figure(3) 59 | plt.imshow(speckle[int(N/2-64):int(N/2+64), int(N/2-64):int(N/2+64)], 60 | cmap='jet', aspect='auto', interpolation='none') 61 | plt.colorbar() 62 | plt.title('Intensity') 63 | -------------------------------------------------------------------------------- /Exercise 5-Wave Propagation/tum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 5-Wave Propagation/tum.png -------------------------------------------------------------------------------- /Exercise 6-Phase Retrieval/ex1_paganin_phase_retrieval.py: -------------------------------------------------------------------------------- 1 | """ 2 | 07.06.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_paganin_phase_retrieval.py 7 | 8 | Script recover the projected thickness of a teflon plate quantitatively from 9 | its intensity measurement in the near-field. 10 | 11 | As usual replace the ???s with the appropriate command(s). 12 | """ 13 | 14 | import numpy as np 15 | import matplotlib.pyplot as plt 16 | 17 | # Load projection data from file. It is a 250 micro thick Teflon plate. Note 18 | # that this data is already flatfield-corrected! 19 | 20 | proj = np.load('proj.npy') 21 | 22 | # Look at the data. You can see the edge-enhanced borders at the transition 23 | # from Teflon to air. In addition, the absorbing properties of the Teflon plate 24 | # are visible. Note that the background-values are around 1. 25 | 26 | plt.figure() 27 | plt.title('intensity') 28 | plt.imshow(proj, cmap='gray', interpolation='none') 29 | plt.colorbar() 30 | 31 | # Also plot a line profile through the middle row. 32 | 33 | plt.figure() 34 | plt.plot(proj[proj.shape[0]//2][:]) 35 | 36 | # The parameters of the setup that influence the image formation process are 37 | # specified below. 38 | 39 | pixel_size = .964e-6 40 | distance = 8.57e-3 41 | 42 | # As Paganin assumes a single material which has to be know beforehand, we look 43 | # up the absorption index and the decrement of the real part of the complex 44 | # refractive index in some database for the given energy. I do that for you. 45 | 46 | mu = 691. 47 | delta = 2.6e-6 48 | 49 | # I help you with creating the frequencies that correspond to the different 50 | # parts of the Fourier image according to our convention. 51 | 52 | v = 2. * np.pi * np.fft.fftfreq(proj.shape[0], d=pixel_size) 53 | u = 2. * np.pi * np.fft.fftfreq(proj.shape[1], d=pixel_size) 54 | ky, kx = np.meshgrid(v, u, indexing='ij') 55 | 56 | # Build the Paganin kernel. Its representation was discussed in the lecture. 57 | 58 | Paganin = (1/(distance*(delta/mu)*(kx**2 + ky**2) + 1)) 59 | 60 | # Recover the thickness from the projection by applying the Paganin kernel onto 61 | # the intensity measurement. 62 | 63 | trace = np.multiply(np.divide(-1, mu), np.log(np.fft.ifft2(np.multiply(Paganin, np.fft.fft2(proj))))) 64 | 65 | # Plot the recovered thickness of the sample in microns. Also plot a line 66 | # through the center row of the trace. Check if the retrieved thickness matches 67 | # the stated thickness in the beginning of our exercise. 
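# (For reference, a sketch of the closed form that the kernel above
# implements, assuming the flatfield-corrected intensity I/I0 = proj:
#   t(x, y) = -(1/mu) * ln( IFFT2[ FFT2(I/I0) / (1 + (delta/mu) * z * (kx^2 + ky^2)) ] )
# with propagation distance z = 8.57e-3, mu = 691 and delta = 2.6e-6, all in
# SI units. The line profile plotted below should therefore level off near
# 2.5e-4 m, i.e. the 250 micron plate thickness stated at the top of this
# script, apart from noise and edge effects.)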
68 | 69 | plt.figure() 70 | plt.title('trace') 71 | plt.imshow(np.real(trace), cmap='gray', interpolation='none') 72 | plt.colorbar() 73 | 74 | plt.figure() 75 | plt.plot((trace[trace.shape[0]//2][:])*np.power(10, 6)) 76 | plt.ylabel('thickness in microns') 77 | -------------------------------------------------------------------------------- /Exercise 6-Phase Retrieval/ex2_iterative_phase_retrieval.py: -------------------------------------------------------------------------------- 1 | """ 2 | 07.06.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex2_iterative_phase_retrieval.py 7 | 8 | Using numpy, matplotlib, scipy 9 | 10 | Script to perform iterative phase retrieval to recover the phase at the 11 | aperture plane from the intensity at the focal plane and the known support 12 | magnitude in the aperture plane. 13 | 14 | The chosen phase screen is simply a sum of tip and tilt modes, to ensure fast 15 | convergence and no problems with phase wrapping, twin images or piston offset. 16 | 17 | As per usual, replace the ???s with the appropriate command(s). 18 | """ 19 | import numpy as np 20 | import matplotlib.pylab as plt 21 | 22 | N = 512 # Square dimension of phase screen 23 | 24 | # Calculate the support constraint (magnitude at the aperture plane) 25 | # Generate a round mask and make sure that the radius is 128 pixels. 26 | uu, vv = np.meshgrid(np.linspace(-N//2, N//2, N), np.linspace(-N//2, N//2, N)) 27 | aperture = (uu**2 + vv**2) < 128**2 28 | 29 | # Plot your aperture function 30 | plt.figure(1) 31 | plt.imshow(aperture, cmap='gray') 32 | plt.title('Support constraint') 33 | 34 | # Generate the tip & tilt zernikes 35 | x = range(N) - N/2*np.ones(N) + 0.5 36 | y = range(N) - N/2*np.ones(N) + 0.5 37 | xx, yy = np.meshgrid(x,y) 38 | tip = xx / np.max(xx) 39 | tip = tip * aperture 40 | tilt = yy / np.max(yy) 41 | tilt = tilt * aperture 42 | 43 | # set the phase screen as a combination of tip and tilt 44 | screen = tip*4. + tilt*3. 45 | 46 | plt.figure(2) 47 | plt.imshow(screen * aperture, cmap='jet') 48 | plt.colorbar() 49 | plt.title('Aperture phase') 50 | 51 | # Propagate the phase screen from the aperture to the focal plane using 52 | # Fraunhofer propagation. 53 | # Hints - aperture is the magnitude, and screen is the phase 54 | # You may need to use a fftshift here 55 | # Intensity is the absolute value of field at the focal plane squared 56 | speckle = np.abs(np.fft.fftshift(np.fft.fft2(aperture * np.exp(screen * -1j))))**2 57 | 58 | # Plot the speckle image (zoomed in to show the centre) 59 | plt.figure(3) 60 | plt.imshow(speckle[N//2-32:N//2+32,N//2-32:N//2+32], aspect='auto', 61 | extent=(N//2-32,N//2+32,N//2-32,N//2+32), interpolation='none', cmap='gray') 62 | plt.colorbar() 63 | plt.title('Intensity') 64 | 65 | nloops = 50 # Number of loops (iterations) to run the phase retrieval 66 | # If your code doesn't converge in <50, there is something wrong! 
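# (The loop below is essentially Gerchberg-Saxton error reduction; one
# iteration, written out with the variable names used in this script:
#   aperture_plane = ifft2(ifftshift(focal_plane))     # back to aperture plane
#   aperture_plane *= aperture                         # enforce support
#   focal_plane = fftshift(fft2(aperture_plane))       # forward to focal plane
#   focal_plane = focal_magnitude * exp(1j * angle(focal_plane))  # enforce magnitude
# Only the phases are free; the known aperture support and the measured
# focal-plane magnitude act as the two alternating constraints.)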
67 | 68 | # Calculate the magnitude at the focal plane as a function of the intensity 69 | focal_magnitude = np.sqrt(speckle) 70 | 71 | # Initial guess for the focal plane 72 | focal_plane = focal_magnitude * np.exp(1j*np.zeros((N, N))) 73 | 74 | # Create empty arrays to store the values for the errors and the strehl 75 | errors_aperture = np.zeros(nloops) 76 | errors_focal = np.zeros(nloops) 77 | 78 | for loop in np.arange(nloops): 79 | 80 | print(loop) 81 | 82 | # calculate the field at the aperture from the focal plane 83 | # using Fraunhofer (ifft2). May need an ifftshift here! 84 | aperture_plane = np.fft.ifft2(np.fft.ifftshift(focal_plane)) 85 | 86 | # Enforce the support constraint in the aperture plane 87 | # ie zero all the points outside the known extent of the aperture 88 | aperture_plane = aperture_plane*aperture 89 | 90 | # calculate the error in the apeture plane as the 91 | # difference between the amplitudes within the aperture 92 | errors_aperture[loop] = np.sum((np.abs(aperture_plane)-aperture)**2) 93 | 94 | # calculate the field at the focal plane from the aperture plane 95 | # using Fraunhofer (fft2). May need an fftshift here! 96 | focal_plane = np.fft.fftshift(np.fft.fft2(aperture_plane)) 97 | 98 | # calculate the error in the focal plane as the 99 | # difference between the estimated magnitude and known magnitude 100 | errors_focal[loop] = np.sum((np.abs(focal_plane)-focal_magnitude)**2) 101 | 102 | # enforce the magnitude constraint at the focal plane 103 | focal_plane = focal_magnitude * np.exp(1j*np.angle(focal_plane)) 104 | 105 | 106 | # Plot the figures - compare with the lecture slides 107 | plt.figure(4) 108 | plt.imshow(np.angle(aperture_plane) * aperture, cmap='jet') 109 | plt.title('Phase aperture plane') 110 | plt.colorbar() 111 | 112 | plt.figure(5) 113 | plt.imshow(np.abs(aperture_plane) * aperture, cmap='gray') 114 | plt.title('Magnitude aperture plane') 115 | plt.colorbar() 116 | 117 | plt.figure(6) 118 | plt.imshow(np.angle(focal_plane), cmap='jet') 119 | plt.title('Phase focal plane') 120 | plt.colorbar() 121 | 122 | plt.figure(7) 123 | plt.imshow(np.abs(focal_plane)[N//2-32:N//2+32,N//2-32:N//2+32], aspect='auto', 124 | extent=(N//2-32,N//2+32,N//2-32,N//2+32), interpolation='none', cmap='gray') 125 | plt.title('Magnitude focal plane') 126 | plt.colorbar() 127 | 128 | plt.figure(8) 129 | plt.plot(np.log(errors_aperture)) 130 | plt.xlabel('Iteration') 131 | plt.ylabel('Log Error') 132 | plt.title('Error reduction - Aperture plane') 133 | 134 | plt.figure(9) 135 | plt.plot(np.log(errors_focal)) 136 | plt.xlabel('Iteration') 137 | plt.ylabel('Log Error') 138 | plt.title('Error reduction - Focal plane') 139 | -------------------------------------------------------------------------------- /Exercise 6-Phase Retrieval/proj.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 6-Phase Retrieval/proj.npy -------------------------------------------------------------------------------- /Exercise 7-Resolution and Noise/2018_06_07_Resolution_and_Noise_filled.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 7-Resolution and Noise/2018_06_07_Resolution_and_Noise_filled.pdf -------------------------------------------------------------------------------- 
/Exercise 7-Resolution and Noise/ex1_correlation.py: -------------------------------------------------------------------------------- 1 | """ 2 | 14.06.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_correlation.py 7 | 8 | Using numpy, matplotlib, scipy 9 | 10 | The goal of this exercise is to get acquainted with noise, and the related 11 | concepts of noise power spectra and correlation. 12 | The exercise is split into two short subtasks. 13 | You need to replace the ??? in the code with the required commands. 14 | """ 15 | 16 | import numpy as np 17 | import matplotlib.pyplot as plt 18 | import scipy.ndimage as nd 19 | #from scipy.signal.signaltools import correlate2d 20 | from skimage.feature import register_translation 21 | 22 | # Part A: noise and correlation 23 | # create noise, and calculate its noise power spectrum and correlation 24 | 25 | # Create a 100x100 array of Gaussian noise with mean=0 and standard deviation 26 | # sigma=1. Use the function numpy.rand.randn. 27 | # Then use scipy.ndimage.gaussian_filter to create a low and high pass 28 | # filtered version of your noise. (Remember from lecture 2: a high pass can be 29 | # modelled as the original image minus the low pass image) 30 | 31 | white_noise = np.random.randn(100, 100) 32 | low_pass = nd.gaussian_filter(white_noise, 1) 33 | high_pass = white_noise - low_pass 34 | 35 | # Calculate and plot the noise power spectra of each noise signal. 36 | 37 | nps_white = np.abs(np.fft.fft2(white_noise))**2 38 | nps_low = np.abs(np.fft.fft2(low_pass))**2 39 | nps_high = np.abs(np.fft.fft2(high_pass))**2 40 | 41 | # Calculate and plot the auto-correlation of each noise signal using the 42 | # correlation theorem. Center the maximum cross-correlation in the middle of 43 | # the image, as already shown in the lecture for white noise. 44 | 45 | corr_white = np.fft.fftshift(np.fft.ifft2(nps_white)).real 46 | corr_low = np.fft.fftshift(np.fft.ifft2(nps_low)).real 47 | corr_high = np.fft.fftshift(np.fft.ifft2(nps_high)).real 48 | 49 | # The autocorrelation tells you the correlation between two pixels as a 50 | # function of the distance between those pixels. For white noise, the 51 | # correlation is only nonzero if the distance is zero, else there is no 52 | # correlation between pixels. 53 | # For the low-pass noise, there is a correlation in the neighborhood of each 54 | # pixel due to the "patchy" character of the noise. The correlation falls off 55 | # quickly with increasing distance. The high-pass noise exhibits anti- 56 | # correlation in its immediate neighbourhood,i.e. you KNOW that a pixel will 57 | # be different from its neighbor due to the "fast" changes in the noise. 58 | 59 | # Part B: Image shift using cross-correlation 60 | # Use cross-correlation for a simple image registration task 61 | 62 | # Read in the two images worldA and worldB. Both images show the same object, 63 | # but shifted by a small amount relative to each other. The task is to 64 | # estimate the shift using cross-correlation. 65 | 66 | im_shifted1 = plt.imread('worldA.jpg') / 255. 67 | im_shifted2 = plt.imread('worldB.jpg') / 255. 68 | im_shifted1 = im_shifted1.mean(axis=2) 69 | im_shifted2 = im_shifted2.mean(axis=2) 70 | 71 | # Calculate the cross-correlation between the two images by using the 72 | # correlation theorem again. 
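# (Correlation theorem, for reference: the cross-correlation of a and b can be
# computed as
#   ccorr = IFFT2( FFT2(a) * conj(FFT2(b)) ),
# which is what the fft-based expression below evaluates; the fftshift only
# moves zero shift to the image centre so that the peak position can be read
# off relative to the centre.)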
73 | 74 | #ccorr = correlate2d(im_shifted1, im_shifted2, boundary = 'symm', mode = 'same') '''too much time consuming''' 75 | 76 | ccorr = np.fft.fftshift(np.abs(np.fft.ifft2(np.fft.fft2(im_shifted1) *\ 77 | np.conj(np.fft.fft2(im_shifted2))))) 78 | 79 | # Calculate the shift as a vector tuple. (you might want to use numpy.argmax 80 | # and np.unravel_index, or numpy.where) 81 | print("Register_translation function gives the relative translation of two images\ 82 | since you suggested the function to use, so I am using both of them to compute\ 83 | give the shift, for np.unravel_index we have to subtract shift_y by it's \ 84 | respective dimenion to determine the displacement") 85 | shift, error, phase = register_translation(im_shifted1, im_shifted2) 86 | shift_y, shift_x = shift 87 | print("Calculatig Shift Using register_translation") 88 | print(shift_y, shift_x) 89 | 90 | print("Calculatig Shift Using np.unravel_index") 91 | shift_y, shift_x = np.unravel_index(np.argmax(ccorr), ccorr.shape) 92 | # Print to screen the shifts 93 | 94 | print(shift_y - im_shifted1.shape[0], shift_x) 95 | 96 | # The crosscorrelation will be highest if the shift from the correlation 97 | # equals the shift between the two images, so you have to search for the 98 | # coordinate of the maximum cross-correlation relative to the origin. 99 | 100 | # Plot results 101 | 102 | # Part A 103 | 104 | plt.figure(1, figsize=(15, 5)) 105 | plt.subplot(1, 3, 1) 106 | plt.imshow(white_noise, cmap='gray', interpolation='none') 107 | plt.title('white noise spatial domain') 108 | plt.subplot(1, 3, 2) 109 | plt.imshow(low_pass, cmap='gray', interpolation='none') 110 | plt.title('low pass spatial domain') 111 | plt.subplot(1, 3, 3) 112 | plt.imshow(high_pass, cmap='gray', interpolation='none') 113 | plt.title('high pass spatial domain') 114 | 115 | plt.figure(2, figsize=(15, 5)) 116 | plt.subplot(1, 3, 1) 117 | plt.imshow(nps_white, cmap='gray', interpolation='none') 118 | plt.title('white noise power spectrum') 119 | plt.subplot(1, 3, 2) 120 | plt.imshow(nps_low, cmap='gray', interpolation='none') 121 | plt.title('low pass power spectrum') 122 | plt.subplot(1, 3, 3) 123 | plt.imshow(nps_high, cmap='gray', interpolation='none') 124 | plt.title('high pass power spectrum') 125 | 126 | plt.figure(3, figsize=(15, 5)) 127 | plt.subplot(1, 3, 1) 128 | plt.imshow(corr_white, cmap='gray', interpolation='none') 129 | plt.title('white noise autocorrelation') 130 | plt.subplot(1, 3, 2) 131 | plt.imshow(corr_low, cmap='gray', interpolation='none') 132 | plt.title('low pass noise autocorrelation') 133 | plt.subplot(1, 3, 3) 134 | plt.imshow(corr_high, cmap='gray', interpolation='none') 135 | plt.title('high pass noise autocorrelation') 136 | 137 | # Part B 138 | 139 | plt.figure(4, figsize=(15, 5)) 140 | plt.subplot(1, 3, 1) 141 | plt.imshow(im_shifted1, cmap='gray', interpolation='none') 142 | plt.title('image1') 143 | plt.subplot(1, 3, 2) 144 | plt.imshow(im_shifted2, cmap='gray', interpolation='none') 145 | plt.title('image2') 146 | plt.subplot(1, 3, 3) 147 | plt.imshow(ccorr, cmap='gray', interpolation='none') 148 | plt.title('crosscorrelation') 149 | -------------------------------------------------------------------------------- /Exercise 7-Resolution and Noise/ex2_deconvolution.py: -------------------------------------------------------------------------------- 1 | """ 2 | 14.06.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_deconvolution.py 7 | 8 
| Using numpy, matplotlib, scipy 9 | 10 | The goal of this exercise is to try out some deconvolution tasks in the 11 | presence of noise. First the "naive" deconvolution, then using the Wiener 12 | filter. 13 | You need to replace the ??? in the code with the required commands. 14 | """ 15 | 16 | import numpy as np 17 | import matplotlib.pyplot as plt 18 | import scipy.ndimage as nd 19 | 20 | # load tree image 21 | 22 | img = plt.imread('tree.jpg') / 255. 23 | img = img.mean(axis=2) 24 | sh = img.shape 25 | 26 | # Let's create a convolution kernel (PSF) and produce the convolved image. We 27 | # want the convolved image to suffer from motion blur in the direction of the 28 | # diagonal. The function np.diag creates an appropriate convolution kernel. 29 | # The kernel should be 51x51 pixels with 1 on the diagonal, 0 otherwise, and 30 | # then normalized so that the sum of the diagonal is 1. 31 | 32 | M = 51 33 | psf = np.diag(np.ones(M))/M 34 | img_conv = nd.convolve(img, psf, mode='wrap') 35 | 36 | # Add zero-mean Gaussian noise with a standard deviation sigma to the image 37 | # Hint: look at np.random.randn 38 | 39 | sigma = .01 40 | noise = sigma*np.random.randn(sh[0], sh[0]) 41 | img_noisy = img_conv + noise 42 | 43 | # In order to use Fourier space deconvolution we need to zeropad our 44 | # convolution kernel to the same size as the original image 45 | 46 | psf_pad = np.zeros_like(img) 47 | psf_pad[sh[0]//2-M//2:sh[0]//2+M//2+1, sh[1]//2-M//2:sh[1]//2+M//2+1] = psf 48 | 49 | # Now we'll try out "naive" deconvolution by dividing the noisy, blurred image 50 | # by the filter function in the Fourier domain 51 | 52 | img_deconv = np.real(np.fft.fftshift(np.fft.ifft2(np.fft.fft2(img_noisy)/np.fft.fft2(psf_pad)))) 53 | 54 | #img_deconv = np.fft.ifft2((np.fft.fft2(img_noisy)/np.fft.fft2(psf_pad)) 55 | # As soon as you add a little noise to the image, the naive deconvolution will 56 | # go wrong, since for white noise, the noise power will exceed the signal 57 | # power for high frequencies. Since the inverse filtering also enhances the 58 | # power frequncies the result will be nonsense 59 | 60 | # Let's first define the Wiener deconvolution in a seperate function. 61 | 62 | 63 | def wiener_deconv(img, psf, nps): 64 | """ 65 | This function performs an image deconvolution using a Wiener filter. 66 | 67 | Parameters 68 | ---------- 69 | img : ndarray 70 | convolved image 71 | psf : ndarray 72 | the convolution kernel 73 | nps : float or ndarray 74 | noise power spectrum of the image, you will have to choose an 75 | appropriate value 76 | 77 | Returns 78 | ------- 79 | deconvolved_image : ndarray 80 | The deconvolved image or volume depending on the input image. 81 | 82 | Notes 83 | ----- 84 | If a float is given as nps, it assumes intrinsically white noise. 85 | A nps of 0 corresponds to no noise, and therefore naive image 86 | deconvolution. 87 | """ 88 | # Apart from the noise power spectrum (nps), which is passed as a 89 | # parameter, you'll also need the frequency representation of your psf, 90 | # the power spectrum of the filter and the signal power spectrum (sps). 91 | # Calculate them. 
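# (For reference, the filter assembled below: with H = FFT2(psf), the Wiener
# deconvolution filter is
#   W = (1/H) * |H|^2 / (|H|^2 + NPS/SPS),
# i.e. the naive inverse filter 1/H, damped at frequencies where the expected
# noise power NPS exceeds the signal power SPS.)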
92 | 93 | f_psf = np.fft.fft2(psf) 94 | sps_psf = np.abs(f_psf)**2 95 | sps = np.abs(np.fft.fft2(img))**2 96 | 97 | # create the Wiener filter 98 | 99 | wiener_filter = (1/f_psf)*(sps_psf/(sps_psf + (nps/sps))) 100 | 101 | # Do a Fourier space convolution of the image with the wiener filter 102 | 103 | deconv_img = np.fft.fftshift(np.real(np.fft.ifft2( 104 | np.fft.fft2(img) * wiener_filter))) 105 | 106 | return deconv_img 107 | 108 | # Try out Wiener deconvolution. 109 | # Assume white noise, i.e. a noise power spectrum that has a constant value 110 | # for all frequencies. Try out a few values to get a good result. 111 | 112 | nps = np.fft.fft2(sigma * np.random.randn(sh[0], sh[1])) 113 | 114 | img_deconv_W = wiener_deconv(img_noisy, psf_pad, nps) 115 | 116 | # The Wiener filter is essentially the same as the naive filter, only with an 117 | # additional weighting factor that depends on the SNR in the image in 118 | # frequency domain. Frequencies where the noise power exceeds the signal power 119 | # will be damped. 120 | 121 | plt.figure(1) 122 | plt.subplot(2, 2, 1) 123 | plt.imshow(img, cmap='gray', interpolation='none') 124 | plt.title('original image') 125 | plt.subplot(2, 2, 2) 126 | plt.imshow(img_noisy, cmap='gray', interpolation='none') 127 | plt.title('acquired noisy image') 128 | plt.subplot(2, 2, 3) 129 | plt.imshow(img_deconv, cmap='gray', interpolation='none') 130 | plt.title('naive deconvolution') 131 | plt.subplot(2, 2, 4) 132 | plt.imshow(img_deconv_W, cmap='gray', interpolation='none') 133 | plt.title('Wiener deconvolution') 134 | -------------------------------------------------------------------------------- /Exercise 7-Resolution and Noise/tree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 7-Resolution and Noise/tree.jpg -------------------------------------------------------------------------------- /Exercise 7-Resolution and Noise/worldA.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 7-Resolution and Noise/worldA.jpg -------------------------------------------------------------------------------- /Exercise 7-Resolution and Noise/worldB.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 7-Resolution and Noise/worldB.jpg -------------------------------------------------------------------------------- /Exercise 8 - Tomography/Head_CT_scan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 8 - Tomography/Head_CT_scan.jpg -------------------------------------------------------------------------------- /Exercise 8 - Tomography/backup_filtered_sinogram.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 8 - Tomography/backup_filtered_sinogram.npy -------------------------------------------------------------------------------- /Exercise 8 - Tomography/backup_sinogram.npy: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 8 - Tomography/backup_sinogram.npy -------------------------------------------------------------------------------- /Exercise 8 - Tomography/ex1_tomography.py: -------------------------------------------------------------------------------- 1 | """ 2 | 21.06.2018 3 | Image Processing Physics TU Muenchen 4 | Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang) 5 | 6 | ex1_tomography.py 7 | 8 | This exercise will be about a very simplified implementation of tomographic 9 | reconstruction, using filtered backprojection. 10 | 11 | The exercise consists of three parts: 12 | First, you will simulate the data aquisistion in computed tomography, by 13 | calculating the sinogram from a given input sample slice. 14 | Second, you will have to apply a ramp filter to this sinogram. 15 | Third, you will implement a simple backprojection algorithm. 16 | 17 | If you do not manage to do one part of the exercise you can still go one by 18 | loading the provided .npy arrays 'backup_sinogram.npy' and 19 | 'backup_filtered_sinogram.npy'. 20 | 21 | You need to replace the ??? in the code with the required commands. 22 | """ 23 | import numpy as np 24 | import scipy.ndimage as nd 25 | import matplotlib.pyplot as plt 26 | import sys 27 | 28 | 29 | def roundmask(shape, radius=1): 30 | """ 31 | This function creates a ellipsoid mask given a certain shape and radius. 32 | Give shape as a tuple indicating each image axis. Radius=1 means the mask 33 | will exactly touch each image axis, with radius=0.5 the mask will fill half 34 | of the image. 35 | """ 36 | x = np.linspace(-1, 1, shape[1]) 37 | y = np.linspace(-1, 1, shape[0]) 38 | xx, yy = np.meshgrid(x, y) 39 | return xx**2 + yy**2 < radius**2 40 | 41 | 42 | def forwardproject(sample, angles): 43 | """ 44 | Simulate data aquisition in tomography from line projections. 45 | Forwardproject a given input sample slice to obtain a simulated sinogram. 46 | 47 | Hints 48 | ----- 49 | Use scipy.ndimage.rotate(..., reshape=False) to simulate the sample 50 | rotation. 51 | Use numpy.sum() along one axis to simulate the line projection integral. 52 | """ 53 | sh = np.shape(sample) # calculate shape of sample 54 | Nproj = len(angles) # calculate number of projections 55 | 56 | # define empty sinogram container, angles along y-axis 57 | sinogram = np.zeros((Nproj, sh[1])) 58 | 59 | for proj in np.arange(Nproj): # loop over all projections 60 | sys.stdout.write("\r Simulating: %03i/%i" % (proj+1, Nproj)) 61 | sys.stdout.flush() 62 | im_rot = nd.rotate(sample, angles[proj], reshape=False) 63 | sinogram[proj,:] = np.sum(im_rot, axis=0) 64 | return sinogram 65 | 66 | 67 | def filter_sino(sinogram): 68 | """ 69 | Filter a given sinogram using a ramp filter 70 | 71 | Hints: 72 | First define a ramp filter in Fourier domain (you can use np.fft.fftfreq). 73 | Filter the sinogram in Fourier space unsing the convolution theorem. 74 | """ 75 | 76 | Nproj, Npix = np.shape(sinogram) 77 | 78 | # Generate basic ramp filter (hint: there is the function np.fft.fftfreq. 79 | # Try it and see what it does. 
Watch out for a possible fftshift) 80 | ramp_filter = np.abs(np.fft.fftfreq(Npix)) 81 | 82 | # filter the sinogram in Fourier space in detector pixel direction 83 | # Use the np.fft.fft along the axis=1 84 | sino_ft = np.fft.fft(sinogram, axis=1) 85 | 86 | # Multiply the ramp filter onto the 1D-FT of the sinogram and transform it 87 | # back into spatial domain 88 | sino_filtered = np.real(np.fft.ifft(sino_ft * ramp_filter, axis=1)) 89 | 90 | return sino_filtered 91 | 92 | 93 | def backproject(sinogram, angles): 94 | """ 95 | Backproject a given sinogram. 96 | Hints: 97 | Perform the backprojection inversely to the way we did the 98 | forwardprojection, by smearing each projection in the sinogram back along 99 | the axis that you summed before in forwardproject() (you can use for 100 | example numpy.tile() for this), then rotating the resulting backprojection 101 | to get the right backprojection angle. 102 | Use scipy.ndimage.rotate(...,...,reshape=False) 103 | Using roundmask helps to improve the result. 104 | """ 105 | # calculate number of projections, and pixels 106 | Nproj, Npix = np.shape(sinogram) 107 | # define empty container for reconstruction of sample 108 | reconstruction = np.zeros((Npix, Npix)) 109 | 110 | for proj in np.arange(Nproj): # loop over all projections 111 | sys.stdout.write("\r Reconstructing: %03i/%i" % (proj+1, Nproj)) 112 | sys.stdout.flush() 113 | 114 | backprojection = np.tile(sinogram[proj, :], (Npix, 1)) 115 | backprojection /= Npix # Just normalization 116 | rotated_backprojection = nd.rotate(backprojection, -angles[proj], reshape=False) 117 | 118 | # Add the rotated backprojection multiplied with a roundmask 119 | reconstruction += rotated_backprojection * roundmask((Npix, Npix)) 120 | 121 | return reconstruction 122 | 123 | 124 | # read in sample data (in reality, this data is unknown and what you are 125 | # looking for) 126 | sample = plt.imread('Head_CT_scan.jpg') 127 | 128 | # define vector containing the projection angles 129 | Nangles = 301 130 | angles = np.linspace(0, 360, Nangles, False) 131 | 132 | # simulate the process of tomographic data acquisition by line projections 133 | sino = forwardproject(sample, angles) 134 | 135 | # use this line if you do not manage the last step 136 | # sino = np.load('backup_sinogram.npy') 137 | 138 | # filter the sinogram with the ramp filter (or some other filter) 139 | filtered_sino = filter_sino(sino) 140 | 141 | # use this line if you do not manage the last step 142 | # filtered_sino = np.load('backup_filtered_sinogram.npy') 143 | 144 | # reconstruct the image from its filtered sinogram 145 | reco = backproject(filtered_sino, angles) 146 | 147 | plt.figure(1, figsize=(12, 12)) 148 | plt.subplot(2, 2, 1) 149 | plt.imshow(sample, cmap='gray', interpolation='none') 150 | plt.subplot(2, 2, 2) 151 | plt.imshow(sino, cmap='gray', interpolation='none') 152 | plt.subplot(2, 2, 3) 153 | plt.imshow(filtered_sino, cmap='gray', interpolation='none') 154 | plt.subplot(2, 2, 4) 155 | plt.imshow(reco, vmin=0., cmap='gray', interpolation='none') 156 | 157 | 158 | # Image Artifacts 159 | # --------------- 160 | 161 | # Artifact 1 - Hot / Dead Pixel 162 | # ----------------------------- 163 | Nangles = 301 164 | angles = np.linspace(0, 360, Nangles, False) 165 | 166 | sino = forwardproject(sample, angles) 167 | 168 | # simulate a dead pixel in the detector line 169 | sino[:, 120] = 0 170 | 171 | # filter the sinogram with the ramp filter and reconstruct it 172 | filtered_sino = filter_sino(sino) 173 | reco = 
backproject(filtered_sino, angles) 174 | 175 | plt.figure(2, figsize=(12, 12)) 176 | plt.suptitle('dead pixel') 177 | plt.subplot(2, 2, 1) 178 | plt.imshow(sample, cmap='gray', interpolation='none') 179 | plt.subplot(2, 2, 2) 180 | plt.imshow(sino, cmap='gray', interpolation='none') 181 | plt.subplot(2, 2, 3) 182 | plt.imshow(filtered_sino, cmap='gray', interpolation='none') 183 | plt.subplot(2, 2, 4) 184 | plt.imshow(reco, vmin=sample.min(), vmax=sample.max(), 185 | cmap='gray', interpolation='none') 186 | 187 | 188 | # Artifact 2 - Simulate a center shift 189 | # ------------------------------------ 190 | # Intrinsically, tomography assumes that the rotation axis is in the center of 191 | # each projection. If this is not the case, each projection is shifted left or 192 | # right with respect to the optical axis. These are called center shift. 193 | 194 | Nangles = 301 195 | angles = np.linspace(0, 360, Nangles, False) 196 | 197 | sino = forwardproject(sample, angles) 198 | 199 | # shift the sinogram by a few pixels (~2) or pad the detector either to the 200 | # left or right side. 201 | np.append(sino, np.ones((Nangles, 10)), axis=1) 202 | 203 | # filter the sinogram with the ramp filter and reconstruct it 204 | filtered_sino = filter_sino(sino) 205 | reco = backproject(filtered_sino, angles) 206 | 207 | plt.figure(3, figsize=(12, 12)) 208 | plt.suptitle('center shift') 209 | plt.subplot(2, 2, 1) 210 | plt.imshow(sample, cmap='gray', interpolation='none') 211 | plt.subplot(2, 2, 2) 212 | plt.imshow(sino, cmap='gray', interpolation='none') 213 | plt.subplot(2, 2, 3) 214 | plt.imshow(filtered_sino, cmap='gray', interpolation='none') 215 | plt.subplot(2, 2, 4) 216 | plt.imshow(reco, vmin=0, cmap='gray', interpolation='none') 217 | 218 | 219 | # Artifact 3 - few angles / undersampling 220 | # --------------------------------------- 221 | Nangles = 91 222 | angles = np.linspace(0, 360, Nangles, False) 223 | 224 | sino = forwardproject(sample, angles) 225 | 226 | # filter the sinogram with the ramp filter and reconstruct it 227 | filtered_sino = filter_sino(sino) 228 | reco = backproject(filtered_sino, angles) 229 | 230 | plt.figure(4, figsize=(12, 12)) 231 | plt.suptitle('undersampling') 232 | plt.subplot(2, 2, 1) 233 | plt.imshow(sample, cmap='gray', interpolation='none') 234 | plt.subplot(2, 2, 2) 235 | plt.imshow(sino, cmap='gray', interpolation='none') 236 | plt.subplot(2, 2, 3) 237 | plt.imshow(filtered_sino, cmap='gray', interpolation='none') 238 | plt.subplot(2, 2, 4) 239 | plt.imshow(reco, vmin=0., cmap='gray', interpolation='none') 240 | 241 | 242 | # Artifact 4 - missing projections to tomosynthese 243 | # ------------------------------------------------ 244 | Nangles = 301 245 | angles = np.linspace(0, 180, Nangles, False) 246 | 247 | sino = forwardproject(sample, angles) 248 | 249 | # simulate one or more missing projections (e.g. 
replace with zeros) up to a 250 | # missing projection wedge 251 | sino[:100] = 0 252 | 253 | # filter the sinogram with the ramp filter and reconstruct it 254 | filtered_sino = filter_sino(sino) 255 | reco = backproject(filtered_sino, angles) 256 | 257 | plt.figure(5, figsize=(12, 12)) 258 | plt.suptitle('missing projections') 259 | plt.subplot(2, 2, 1) 260 | plt.imshow(sample, cmap='gray', interpolation='none') 261 | plt.subplot(2, 2, 2) 262 | plt.imshow(sino, cmap='gray', interpolation='none') 263 | plt.subplot(2, 2, 3) 264 | plt.imshow(filtered_sino, cmap='gray', interpolation='none') 265 | plt.subplot(2, 2, 4) 266 | plt.imshow(reco, vmin=0., cmap='gray', interpolation='none') 267 | 268 | 269 | # Artifact 5 - Noise 270 | # ------------------ 271 | Nangles = 301 272 | angles = np.linspace(0, 360, Nangles, False) 273 | 274 | sino = forwardproject(sample, angles) 275 | 276 | # simulate noise 277 | sino += 5000 * np.random.standard_normal(sino.shape) 278 | 279 | # filter the sinogram with the ramp filter and reconstruct it 280 | filtered_sino = filter_sino(sino) 281 | reco = backproject(filtered_sino, angles) 282 | 283 | plt.figure(6, figsize=(12, 12)) 284 | plt.suptitle('noise') 285 | plt.subplot(2, 2, 1) 286 | plt.imshow(sample, cmap='gray', interpolation='none') 287 | plt.subplot(2, 2, 2) 288 | plt.imshow(sino, cmap='gray', interpolation='none') 289 | plt.subplot(2, 2, 3) 290 | plt.imshow(filtered_sino, cmap='gray', interpolation='none') 291 | plt.subplot(2, 2, 4) 292 | plt.imshow(reco, vmin=0, cmap='gray', interpolation='none') 293 | 294 | -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0000.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0000.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0001.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0001.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0002.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0002.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0003.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0003.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0004.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0004.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0005.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0005.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0006.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0006.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0007.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0007.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0008.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0008.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0009.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0009.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/data_stepping_0010.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/data_stepping_0010.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0000.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0000.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0001.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0001.npy 
-------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0002.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0002.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0003.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0003.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0004.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0004.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0005.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0005.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0006.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0006.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0007.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0007.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0008.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0008.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0009.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0009.npy -------------------------------------------------------------------------------- /Exercise 9- Grating based phase contrast/data/flat_stepping_0010.npy: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arslansadiq/Image-Processing-In-Physics/4e3b636ff51f74f110434f1df93c737468c16c62/Exercise 9- Grating based phase contrast/data/flat_stepping_0010.npy
--------------------------------------------------------------------------------
/Exercise 9- Grating based phase contrast/ex1_interferometry_solution.py:
--------------------------------------------------------------------------------
"""
28.06.2018
Image Processing Physics TU Muenchen
Julia Herzen, Klaus Achterhold, (Maximilian Teuffenbach, Juanjuan Huang)

ex1_interferometry.py

Grating interferometry
In this exercise we will process and analyze a dataset from grating
interferometry.

As per usual, replace the ???s with the appropriate command(s).
"""

import os
import numpy as np
import matplotlib.pyplot as plt


def wrap_phase(inarray):
    """
    This function just makes sure that differential phase values
    stay in the range [-pi, pi].
    """
    outarray = np.angle(np.exp(1j * inarray))
    return outarray

# path to the raw data, os.sep is '/' on unix and '\\' on windows

PATH = 'data' + os.sep

# the number of stepping images

NUMIMGS = 11

# Format string for filenames: %s stands for a string, %04d for a
# 4-digit integer with leading zeros.
# (you can use for example FILEPATTERN % ('data', 5) to get
# the string 'data_stepping_0005.npy')

FILEPATTERN = '%s_stepping_%04d.npy'

# Read in raw data
# There are two series of 11 .npy files:
# the stepping images with sample in the beam
# (data_stepping_0000.npy to data_stepping_0010.npy)
# and without sample, i.e. the flatfields
# (flat_stepping_0000.npy to flat_stepping_0010.npy).
# You should read them in one after the other and combine them into
# two numpy arrays with shape (11, 195, 487); the first dimension
# represents the stepping images, the other two the actual dimensions
# of each image.
# Use np.load to read the data.

imglist = []
flatlist = []
for i in range(NUMIMGS):
    # load the image
    img = np.load(os.path.join(os.getcwd(), PATH, FILEPATTERN % ('data', i)))
    imglist.append(img)
    # load the flatfield
    flat = np.load(os.path.join(os.getcwd(), PATH, FILEPATTERN % ('flat', i)))
    flatlist.append(flat)

imgarr = np.array(imglist)
flatarr = np.array(flatlist)

# Plot the stepping curve of pixel (50, 200) of the data array;
# you should see a cosine curve.

stepping_curve = imgarr[:, 50, 200]
ref_curve = flatarr[:, 50, 200]

plt.figure(1)
plt.plot(stepping_curve, '*', label='stepping curve')
plt.plot(ref_curve, 'bo', label='reference curve')

# Fit the stepping curve via its discrete Fourier transform: the zeroth
# (DC) coefficient gives the mean of the curve, the first-order coefficient
# gives the phase and half the amplitude of the cosine modulation.

point_ft = np.fft.fft(stepping_curve)

cons = np.abs(point_ft[0]) / NUMIMGS
ang = np.angle(point_ft[1])
mod = np.abs(point_ft[1]) / NUMIMGS

x = np.linspace(0, NUMIMGS, 1000)
fit_stepping_curve = cons + 2. * mod * np.cos(x / NUMIMGS * 2 * np.pi + ang)

plt.plot(x, fit_stepping_curve, label='fit')
plt.legend()

# Cropping
# Have a look at one of the stepping images. You will see a rounded
# white border around the actual image. This is an area in the field
# of view where there is no grating and thus no interference.
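# (Added for illustration, not part of the original handout: a quick way to
# see this border is to display one raw stepping image before any cropping.
# The figure gets a string label so it does not collide with the numbered
# figures used elsewhere in this script.)
plt.figure('raw stepping image 0')
plt.imshow(imgarr[0], cmap='gray')
plt.colorbar()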
# You should crop all of the images so that this outside area is no
# longer in the image.

data_cropped = imgarr[:, :, 72:430]
flatfield_cropped = flatarr[:, :, 72:430]

# Fourier processing
# With the images cropped to their actual content, you will now do a
# Fourier processing step to extract the absorption, differential phase
# and darkfield signals.
# You will have to do a one-dimensional Fourier transform of both arrays
# along the stepping dimension (remember that there is a stepping curve
# for each pixel in the images) and normalize by the number of stepping
# images.
# Then extract the signals for the data and the flatfields:
# absorption: the absolute value of the zeroth (DC) term (equivalent to
# the mean value of the stepping curve)
# differential phase: the phase of the first-order term (equivalent to
# the phase of the cosine curve)
# darkfield: the absolute value of the first-order term (half the
# amplitude of the stepping curve) divided by the absorption, i.e.
# proportional to the visibility of the stepping curve

data_fft = np.fft.fft(data_cropped, axis=0) / NUMIMGS
flat_fft = np.fft.fft(flatfield_cropped, axis=0) / NUMIMGS

data_absorption = np.abs(data_fft[0])
data_differential_phase = np.angle(data_fft[1])
data_darkfield = np.abs(data_fft[1]) / data_absorption

flatfield_absorption = np.abs(flat_fft[0])
flatfield_differential_phase = np.angle(flat_fft[1])
flatfield_darkfield = np.abs(flat_fft[1]) / flatfield_absorption

# Now that you have the three signals for both stepping scans, you can
# do a flatfield correction:
# for absorption, divide the data by the flatfield
# for differential phase, use wrap_phase(data - flatfield)
# for darkfield, divide the data values by those of the flatfield
# For absorption and darkfield you should then also take the negative
# logarithm of the flatfield-corrected images.

absorption = -np.log(data_absorption / flatfield_absorption)
differential_phase = wrap_phase(
    data_differential_phase - flatfield_differential_phase)
darkfield = -np.log(data_darkfield / flatfield_darkfield)

plt.figure(2)
plt.subplot(3, 1, 1)
plt.title('absorption')
plt.imshow(absorption, cmap='gray')
plt.subplot(3, 1, 2)
plt.title('differential phase')
plt.imshow(differential_phase, cmap='gray')
plt.subplot(3, 1, 3)
plt.title('darkfield')
plt.imshow(darkfield, cmap='gray')
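
# ---------------------------------------------------------------------------
# Optional sanity check (added for illustration, not part of the original
# exercise). It builds a synthetic stepping curve with a known mean,
# visibility and phase and verifies that the Fourier extraction used above
# recovers these values. The names true_mean, true_vis and true_phase are
# made up for this check only.

true_mean = 1000.0   # mean intensity of the synthetic stepping curve
true_vis = 0.4       # visibility = modulation amplitude / mean
true_phase = 0.7     # phase of the cosine modulation in radians

steps = np.arange(NUMIMGS)
synthetic_curve = true_mean * (1.0 + true_vis *
                               np.cos(2 * np.pi * steps / NUMIMGS + true_phase))

synth_ft = np.fft.fft(synthetic_curve) / NUMIMGS
recovered_mean = np.abs(synth_ft[0])
recovered_phase = np.angle(synth_ft[1])
recovered_vis = 2 * np.abs(synth_ft[1]) / recovered_mean

# With exactly one period sampled over NUMIMGS steps the recovery is exact up
# to floating point error: the DC term gives the mean, the first-order term
# gives the phase, and twice its magnitude divided by the mean gives the
# visibility (half of this ratio is what data_darkfield stores before the
# flatfield correction and logarithm).
assert np.isclose(recovered_mean, true_mean)
assert np.isclose(recovered_vis, true_vis)
assert np.isclose(wrap_phase(recovered_phase - true_phase), 0.0)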
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Image-Processing-In-Physics
Graduate-level course at the Technical University of Munich
## About this course
- The course is taught in English
- 2 hours/week lecture, 1 hour/week tutorial
- Computer exercises are given as homework
- Tutorials are discussions of the homework exercises
- At least 3 of the 11 homework exercises must be submitted in order to sit the exam
- Oral exams at the end of the semester
- 5 ECTS
- Overview of recurring topics from all fields of imaging in physics
- Essential tools and expressions used in imaging
- Broad rather than in-depth
- Focused on underlying principles
- Few rigorous mathematical derivations
- Oriented towards practical implementations
## Course content
### A. Basics of Image Processing
- A.1 Image processing in spatial domain
- A.2 Image processing in Fourier domain
- A.3 Sampling, interpolation and pixel representations
- A.4 Segmentation
### B. Wave optics and instrumentation
- B.1 Detectors & noise
- B.2 Wave propagation
- B.3 Phase retrieval
- B.4 Grating-based imaging
### C. Abstract algorithms and optimization
- C.1 Tomography
- C.2 Least squares optimization
- C.3 Wavelets
## Exercise Tutorials
Hands-on computer exercises in Python on the topics covered in class
--------------------------------------------------------------------------------