├── .gitignore
├── 5955-convolutional-lstm-network-a-machine-learning-approach-for-precipitation-nowcasting.pdf
├── Moving-MNIST
│   ├── README.md
│   ├── generate-dataset.py
│   ├── images
│   │   ├── 32-32
│   │   │   ├── 11.png
│   │   │   ├── 12.png
│   │   │   ├── 13.png
│   │   │   ├── 14.png
│   │   │   ├── 15.png
│   │   │   ├── 16.png
│   │   │   ├── 17.png
│   │   │   ├── 18.png
│   │   │   ├── 19.png
│   │   │   ├── 1_6.gif
│   │   │   ├── 20.png
│   │   │   ├── 21.png
│   │   │   ├── 22.png
│   │   │   ├── 23.png
│   │   │   ├── 24.png
│   │   │   ├── 25.png
│   │   │   ├── 26.png
│   │   │   ├── 27.png
│   │   │   ├── 28.png
│   │   │   ├── 29.png
│   │   │   ├── 30.png
│   │   │   ├── 7_9.gif
│   │   │   ├── a11.png
│   │   │   ├── a12.png
│   │   │   ├── a13.png
│   │   │   ├── a14.png
│   │   │   ├── a15.png
│   │   │   ├── a16.png
│   │   │   ├── a17.png
│   │   │   ├── a18.png
│   │   │   ├── a19.png
│   │   │   ├── a20.png
│   │   │   ├── a21.png
│   │   │   ├── a22.png
│   │   │   ├── a23.png
│   │   │   ├── a24.png
│   │   │   ├── a25.png
│   │   │   ├── a26.png
│   │   │   ├── a27.png
│   │   │   ├── a28.png
│   │   │   ├── a29.png
│   │   │   ├── a30.png
│   │   │   ├── load.py
│   │   │   ├── p11.png
│   │   │   ├── p12.png
│   │   │   ├── p13.png
│   │   │   ├── p14.png
│   │   │   ├── p15.png
│   │   │   ├── p16.png
│   │   │   ├── p17.png
│   │   │   ├── p18.png
│   │   │   ├── p19.png
│   │   │   ├── p20.png
│   │   │   ├── p21.png
│   │   │   ├── p22.png
│   │   │   ├── p23.png
│   │   │   ├── p24.png
│   │   │   ├── p25.png
│   │   │   ├── p26.png
│   │   │   ├── p27.png
│   │   │   ├── p28.png
│   │   │   ├── p29.png
│   │   │   └── p30.png
│   │   ├── actual-images
│   │   │   ├── 11.png
│   │   │   ├── 12.png
│   │   │   ├── 13.png
│   │   │   ├── 14.png
│   │   │   ├── 15.png
│   │   │   ├── 16.png
│   │   │   ├── 17.png
│   │   │   ├── 18.png
│   │   │   ├── 19.png
│   │   │   ├── 20.png
│   │   │   ├── 21.png
│   │   │   ├── 22.png
│   │   │   ├── 23.png
│   │   │   ├── 24.png
│   │   │   ├── 25.png
│   │   │   ├── 26.png
│   │   │   ├── 27.png
│   │   │   ├── 28.png
│   │   │   ├── 29.png
│   │   │   └── 30.png
│   │   ├── reshape-128-128
│   │   │   ├── 01.png
│   │   │   ├── 02.png
│   │   │   ├── 03.png
│   │   │   ├── 04.png
│   │   │   ├── 05.png
│   │   │   ├── 06.png
│   │   │   ├── 07.png
│   │   │   ├── 08.png
│   │   │   ├── 09.png
│   │   │   ├── 0_5.gif
│   │   │   ├── 10.png
│   │   │   ├── 11.png
│   │   │   ├── 12.png
│   │   │   ├── 13.png
│   │   │   ├── 14.png
│   │   │   ├── 15.png
│   │   │   ├── 16.png
│   │   │   ├── 17.png
│   │   │   ├── 18.png
│   │   │   ├── 19.png
│   │   │   ├── 20.png
│   │   │   ├── 21.png
│   │   │   ├── 22.png
│   │   │   ├── 23.png
│   │   │   ├── 24.png
│   │   │   ├── 25.png
│   │   │   ├── 26.png
│   │   │   ├── 27.png
│   │   │   ├── 28.png
│   │   │   ├── 29.png
│   │   │   ├── 30.png
│   │   │   ├── 6_1.gif
│   │   │   └── 7_9.gif
│   │   └── reshape-128-64-64
│   │       ├── 01.png
│   │       ├── 02.png
│   │       ├── 03.png
│   │       ├── 04.png
│   │       ├── 05.png
│   │       ├── 06.png
│   │       ├── 07.png
│   │       ├── 08.png
│   │       ├── 09.png
│   │       ├── 0_5.gif
│   │       ├── 10.png
│   │       ├── 11.png
│   │       ├── 12.png
│   │       ├── 13.png
│   │       ├── 14.png
│   │       ├── 15.png
│   │       ├── 16.png
│   │       ├── 17.png
│   │       ├── 18.png
│   │       ├── 19.png
│   │       ├── 20.png
│   │       ├── 21.png
│   │       ├── 22.png
│   │       ├── 23.png
│   │       ├── 24.png
│   │       ├── 25.png
│   │       ├── 26.png
│   │       ├── 27.png
│   │       ├── 28.png
│   │       ├── 29.png
│   │       ├── 30.png
│   │       ├── 6_1.gif
│   │       └── 7_9.gif
│   ├── main_notebook.py
│   └── mnist-read.py
├── NEXRAD
│   ├── KATX
│   │   ├── 96-96-32-32
│   │   │   ├── 11.png
│   │   │   ├── 12.png
│   │   │   ├── 13.png
│   │   │   ├── 14.png
│   │   │   ├── 15.png
│   │   │   ├── 16.png
│   │   │   ├── 17.png
│   │   │   ├── 18.png
│   │   │   ├── 19.png
│   │   │   ├── 20.png
│   │   │   ├── 21.png
│   │   │   ├── 22.png
│   │   │   ├── 23.png
│   │   │   ├── 24.png
│   │   │   ├── 25.png
│   │   │   ├── 26.png
│   │   │   ├── 27.png
│   │   │   ├── 28.png
│   │   │   ├── 29.png
│   │   │   ├── 30.png
│   │   │   ├── 31.png
│   │   │   ├── 32.png
│   │   │   ├── 33.png
│   │   │   ├── 34.png
│   │   │   ├── 35.png
│   │   │   ├── 36.png
│   │   │   ├── 37.png
│   │   │   ├── 38.png
│   │   │   ├── 39.png
│   │   │   ├── 40.png
│   │   │   ├── load.py
│   │   │   ├── radar.gif
│   │   │   ├── radar1.gif
│   │   │   ├── radar2.gif
│   │   │   └── radar3.gif
│   │   └── katx.png
│   ├── PHWA
│   │   ├── 64_48
│   │   │   ├── 11.png
│   │   │   ├── 12.png
│   │   │   ├── 13.png
│   │   │   ├── 14.png
│   │   │   ├── 15.png
│   │   │   ├── 16.png
│   │   │   ├── 17.png
│   │   │   ├── 18.png
│   │   │   ├── 19.png
│   │   │   ├── 20.png
│   │   │   ├── 21.png
│   │   │   ├── 22.png
│   │   │   ├── 23.png
│   │   │   ├── 24.png
│   │   │   ├── 25.png
│   │   │   ├── 26.png
│   │   │   ├── 27.png
│   │   │   ├── 28.png
│   │   │   ├── 29.png
│   │   │   ├── 30.png
│   │   │   ├── a20.png
│   │   │   ├── a21.png
│   │   │   ├── a22.png
│   │   │   ├── a23.png
│   │   │   ├── a24.png
│   │   │   ├── a25.png
│   │   │   ├── a26.png
│   │   │   ├── a27.png
│   │   │   ├── a28.png
│   │   │   ├── a29.png
│   │   │   ├── a30.png
│   │   │   ├── load.py
│   │   │   ├── p20.png
│   │   │   ├── p21.png
│   │   │   ├── p22.png
│   │   │   ├── p23.png
│   │   │   ├── p24.png
│   │   │   ├── p25.png
│   │   │   ├── p26.png
│   │   │   ├── p27.png
│   │   │   ├── p28.png
│   │   │   ├── p29.png
│   │   │   ├── p30.png
│   │   │   └── radar.gif
│   │   ├── 96_64_64_32
│   │   │   ├── 11.png
│   │   │   ├── 12.png
│   │   │   ├── 13.png
│   │   │   ├── 14.png
│   │   │   ├── 15.png
│   │   │   ├── 16.png
│   │   │   ├── 17.png
│   │   │   ├── 18.png
│   │   │   ├── 19.png
│   │   │   ├── 20.png
│   │   │   ├── 21.png
│   │   │   ├── 22.png
│   │   │   ├── 23.png
│   │   │   ├── 24.png
│   │   │   ├── 25.png
│   │   │   ├── 26.png
│   │   │   ├── 27.png
│   │   │   ├── 28.png
│   │   │   ├── 29.png
│   │   │   ├── 30.png
│   │   │   ├── a20.png
│   │   │   ├── a21.png
│   │   │   ├── a22.png
│   │   │   ├── a23.png
│   │   │   ├── a24.png
│   │   │   ├── a25.png
│   │   │   ├── a26.png
│   │   │   ├── a27.png
│   │   │   ├── a28.png
│   │   │   ├── a29.png
│   │   │   ├── a30.png
│   │   │   ├── load.py
│   │   │   ├── p20.png
│   │   │   ├── p21.png
│   │   │   ├── p22.png
│   │   │   ├── p23.png
│   │   │   ├── p24.png
│   │   │   ├── p25.png
│   │   │   ├── p26.png
│   │   │   ├── p27.png
│   │   │   ├── p28.png
│   │   │   ├── p29.png
│   │   │   ├── p30.png
│   │   │   └── radar.gif
│   │   ├── enc_dec_64_48.py
│   │   └── reshape-64_48
│   │       ├── 10.png
│   │       ├── 11.png
│   │       ├── 12.png
│   │       ├── 13.png
│   │       ├── 14.png
│   │       ├── 15.png
│   │       ├── 16.png
│   │       ├── 17.png
│   │       ├── 18.png
│   │       ├── 19.png
│   │       ├── 20.png
│   │       ├── 21.png
│   │       ├── 22.png
│   │       ├── 23.png
│   │       ├── 24.png
│   │       ├── 25.png
│   │       ├── 26.png
│   │       ├── 27.png
│   │       ├── 28.png
│   │       ├── 29.png
│   │       ├── 30.png
│   │       ├── 31.png
│   │       ├── 32.png
│   │       ├── 33.png
│   │       ├── 34.png
│   │       ├── 35.png
│   │       ├── 36.png
│   │       ├── 37.png
│   │       ├── 38.png
│   │       ├── 39.png
│   │       ├── output1.gif
│   │       ├── output2.gif
│   │       └── output3.gif
│   ├── README.md
│   ├── dataset_day.py
│   ├── dataset_mult_day.py
│   ├── download_day.py
│   ├── latest_Small.png
│   ├── radar.png
│   ├── radar_metpy.png
│   ├── read_metpy.py
│   └── read_pyart.py
├── README.md
├── btp_report.pdf
├── decoder.py
├── encoder.py
├── encoder_decoder.py
├── lstm-keras
│   ├── conv-lstm-one.py
│   ├── images
│   │   ├── mnist-one-layer-2.png
│   │   ├── mnist-one-layer-3.png
│   │   └── mnist-one-layer.png
│   ├── lstm-encoder-decoder.py
│   ├── lstm-one-one-50.py
│   └── lstm-one-one-50.py~
├── main.py
├── next-frame-tutorial
│   ├── README.md
│   ├── encoder-decoder.py
│   ├── images
│   │   ├── enc_dec0
│   │   │   ├── actual0_1.png
│   │   │   ├── actual0_2.png
│   │   │   ├── actual0_3.png
│   │   │   ├── actual0_4.png
│   │   │   ├── actual0_5.png
│   │   │   ├── actual0_6.png
│   │   │   ├── actual0_7.png
│   │   │   ├── actual0_8.png
│   │   │   ├── actual0_9.png
│   │   │   ├── actual0_fin.png
│   │   │   ├── input.gif
│   │   │   ├── output.gif
│   │   │   ├── output0_1.png
│   │   │   ├── output0_2.png
│   │   │   ├── output0_3.png
│   │   │   ├── output0_4.png
│   │   │   ├── output0_5.png
│   │   │   ├── output0_6.png
│   │   │   ├── output0_7.png
│   │   │   ├── output0_8.png
│   │   │   ├── output0_9.png
│   │   │   └── output0_fin.png
│   │   ├── enc_dec1
│   │   │   ├── actual1_1.png
│   │   │   ├── actual1_2.png
│   │   │   ├── actual1_3.png
│   │   │   ├── actual1_4.png
│   │   │   ├── actual1_5.png
│   │   │   ├── actual1_6.png
│   │   │   ├── actual1_7.png
│   │   │   ├── actual1_8.png
│   │   │   ├── actual1_9.png
│   │   │   ├── actual1_fin.png
│   │   │   ├── input.gif
│   │   │   ├── output.gif
│   │   │   ├── output1_1.png
│   │   │   ├── output1_2.png
│   │   │   ├── output1_3.png
│   │   │   ├── output1_4.png
│   │   │   ├── output1_5.png
│   │   │   ├── output1_6.png
│   │   │   ├── output1_7.png
│   │   │   ├── output1_8.png
│   │   │   ├── output1_9.png
│   │   │   └── output1_fin.png
│   │   └── stack
│   │       ├── actual0.png
│   │       ├── actual1.png
│   │       ├── actual2.png
│   │       ├── actual3.png
│   │       ├── actual4.png
│   │       ├── actual5.png
│   │       ├── actual6.png
│   │       ├── actual7.png
│   │       ├── actual8.png
│   │       ├── actual9.png
│   │       ├── encoder.drawio
│   │       ├── input.gif
│   │       ├── o0.png
│   │       ├── o1.png
│   │       ├── o2.png
│   │       ├── o3.png
│   │       ├── o4.png
│   │       ├── o5.png
│   │       ├── o6.png
│   │       ├── o7.png
│   │       ├── o8.png
│   │       ├── o9.png
│   │       └── output.gif
│   └── next-frame.py
├── test.py
└── utility.py
/.gitignore:
--------------------------------------------------------------------------------
venv/*
t10k-images-idx3-ubyte.gz
t10k-labels-idx1-ubyte.gz
train1000.npy
train-images-idx3-ubyte.gz
train-labels-idx1-ubyte.gz
*~
lstm-keras/*~
--------------------------------------------------------------------------------
/5955-convolutional-lstm-network-a-machine-learning-approach-for-precipitation-nowcasting.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/5955-convolutional-lstm-network-a-machine-learning-approach-for-precipitation-nowcasting.pdf
--------------------------------------------------------------------------------
/Moving-MNIST/README.md:
--------------------------------------------------------------------------------
# Moving MNIST

The Moving MNIST dataset consists of sequences of 20 image frames, each showing 2
handwritten digits from the MNIST database moving inside a 64 x 64 frame. A sequence
is generated by randomly choosing 2 digits from the MNIST database and assigning each
of them a random velocity at the start. The digits move inside the frame and bounce
back when they reach its edges. A Moving MNIST dataset of 10,000 sequences can also
be found at http://www.cs.toronto.edu/~nitish/unsupervised_video/

The Moving MNIST frames have 256 intensity levels (0 - 255). The frames were
thresholded to binary intensities (0 and 1). Each 20-frame sequence was split into
two sequences of 10 frames (the first 10 frames as input, the other 10 as the
prediction target). The models were trained to minimize the binary cross-entropy
loss using the RMSProp optimizer (learning rate = 0.001, decay rate = 0.9), with
early stopping on the validation set.

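Below is a minimal Keras sketch of this training configuration, for illustration
only: the stacked ConvLSTM layers stand in for the repository's actual
encoder-decoder models, and the Conv3D read-out, batch size, and early-stopping
patience are assumptions.

```python
import tensorflow as tf

# Simplified stand-in model: 2 ConvLSTM layers with 32 filters and (5 x 5)
# kernels, mapping 10 input frames to 10 per-pixel digit probabilities.
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(10, 64, 64, 1)),
    tf.keras.layers.ConvLSTM2D(32, (5, 5), padding="same", return_sequences=True),
    tf.keras.layers.ConvLSTM2D(32, (5, 5), padding="same", return_sequences=True),
    tf.keras.layers.Conv3D(1, (3, 3, 3), padding="same", activation="sigmoid"),
])

model.compile(
    loss="binary_crossentropy",  # frames are thresholded to 0/1
    # learning rate = 0.001, decay rate (rho) = 0.9, as described above
    optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9),
)

# early stopping on the validation set (patience is an assumption)
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=3, restore_best_weights=True)

# model.fit(x_train, y_train, validation_data=(x_val, y_val),
#           epochs=50, batch_size=8, callbacks=[early_stop])
```
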
* 2-layer encoder-decoder network with 32, 32 hidden units and a (5 x 5) filter
  size in each layer: this model was trained on 1,000 sequences and tested on 200
  sequences (with 200 validation sequences). The average binary cross-entropy loss
  was 0.2998.

## Image Reshaping

The above models were trained on an NVIDIA K80 GPU provided by Kaggle. The memory
consumed by a ConvLSTM layer scales with batch_size x image_rows x image_cols x
filters, so training deeper models with many hidden units needs a large amount of
memory, and a deeper encoder-decoder model could not be trained on the actual MNIST
frames with the available resources. Since the memory allocated for a ConvLSTM
depends on the image size (rows x cols), the 64 x 64 frames were reshaped into
16 x 16 x 16 frames (increasing the channel dimension), as sketched below. This
made it possible to train deeper encoder-decoder networks. Two encoder-decoder
networks were trained on these sequences (7,000 training sequences, 1,500
validation sequences, and 1,500 test sequences).
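
One way to implement this reshape is a space-to-depth transform that folds each
4 x 4 spatial block into the channel dimension (64 x 64 = 16 x 16 x 16 values
either way). The sketch below assumes numpy arrays of shape (frames, 64, 64); the
exact fold order used in the repository is an assumption.

```python
import numpy as np

def space_to_depth(frames, block=4):
    """Fold each (block x block) spatial patch of (T, H, W) frames into the
    channel axis, returning (T, H // block, W // block, block * block)."""
    t, h, w = frames.shape
    out = frames.reshape(t, h // block, block, w // block, block)
    out = out.transpose(0, 1, 3, 2, 4)  # (T, 16, 16, 4, 4) for 64 x 64 inputs
    return out.reshape(t, h // block, w // block, block * block)

# a 20-frame sequence of 64 x 64 binary images -> (20, 16, 16, 16)
seq = np.random.randint(0, 2, size=(20, 64, 64)).astype(np.float32)
print(space_to_depth(seq).shape)  # (20, 16, 16, 16)
```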
* 2 layers with 128, 128 hidden units and a (5 x 5) filter size in each layer: the
  average binary cross-entropy loss on the test data was 0.1868.

* 3 layers with 128, 64, 64 hidden units and a (5 x 5) filter size in each layer:
  the average binary cross-entropy loss on the test data was 0.1675.
--------------------------------------------------------------------------------
/Moving-MNIST/generate-dataset.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import random
import math
import gzip

# MNIST digit dimensions
image_rows = 28
image_cols = 28

total_test = 1000        # number of sequences to generate
frame_no = 20            # frames per sequence
# (row, col) size of each output frame
frame_size = (64, 64)

# a position (r, c) must stay inside the frame
def valid(r, c):
    return 0 <= r < frame_size[0] and 0 <= c < frame_size[1]

# renders one frame: draws each digit at its current corner, reversing a
# velocity component when the digit would cross the frame edge
def cons_frame(imgs, corners, vel):

    frame = np.zeros(frame_size, dtype=np.uint8)

    for i in range(len(imgs)):
        for r in range(imgs[i].shape[0]):
            for c in range(imgs[i].shape[1]):
                x, y = (r + vel[i][0] + corners[i][0], c + vel[i][1] + corners[i][1])

                # pixel would leave the frame: bounce by reversing the
                # offending velocity component(s) and recompute the position
                if not valid(x, y):
                    rev_x = 1
                    rev_y = 1
                    if not valid(x, 0):   # row out of range
                        rev_x = -1
                    if not valid(0, y):   # column out of range
                        rev_y = -1

                    vel[i] = (rev_x * vel[i][0], rev_y * vel[i][1])
                    x, y = (r + vel[i][0] + corners[i][0], c + vel[i][1] + corners[i][1])

                # overlapping digits keep the brighter pixel
                frame[x][y] = max(frame[x][y], imgs[i][r][c])

    # advance every digit's top-left corner by its (possibly reversed) velocity
    corners = tuple((corners[i][0] + vel[i][0], corners[i][1] + vel[i][1])
                    for i in range(len(imgs)))
    return (frame, corners, vel)

# builds one sequence of frame_no frames with two randomly chosen digits
def gen_test(data, num_images):
    choices = ()
    corners = ()
    velocity = []   # list, because cons_frame mutates it on bounces

    # two digits per frame
    for _ in range(2):
        choices += (random.randint(0, num_images - 1), )

    # a random top-left corner for each digit, fully inside the frame
    for _ in range(2):
        r = random.randint(0, frame_size[0] - image_rows - 1)
        c = random.randint(0, frame_size[1] - image_cols - 1)
        corners += ((r, c), )

    # a random speed (3 - 5 pixels/frame) in a random direction
    for _ in range(2):
        amp = random.randint(3, 5)
        theta = random.uniform(0, 2 * math.pi)
        vr = int(amp * math.cos(theta))
        vc = int(amp * math.sin(theta))
        velocity.append((vr, vc))

    imgs = ()
    for ind in choices:
        imgs += (np.asarray(data[ind]).squeeze(), )

    frames = []
    for i in range(frame_no):
        frame, corners, velocity = cons_frame(imgs, corners, velocity)
        frames.append(frame)
    return frames

# loads the saved dataset and shows a sample frame; uncomment the first two
# lines to regenerate and save it instead
def dataset(data, num_images):
    # train = [gen_test(data, num_images) for _ in range(total_test)]
    # np.save('train' + str(total_test) + '.npy', train)
    train = np.load('train' + str(total_test) + '.npy')
    print(train.shape)
    plt.imshow(train[0][2], cmap='gray')
    plt.show()

def display(data, ind):
    image = np.asarray(data[ind]).squeeze()
    plt.imshow(image, cmap='gray')
    plt.show()

if __name__ == "__main__":
    num_images = 500
    # read the first num_images digits from the raw MNIST archive,
    # skipping the 16-byte IDX header
    fd = gzip.open('train-images-idx3-ubyte.gz', 'r')
    fd.read(16)
    buf = fd.read(image_rows * image_cols * num_images)
    data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
    data = data.reshape(num_images, image_rows, image_cols, 1)

    dataset(data, num_images)
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/11.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/12.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/13.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/14.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/15.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/16.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/17.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/18.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/19.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/1_6.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/1_6.gif
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/20.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/21.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/22.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/23.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/24.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/25.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/26.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/27.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/28.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/29.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/30.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/7_9.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/7_9.gif
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a11.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a12.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a13.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a14.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a15.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a16.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a17.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a18.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a19.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a20.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a21.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a22.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a23.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a24.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a25.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a26.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a27.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a28.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a29.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/a30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/a30.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/load.py:
--------------------------------------------------------------------------------
import cv2
from os import listdir

# Each NN.png in this directory holds the ground-truth and predicted frames
# side by side. Split every such image into its left (actual) and right
# (predicted) halves, saved as aNN.png and pNN.png.
filenames = listdir(".")
for files in filenames:
    split = files.split(".")
    if len(split) <= 1:
        continue
    if split[-1] != "png":
        continue
    # skip files this script has already produced (aNN.png / pNN.png)
    if not split[0].isdigit():
        continue
    img = cv2.imread(files)
    cols = img.shape[1] // 2
    act = img[:, :cols, :]
    pred = img[:, cols:, :]
    cv2.imwrite("a" + files, act)
    cv2.imwrite("p" + files, pred)
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p11.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p12.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p13.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p14.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p15.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p16.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p17.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p18.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p19.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p20.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p21.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p22.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p23.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p24.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p25.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p26.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p27.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p28.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p29.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/32-32/p30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/32-32/p30.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/11.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/12.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/13.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/14.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/15.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/16.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/17.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/18.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/19.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/20.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/21.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/22.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/23.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/24.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/25.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/26.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/27.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/28.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/29.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/actual-images/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/actual-images/30.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/01.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/02.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/03.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/04.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/05.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/06.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/07.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/07.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/08.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/08.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/09.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/09.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/0_5.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/0_5.gif
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/10.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/11.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/12.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/13.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/14.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/15.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/16.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/17.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/18.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/19.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/20.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/21.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/22.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/23.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/24.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/25.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/26.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/27.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/28.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/29.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/30.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/6_1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/6_1.gif
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-128/7_9.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-128/7_9.gif
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/01.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/02.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/03.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/04.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/05.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/06.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/07.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/07.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/08.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/08.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/09.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/09.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/0_5.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/0_5.gif
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/10.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/11.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/12.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/13.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/14.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/15.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/16.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/17.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/18.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/19.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/20.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/21.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/22.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/23.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/24.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/25.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/26.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/27.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/28.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/29.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/30.png
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/6_1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/6_1.gif
--------------------------------------------------------------------------------
/Moving-MNIST/images/reshape-128-64-64/7_9.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/Moving-MNIST/images/reshape-128-64-64/7_9.gif
--------------------------------------------------------------------------------
/Moving-MNIST/mnist-read.py:
--------------------------------------------------------------------------------
import gzip

import matplotlib.pyplot as plt
import numpy as np

# MNIST raster dimensions; the full training set holds 60000 images.
image_rows = 28
image_cols = 28
total_images = 60000

def display(data, ind):
    # Drop the trailing channel axis and render the digit in grayscale.
    image = np.asarray(data[ind]).squeeze()
    plt.imshow(image, cmap='gray')
    plt.show()

if __name__ == "__main__":
    num_images = 10
    fd = gzip.open('train-images-idx3-ubyte.gz', 'r')
    # Skip the 16-byte IDX header (magic number, image count, rows, cols),
    # then read the raw pixel bytes for the first num_images images.
    fd.read(16)
    buf = fd.read(image_rows * image_cols * num_images)
    data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
    data = data.reshape(num_images, image_rows, image_cols, 1)

    display(data, 2)
--------------------------------------------------------------------------------
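The reader above hardcodes the 28x28 geometry and discards the IDX header unparsed. Below is a minimal sketch (not part of the repository) of how the header fields could be read instead, assuming the standard big-endian MNIST IDX3 layout; read_idx3_images is a hypothetical helper name.

import gzip
import struct

import numpy as np

def read_idx3_images(path, num_images):
    # The IDX3 header is four big-endian uint32 fields
    # (magic, image count, rows, cols); magic is 2051 for image files.
    with gzip.open(path, 'rb') as fd:
        magic, count, rows, cols = struct.unpack('>IIII', fd.read(16))
        assert magic == 2051, "not an IDX3 image file"
        num_images = min(num_images, count)
        buf = fd.read(rows * cols * num_images)
    data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
    return data.reshape(num_images, rows, cols, 1)

Reading the dimensions from the header keeps the loader correct for any IDX3 file rather than only 28x28 MNIST.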
/NEXRAD/KATX/96-96-32-32/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/11.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/12.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/13.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/14.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/15.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/16.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/17.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/18.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/19.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/20.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/21.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/22.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/23.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/24.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/25.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/26.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/27.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/28.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/29.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/30.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/31.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/31.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/32.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/33.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/33.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/34.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/34.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/35.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/35.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/36.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/36.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/37.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/37.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/38.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/38.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/39.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/39.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/40.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/40.png
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/load.py:
--------------------------------------------------------------------------------
import cv2
from os import listdir

# Each PNG in this directory is a side-by-side comparison frame: the left
# half is the actual (ground-truth) image and the right half is the model
# prediction. Write the halves out separately with "a" and "p" prefixes.
for name in listdir("."):
    parts = name.split(".")
    if len(parts) <= 1 or parts[-1] != "png":
        continue
    img = cv2.imread(name)
    cols = img.shape[1] // 2
    actual = img[:, :cols, :]
    predicted = img[:, cols:, :]
    cv2.imwrite("a" + name, actual)
    cv2.imwrite("p" + name, predicted)
--------------------------------------------------------------------------------
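load.py splits each comparison frame into its ground-truth and predicted halves (the "a"/"p" prefixes). As a hedged sketch, assuming the split frames already exist in the working directory, the halves could be stitched back into animations like the radar*.gif files; make_gif is a hypothetical helper and the output names are illustrative.

from os import listdir

from PIL import Image

def make_gif(prefix, out_name, frame_ms=200):
    # Collect the split frames in filename order and write an animated GIF.
    names = sorted(f for f in listdir(".")
                   if f.startswith(prefix) and f.endswith(".png"))
    frames = [Image.open(name) for name in names]
    frames[0].save(out_name, save_all=True, append_images=frames[1:],
                   duration=frame_ms, loop=0)

make_gif("a", "actual.gif")     # ground-truth halves
make_gif("p", "predicted.gif")  # predicted halves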
/NEXRAD/KATX/96-96-32-32/radar.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/radar.gif
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/radar1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/radar1.gif
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/radar2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/radar2.gif
--------------------------------------------------------------------------------
/NEXRAD/KATX/96-96-32-32/radar3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/96-96-32-32/radar3.gif
--------------------------------------------------------------------------------
/NEXRAD/KATX/katx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/KATX/katx.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/11.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/12.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/13.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/14.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/15.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/16.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/17.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/18.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/19.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/20.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/21.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/22.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/23.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/24.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/25.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/26.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/27.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/28.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/29.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/30.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a20.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a21.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a22.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a23.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a24.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a25.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a26.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a27.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a28.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a29.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/a30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/a30.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/load.py:
--------------------------------------------------------------------------------
import cv2
from os import listdir

# Each PNG in this directory is a side-by-side comparison frame: the left
# half is the actual (ground-truth) image and the right half is the model
# prediction. Write the halves out separately with "a" and "p" prefixes.
for name in listdir("."):
    parts = name.split(".")
    if len(parts) <= 1 or parts[-1] != "png":
        continue
    img = cv2.imread(name)
    cols = img.shape[1] // 2
    actual = img[:, :cols, :]
    predicted = img[:, cols:, :]
    cv2.imwrite("a" + name, actual)
    cv2.imwrite("p" + name, predicted)
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p20.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p21.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p22.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p23.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p24.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p25.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p26.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p27.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p28.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p29.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/p30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/p30.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/64_48/radar.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/64_48/radar.gif
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/11.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/12.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/13.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/14.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/15.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/16.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/17.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/18.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/19.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/20.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/21.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/22.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/23.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/24.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/25.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/26.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/27.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/28.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/29.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/30.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a20.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a21.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a22.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a23.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a24.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a25.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a26.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a27.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a28.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a29.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/a30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/a30.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/load.py:
--------------------------------------------------------------------------------
import cv2
from os import listdir

# Each PNG in this directory is a side-by-side comparison frame: the left
# half is the actual (ground-truth) image and the right half is the model
# prediction. Write the halves out separately with "a" and "p" prefixes.
for name in listdir("."):
    parts = name.split(".")
    if len(parts) <= 1 or parts[-1] != "png":
        continue
    img = cv2.imread(name)
    cols = img.shape[1] // 2
    actual = img[:, :cols, :]
    predicted = img[:, cols:, :]
    cv2.imwrite("a" + name, actual)
    cv2.imwrite("p" + name, predicted)
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p20.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p21.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p22.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p23.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p24.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p25.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p26.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p27.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p28.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p29.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/p30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/p30.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/96_64_64_32/radar.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/96_64_64_32/radar.gif
--------------------------------------------------------------------------------
/NEXRAD/PHWA/enc_dec_64_48.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 | import os
6 |
7 | class Encoder(tf.keras.Model):
8 |
9 | # unit_list -> list of units in each layer
10 | # filter_sz -> list of filter sizes for each layer
11 | def __init__(self, enc_layers, unit_list, filter_sz, image_sz, batch_sz):
12 | super(Encoder, self).__init__()
13 |
14 | self.enc_layers = enc_layers
15 | self.unit_list = unit_list
16 | self.filter_sz = filter_sz
17 | self.image_sz = image_sz
18 | self.batch_sz = batch_sz
19 | self.conv_lstm = []
20 | self.batch_norm = []
21 |
22 | for layer in range(self.enc_layers):
23 | lstm = tf.keras.layers.ConvLSTM2D(filters=self.unit_list[layer],
24 | kernel_size=self.filter_sz[layer],
25 | padding="same",
26 | return_sequences=True,
27 | return_state=True,
28 | data_format="channels_last")
29 |
30 | #if layer != self.enc_layers - 1:
31 | # norm = tf.keras.layers.BatchNormalization()
32 | # self.batch_norm.append(norm)
33 | self.conv_lstm.append(lstm)
34 |
35 |
36 | def initialize_states(self, layer, batch_sz):
37 | return [tf.zeros([batch_sz, self.image_sz[0], self.image_sz[1], self.unit_list[layer]]),
38 | tf.zeros([batch_sz, self.image_sz[0], self.image_sz[1], self.unit_list[layer]])]
39 |
40 |
41 | # Encoder doesn't need states input
42 | # x.shape -> (batch_size, time_steps, rows, cols, channels)
43 | def call(self, input_, batch_sz, training=True):
44 |
45 | states = []
46 | for layer in range(self.enc_layers):
47 | outputs, hidden_state, cell_state = self.conv_lstm[layer](
48 | input_,
49 | initial_state = self.initialize_states(layer, batch_sz)
50 | )
51 | input_ = outputs
52 |
53 | # No batch normalization for now
54 | #if layer != self.enc_layers - 1:
55 | # input_ = self.batch_norm[layer](outputs, training=training)
56 |
57 | states.append([hidden_state, cell_state])
58 |
59 | return states
60 |
61 |
62 | class Decoder(tf.keras.Model):
63 |
64 | # unit_list -> list of units in each layer
65 | # filter_sz -> list of filter sizes for each layer
66 | # keep parameters same as Encoder
67 | def __init__(self, dec_layers, unit_list, filter_sz):
68 | super(Decoder, self).__init__()
69 |
70 | self.dec_layers = dec_layers
71 | self.unit_list = unit_list
72 | self.filter_sz = filter_sz
73 | self.conv_lstm = []
74 | self.batch_norm = []
75 |
76 | # volume convolution over each time step's output
77 | # 1 x 1 convolution producing the 4 channels of a 50 x 50 x 4 frame
78 | self.conv_nn = tf.keras.layers.Conv2D(filters=4,
79 | kernel_size=(1, 1),
80 | padding="same",
81 | activation='sigmoid',
82 | data_format="channels_last")
83 |
84 | # ConvLSTM layers and Batch Normalization
85 | for layer in range(self.dec_layers):
86 | lstm = tf.keras.layers.ConvLSTM2D(filters=self.unit_list[layer],
87 | kernel_size=self.filter_sz[layer],
88 | padding="same",
89 | return_state=True,
90 | data_format="channels_last")
91 |
92 | #norm = tf.keras.layers.BatchNormalization()
93 | #self.batch_norm.append(norm)
94 | self.conv_lstm.append(lstm)
95 |
96 | # input_.shape -> (batch_size, time_steps, rows, cols, channels)
97 | def call(self, input_, states, training=True):
98 |
99 | new_states = []
100 | for layer in range(self.dec_layers):
101 | output, hidden_state, cell_state = self.conv_lstm[layer](
102 | input_,
103 | initial_state=states[layer]
104 | )
105 | new_states.append([hidden_state, cell_state])
106 | #input_ = self.batch_norm[layer](output, training=training)
107 | #input_ = tf.expand_dims(input_, 1)
108 | input_ = tf.expand_dims(output, 1)
109 |
110 | frames = self.conv_nn(output)
111 | return frames, new_states
112 |
113 |
114 | # Builds an encoder-decoder
115 | class EncoderDecoder:
116 | def __init__(
117 | self,
118 | num_layers,
119 | unit_list,
120 | filter_sz,
121 | batch_sz,
122 | image_sz,
123 | checkpoint_dir,
124 | ):
125 | self.num_layers = num_layers
126 | self.batch_sz = batch_sz
127 | self.image_sz = image_sz
128 | self.encoder = Encoder(num_layers, unit_list, filter_sz, image_sz, batch_sz)
129 | self.decoder = Decoder(num_layers, unit_list, filter_sz)
130 | self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)
131 | self.checkpoint_dir = checkpoint_dir
132 | self.checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
133 | self.checkpoint = tf.train.Checkpoint(
134 | optimizer=self.optimizer,
135 | encoder=self.encoder,
136 | decoder=self.decoder
137 | )
138 |
139 | # Binary crossentropy
140 | # -(T * logP + (1 - T) * log(1 - P))
141 | self.loss_object = tf.keras.losses.BinaryCrossentropy()
142 | # self.loss_object = tf.keras.losses.CategoricalCrossentropy()
143 | # self.loss_object = tf.keras.losses.CategoricalCrossentropy(
144 | # reduction=tf.keras.losses.Reduction.SUM
145 | #)
146 |
147 | def loss_function(self, real_frame, pred_frame):
148 | return self.loss_object(real_frame, pred_frame)
149 |
150 | # input_ -> (batch_size, time_steps, rows, cols, channels)
151 | # target -> (batch_size, time_steps, rows, cols, channels)
152 | def train_step(self, input_, target):
153 | batch_loss = 0
154 | start_pred = input_.shape[1] - 1
155 |
156 | with tf.GradientTape() as tape:
157 |
158 | dec_states = self.encoder(input_[:, :start_pred, :, :, :], self.batch_sz, True)
159 | dec_input = tf.expand_dims(input_[:, start_pred, :, :, :], 1)
160 |
161 | # Teacher forcing
162 | for t in range(0, target.shape[1]):
163 | prediction, dec_states = self.decoder(dec_input, dec_states)
164 |
165 | batch_loss += self.loss_function(target[:, t, :, :, :], prediction)
166 |
167 | # using teacher forcing
168 | dec_input = tf.expand_dims(target[:, t, :, :, :], 1)
169 |
170 |
171 | variables = self.encoder.trainable_variables + self.decoder.trainable_variables
172 | gradients = tape.gradient(batch_loss, variables)
173 | self.optimizer.apply_gradients(zip(gradients, variables))
174 | return (batch_loss / int(target.shape[1]))
175 |
176 | # inputX - > (total, time_steps, rows, cols, channels)
177 | # targetY -> (total, time_steps, rows, cols, channels)
178 | def train(self, inputX, targetY, epochs, X, Y):
179 | init_time = time.time()
180 | for epoch in range(epochs):
181 | start = time.time()
182 | total_loss = 0
183 | total_batch = inputX.shape[0] // self.batch_sz
184 | #print(total_batch)
185 |
186 | for batch in range(total_batch):
187 | index = batch * self.batch_sz
188 | input_ = inputX[index:index + self.batch_sz, :, :, :, :]
189 | target = targetY[index:index + self.batch_sz, :, :, :, :]
190 |
191 | # print(input_.shape, target.shape)
192 |
193 | batch_loss = self.train_step(input_, target)
194 | total_loss += batch_loss
195 |
196 | # saving (checkpoint) the model every 25 epochs
197 | if epoch % 25 == 0:
198 | self.checkpoint.save(file_prefix = self.checkpoint_prefix)
199 | if epoch % 50 == 0:
200 | self.test_model(X, Y)
201 | if (time.time() - init_time) / 3600.0 > 8:
202 | break
203 | # self.checkpoint.save(file_prefix = self.checkpoint_prefix)
204 | total_batch += 1
205 | print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / total_batch))
206 | print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
207 |
208 | def restore(self):
209 | self.checkpoint.restore(tf.train.latest_checkpoint(self.checkpoint_dir))
210 | self.checkpoint_dir = "./training_checkpoints"
211 | self.checkpoint_prefix = os.path.join(self.checkpoint_dir, "ckpt")
212 | self.checkpoint = tf.train.Checkpoint(
213 | optimizer=self.optimizer,
214 | encoder=self.encoder,
215 | decoder=self.decoder
216 | )
217 |
218 | # input -> (time_steps, rows, cols, channels)
219 | def predict(self, input_, output_seq):
220 | input_ = tf.expand_dims(input_, 0)
221 | start_pred = input_.shape[1] - 1
222 | dec_states = self.encoder(input_[:, :start_pred, :, :, :], 1, False)
223 | dec_input = tf.expand_dims(input_[:,-1, :, :, :], 1)
224 |
225 | predictions = []
226 |
227 | for t in range(output_seq):
228 | prediction, dec_states = self.decoder(dec_input, dec_states, False)
229 | dec_input = tf.expand_dims(prediction, 0)
230 | predictions.append(prediction.numpy().reshape(100, 100))
231 |
232 | return np.array(predictions)
233 |
234 | # input_ -> (batch_size, time_steps, rows, cols, channels)
235 | # target -> (batch_size, time_steps, rows, cols, channels)
236 | def eval_step(self, input_, target):
237 |
238 | batch_loss = 0
239 | start_pred = input_.shape[1] - 1
240 |
241 | dec_states = self.encoder(input_[:, :start_pred, :, :, :], self.batch_sz, True)
242 | dec_input = tf.expand_dims(input_[:, start_pred, :, :, :], 1)
243 |
244 | for t in range(0, target.shape[1]):
245 | prediction, dec_states = self.decoder(dec_input, dec_states)
246 | batch_loss += self.loss_function(target[:, t, :, :, :], prediction)
247 | # using teacher forcing
248 | dec_input = tf.expand_dims(target[:, t, :, :, :], 1)
249 |
250 | return (batch_loss / int(target.shape[1]))
251 |
252 | def pred_step(self, input_, target):
253 |
254 | batch_loss = 0
255 | start_pred = input_.shape[1] - 1
256 |
257 | dec_states = self.encoder(input_[:, :start_pred, :, :, :], self.batch_sz, True)
258 | dec_input = tf.expand_dims(input_[:, start_pred, :, :, :], 1)
259 |
260 | for t in range(0, target.shape[1]):
261 | prediction, dec_states = self.decoder(dec_input, dec_states)
262 | batch_loss += self.loss_function(target[:, t, :, :, :], prediction)
263 | dec_input = tf.expand_dims(prediction, 1)
264 |
265 | return (batch_loss / int(target.shape[1]))
266 |
267 | def evaluate(self, inputX, outputY, valid=True):
268 |
269 | total_loss = 0
270 | total_batch = inputX.shape[0] // self.batch_sz
271 | for batch in range(total_batch):
272 | index = batch * self.batch_sz
273 | input_ = inputX[index:index + self.batch_sz, :, :, :, :]
274 | target = outputY[index:index + self.batch_sz, :, :, :, :]
275 |
276 | if valid == True:
277 | batch_loss = self.eval_step(input_, target)
278 | total_loss += batch_loss
279 | else:
280 | batch_loss = self.pred_step(input_, target)
281 | total_loss += batch_loss
282 |
283 | total_batch += 1
284 | print('Evaluation: Total Loss {:.4f}'.format(total_loss / total_batch))
285 | return total_loss / total_batch
286 |
287 | def test_model(self, X, Y):
288 | e1 = self.evaluate(X[700:800], Y[700:800], True)
289 | e2 = self.evaluate(X[800:], Y[800:], False)
290 | y1 = self.predict(X[50], 10)
291 | y2 = self.predict(X[150], 10)
292 | y3 = self.predict(X[940], 10)
293 | y4 = self.predict(X[934], 10)
294 | plot_result(X[50].numpy().reshape(10, 100, 100), Y[50].numpy().reshape(10, 100, 100), y1)
295 | plot_result(X[150].numpy().reshape(10, 100, 100), Y[150].numpy().reshape(10, 100, 100), y2)
296 | plot_result(X[940].numpy().reshape(10, 100, 100), Y[940].numpy().reshape(10, 100, 100), y3)
297 | plot_result(X[934].numpy().reshape(10, 100, 100), Y[934].numpy().reshape(10, 100, 100), y4)
298 |
299 |
300 | def load_dataset(path, filename):
301 | train_data = np.load(path + filename)
302 | # reshape each 100 x 100 frame into a 50 x 50 x 4 volume
303 | train_data = train_data.reshape(train_data.shape[0], train_data.shape[1], 50, 50, 4)
304 | train_data[train_data < 200] = 0
305 | train_data[train_data >= 200] = 1
306 | #train_data = train_data / 255.0
307 | print(train_data.min(), train_data.max())
308 | # train_data = np.expand_dims(train_data, 4)
309 | print(train_data.shape)
310 | X = train_data[:, :10, :, :, :]
311 | Y = train_data[:, 10:21, :, :, :]
312 | # plt.show()  # no figure has been created at this point
313 | X = tf.convert_to_tensor(X, dtype=tf.float32)
314 | Y = tf.convert_to_tensor(Y, dtype=tf.float32)
315 | return (X, Y)
316 |
317 | def plot_result(input_, actual, predict):
318 |
319 | for i in range(input_.shape[0]):
320 | plt.imshow(input_[i])
321 | plt.title("Actual_" + str(i + 1))
322 | plt.show()
323 |
324 | for i in range(actual.shape[0]):
325 | plt.subplot(121), plt.imshow(actual[i]),
326 | plt.title("Actual_" + str(i + 1 + input_.shape[0]))
327 | plt.subplot(122), plt.imshow(predict[i]),
328 | plt.title("Predicted_" + str(i + 1 + input_.shape[0]))
329 | plt.show()
330 |
331 | X, Y = load_dataset("../input/nexraddata/", 'data.npy')
332 | model = EncoderDecoder(2, [64, 48], [(3, 3), (3, 3)], 16, (X.shape[2], X.shape[3]), './training_checkpoints')
333 | #model.restore()
334 | model.train(X[:700], Y[:700], 400, X, Y)
335 |
336 | model.test_model(X, Y)
337 |
338 |
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/10.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/11.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/12.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/13.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/14.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/15.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/16.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/17.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/18.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/19.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/20.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/21.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/22.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/23.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/24.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/25.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/26.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/27.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/28.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/29.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/30.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/31.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/31.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/32.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/33.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/33.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/34.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/34.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/35.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/35.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/36.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/36.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/37.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/37.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/38.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/38.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/39.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/39.png
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/output1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/output1.gif
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/output2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/output2.gif
--------------------------------------------------------------------------------
/NEXRAD/PHWA/reshape-64_48/output3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/PHWA/reshape-64_48/output3.gif
--------------------------------------------------------------------------------
/NEXRAD/README.md:
--------------------------------------------------------------------------------
1 | # NEXRAD
2 | The Next Generation Weather Radar (NEXRAD) system currently comprises 160 sites throughout the United States and select overseas locations.
3 | NEXRAD detects precipitation and atmospheric movement or wind. It returns data which when processed can be displayed in a mosaic map which
4 | shows patterns of precipitation and its movement. The NCEI archive includes the base data, called Level-II, and the derived products,
5 | called Level-III. Level-II data include the original three meteorological base data quantities: reflectivity, mean radial velocity, and
6 | spectrum width, as well as the dual-polarization base data of differential reflectivity, correlation coefficient, and differential phase.
7 |
8 | Radar data can be accessed at https://www.ncdc.noaa.gov/nexradinv/. There are different ways to access the data: for example, data can be accessed
9 | by Single Site and Day, Multiple Sites and Days, etc.
10 |
11 | ## Download Data
12 | Single-day, single-site Level-II data can be downloaded directly (~500 MB per day). Other data, such as Level-III products or multiple days, needs to be ordered, and processing takes around 1-2 hours.
13 | To directly download the data for a single site and single day, change the GET request parameters for site, day, month and year in `download_day.py` and run
14 | ```
15 | python3 download_day.py
16 | ```
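
For reference, the GET request parameters are a small dict inside `download_day.py`; the values below mirror its defaults (site PHWA, 31 October 2020), so only these fields need editing:
```
params = {
    "id"      : "PHWA",   # radar site id
    "yyyy"    : "2020",   # year
    "mm"      : "10",     # month
    "dd"      : "31",     # day of month
    "product" : "AAL2"    # Level-II product code
}
```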
17 |
18 | ## Reflectivity Plot
19 | The reflectivity can be plotted using the MetPy and Py-Art libraries.
20 | MetPy:
21 | ```
22 | python3 read_metpy.py
23 | ```
24 | Py-Art:
25 | ```
26 | python3 read_pyart.py
27 | ```
28 |
29 |
30 |
31 |
32 | (a) Using MetPy
33 |
34 | (b) Using Py-Art
35 |
36 |
37 | ## Create Dataset
38 | To extract the reflectivity plots from Level-II data, run
39 | ```
40 | python3 dataset_day.py
41 | ```
42 | It downloads the data for a single day and saves the reflectivity plots as a numpy array, with each frame sized 100 x 100.
43 | To create a dataset for multiple days, run
44 | ```
45 | python3 dataset_mult_day.py
46 | ```
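
As a quick sanity check, the saved array (the multi-day script calls `np.save("oct", data)`, producing `oct.npy`) can be inspected with:
```
import numpy as np

data = np.load("oct.npy")
print(data.shape)   # (num_frames, 100, 100)
```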
47 |
48 |
49 | ## References
50 | [1] http://arm-doe.github.io/pyart/source/auto_examples/plotting/plot_nexrad_reflectivity.html
51 | [2] https://unidata.github.io/MetPy/latest/examples/formats/NEXRAD_Level_2_File.html
52 |
--------------------------------------------------------------------------------
/NEXRAD/dataset_day.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import cv2
4 | from os import listdir
5 |
6 | from metpy.cbook import get_test_data
7 | from metpy.io import Level2File
8 | from metpy.plots import add_timestamp
9 |
10 | def read_nexRad(filename):
11 |
12 | # Open the file
13 | # name = get_test_data('PHWA20201031_000332_V06.gz', as_file_obj=False)
14 | f = Level2File(filename)
15 |
16 | # print(f.sweeps[0][0])
17 | # Pull data out of the file
18 | sweep = 0
19 |
20 | # First item in ray is header, which has azimuth angle
21 | az = np.array([ray[0].az_angle for ray in f.sweeps[sweep]])
22 |
23 | # 5th item is a dict mapping a var name (byte string) to a tuple
24 | # of (header, data array)
25 | ref_hdr = f.sweeps[sweep][0][4][b'REF'][0]
26 | ref_range = np.arange(ref_hdr.num_gates) * ref_hdr.gate_width + ref_hdr.first_gate
27 | ref = np.array([ray[4][b'REF'][1] for ray in f.sweeps[sweep]])
28 |
29 | # rho_hdr = f.sweeps[sweep][0][4][b'RHO'][0]
30 | # rho_range = (np.arange(rho_hdr.num_gates + 1) - 0.5) * rho_hdr.gate_width + rho_hdr.first_gate
31 | # rho = np.array([ray[4][b'RHO'][1] for ray in f.sweeps[sweep]])
32 |
33 |
34 | fig, axes = plt.subplots(1, 1, figsize=(15, 8))
35 |
36 | # reflectivity plot
37 | data = np.ma.array(ref)
38 | data[np.isnan(data)] = np.ma.masked
39 |
40 | # Convert az,range to x,y
41 | xlocs = ref_range * np.sin(np.deg2rad(az[:, np.newaxis]))
42 | ylocs = ref_range * np.cos(np.deg2rad(az[:, np.newaxis]))
43 |
44 | # Plot the data
45 | axes.pcolormesh(xlocs, ylocs, data, cmap='viridis')
46 | axes.set_aspect('equal', 'datalim')
47 | axes.set_xlim(-150, 150)
48 | axes.set_ylim(-150, 150)
49 | add_timestamp(axes, f.dt, y=0.02, high_contrast=True)
50 | axes.axis('off')
51 | # fig.show()
52 |
53 | # redraw the plot
54 | fig.canvas.draw()
55 |
56 | # Now we can save it to a numpy array.
57 | width, height = fig.get_size_inches() * fig.get_dpi()
58 | data = np.frombuffer(fig.canvas.tostring_rgb(),
59 |                      dtype=np.uint8).reshape(int(height), int(width), 3)
60 | data = cv2.cvtColor(data[200:600, 600:1000], cv2.COLOR_RGB2GRAY)  # canvas buffer is RGB
61 | data = cv2.resize(data, (200, 200), interpolation=cv2.INTER_NEAREST)
62 |
63 | plt.close()
64 | # data = cv2.blur(data, (3, 3))
65 | # print(data.shape)
66 | # plt.show()
67 | # plt.imshow(data, cmap='gray')
68 | # plt.show()
69 | #plt.savefig('test.png', cmap='gray')
70 |
71 | # save into a file
72 | return data
73 |
74 | def main():
75 | dirname = "31102020"
76 | # sort to get files in the correct sequence
77 | filenames = listdir(dirname)
78 | filenames.sort()
79 |
80 | data = []
81 | itern = 0
82 | for filename in filenames:
83 | path = dirname + "/" + filename
84 | data.append(read_nexRad(path))
85 | itern += 1
86 | if (itern % 5 == 0):
87 | print(itern, "files read")
88 |
89 | data = np.array(data)
90 | np.save("oct31", data)
91 | '''
92 | filename = dirname + "/" + filenames[0] + ".npy"
93 | data = np.load(filename)
94 | plt.imshow(data)
95 | plt.show()
96 | '''
97 |
98 | if __name__ == "__main__":
99 | main()
100 |
--------------------------------------------------------------------------------
/NEXRAD/dataset_mult_day.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import cv2
4 | import shutil
5 | import requests
6 | import os
7 | from lxml import html
8 |
9 | # from metpy.cbook import get_test_data
10 | from metpy.io import Level2File
11 | from metpy.plots import add_timestamp
12 |
13 | def read_nexRad(filename):
14 |
15 | # Open the file
16 | # name = get_test_data('PHWA20201031_000332_V06.gz', as_file_obj=False)
17 | f = Level2File(filename)
18 | # f = filename
19 | # print(f.sweeps[0][0])
20 | # Pull data out of the file
21 | sweep = 0
22 |
23 | # First item in ray is header, which has azimuth angle
24 | az = np.array([ray[0].az_angle for ray in f.sweeps[sweep]])
25 |
26 | # 5th item is a dict mapping a var name (byte string) to a tuple
27 | # of (header, data array)
28 | ref_hdr = f.sweeps[sweep][0][4][b'REF'][0]
29 | ref_range = np.arange(ref_hdr.num_gates) * ref_hdr.gate_width + ref_hdr.first_gate
30 | ref = np.array([ray[4][b'REF'][1] for ray in f.sweeps[sweep]])
31 |
32 | # rho_hdr = f.sweeps[sweep][0][4][b'RHO'][0]
33 | # rho_range = (np.arange(rho_hdr.num_gates + 1) - 0.5) * rho_hdr.gate_width + rho_hdr.first_gate
34 | # rho = np.array([ray[4][b'RHO'][1] for ray in f.sweeps[sweep]])
35 |
36 |
37 | fig, axes = plt.subplots(1, 1, figsize=(6, 3))
38 |
39 | # reflectivity plot
40 | data = np.ma.array(ref)
41 | data[np.isnan(data)] = np.ma.masked
42 |
43 | # Convert az,range to x,y
44 | xlocs = ref_range * np.sin(np.deg2rad(az[:, np.newaxis]))
45 | ylocs = ref_range * np.cos(np.deg2rad(az[:, np.newaxis]))
46 |
47 | # Plot the data
48 | axes.pcolormesh(xlocs, ylocs, data, cmap='viridis')
49 | axes.set_aspect('equal', 'datalim')
50 | axes.set_xlim(-150, 150)
51 | axes.set_ylim(-150, 150)
52 | # add_timestamp(axes, f.dt, y=0.02, high_contrast=True)
53 | axes.axis('off')
54 | # plt.show()
55 |
56 | # redraw the plot
57 | fig.canvas.draw()
58 |
59 | # Now we can save it to a numpy array.
60 | width, height = fig.get_size_inches() * fig.get_dpi()
61 | data = np.frombuffer(fig.canvas.tostring_rgb(),
62 |                      dtype=np.uint8).reshape(int(height), int(width), 3)
63 | # print(data.shape)
64 | data = cv2.cvtColor(data[30:180, 150:300], cv2.COLOR_RGB2GRAY)  # canvas buffer is RGB
65 | data = cv2.resize(data, (100, 100), interpolation=cv2.INTER_AREA)
66 | fig.clf()
67 | plt.close()
68 | # data = cv2.blur(data, (3, 3))
69 | # print(data.shape)
70 | # plt.show()
71 | # plt.imshow(data, cmap='gray')
72 | # plt.show()
73 | #plt.savefig('test.png', cmap='gray')
74 |
75 | # save into a file
76 | return data
77 |
78 | # generates radar data for a day
79 | def save_day(dirname):
80 | # sort to get files in the correct sequence
81 | filenames = os.listdir(dirname)
82 | filenames.sort()
83 |
84 | data = []
85 | itern = 0
86 | for filename in filenames:
87 | path = dirname + "/" + filename
88 | data.append(read_nexRad(path))
89 | itern += 1
90 | if (itern % 5 == 0):
91 | print(itern, "files read")
92 |
93 | data = np.array(data)
94 | return data
95 |
96 |
97 | # Downloads all the NexRad level 2 data for the day
98 | def download_data(url, params):
99 | page = requests.get(url, params=params)
100 | tree = html.fromstring(page.content)
101 | data_links = tree.xpath('//div[@class="bdpLink"]/a/@href')
102 | data_name = tree.xpath('//div[@class="bdpLink"]/a/text()')
103 | print("Total NexRad level 2 data : ", len(data_links))
104 |
105 | # one directory for each day
106 | path = params["dd"] + params["mm"] + params["yyyy"] + "/"
107 | if not os.path.exists(path):
108 | os.makedirs(path)
109 |
110 | # downloads the data
111 | cnt = 0
112 | filenames = set(os.listdir(path))
113 | for (name, link) in zip(data_name, data_links):
114 | cnt += 1
115 | if cnt % 5 == 0:
116 | print(cnt, "files downloaded")
117 | # first 23 characters
118 | filename = name.lstrip()[:23]
119 | if filename in filenames:
120 | continue
121 | radar = requests.get(link)
122 | open(path + filename, 'wb').write(radar.content)
123 |
124 |
125 | def main():
126 |
127 | url = 'https://www.ncdc.noaa.gov/nexradinv/bdp-download.jsp'
128 | params = {
129 | "id" : "PHWA",
130 | "yyyy" : "2020",
131 | "mm" : "10",
132 | "dd" : "27",
133 | "product" : "AAL2"
134 | }
135 | data = []
136 | for date in range(1, 32):  # days 1..31 (range(0, 31) requested an invalid day "00" and skipped the 31st)
137 |     if date < 10:
138 |         params["dd"] = "0" + str(date)
139 |     else:
140 |         params["dd"] = str(date)
141 | download_data(url, params)
142 | dirname = params["mm"] + params["yyyy"]
143 |
144 | if date < 10:
145 | dirname = "0" + str(date) + dirname
146 | else:
147 | dirname = str(date) + dirname
148 |
149 | temp = save_day(dirname)
150 | if len(data) == 0:
151 | data = temp
152 | else:
153 | data = np.concatenate((data, temp), 0)
154 | #os.rmdir(dirname)
155 | shutil.rmtree(dirname, ignore_errors=True)
156 | np.save("oct", data)
157 |
158 | if __name__ == "__main__":
159 | main()
160 |
--------------------------------------------------------------------------------
/NEXRAD/download_day.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import os
3 | from lxml import html
4 |
5 | # Downloads all the NexRad level 2 data for the day
6 | def download_data(url, params):
7 | page = requests.get(url, params=params)
8 | tree = html.fromstring(page.content)
9 | data_links = tree.xpath('//div[@class="bdpLink"]/a/@href')
10 | data_name = tree.xpath('//div[@class="bdpLink"]/a/text()')
11 | print("Total NexRad level 2 data : ", len(data_links))
12 |
13 | # one directory for each day
14 | path = params["dd"] + params["mm"] + params["yyyy"] + "/"
15 | if not os.path.exists(path):
16 | os.makedirs(path)
17 |
18 | # downloads the data
19 | for (name, link) in zip(data_name, data_links):
20 | # first 23 characters
21 | filename = name.lstrip()[:23]
22 | radar = requests.get(link)
23 | open(path + filename, 'wb').write(radar.content)
24 |
25 |
26 | def main():
27 |
28 | url = 'https://www.ncdc.noaa.gov/nexradinv/bdp-download.jsp'
29 | params = {
30 | "id" : "PHWA",
31 | "yyyy" : "2020",
32 | "mm" : "10",
33 | "dd" : "31",
34 | "product" : "AAL2"
35 | }
36 | download_data(url, params)
37 |
38 | if __name__ == "__main__":
39 | main()
40 |
--------------------------------------------------------------------------------
/NEXRAD/latest_Small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/latest_Small.png
--------------------------------------------------------------------------------
/NEXRAD/radar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/radar.png
--------------------------------------------------------------------------------
/NEXRAD/radar_metpy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/NEXRAD/radar_metpy.png
--------------------------------------------------------------------------------
/NEXRAD/read_metpy.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 | from metpy.cbook import get_test_data
5 | from metpy.io import Level2File
6 | from metpy.plots import add_timestamp
7 |
8 | def read_nexRad(filename):
9 |
10 | # Open the file
11 | # name = get_test_data('PHWA20201031_000332_V06.gz', as_file_obj=False)
12 | f = Level2File(filename)
13 |
14 | # print(f.sweeps[0][0])
15 | # Pull data out of the file
16 | sweep = 0
17 |
18 | # First item in ray is header, which has azimuth angle
19 | az = np.array([ray[0].az_angle for ray in f.sweeps[sweep]])
20 |
21 | # 5th item is a dict mapping a var name (byte string) to a tuple
22 | # of (header, data array)
23 | ref_hdr = f.sweeps[sweep][0][4][b'REF'][0]
24 | ref_range = np.arange(ref_hdr.num_gates) * ref_hdr.gate_width + ref_hdr.first_gate
25 | ref = np.array([ray[4][b'REF'][1] for ray in f.sweeps[sweep]])
26 |
27 | # rho_hdr = f.sweeps[sweep][0][4][b'RHO'][0]
28 | # rho_range = (np.arange(rho_hdr.num_gates + 1) - 0.5) * rho_hdr.gate_width + rho_hdr.first_gate
29 | # rho = np.array([ray[4][b'RHO'][1] for ray in f.sweeps[sweep]])
30 |
31 |
32 | fig, axes = plt.subplots(1, 1, figsize=(15, 8))
33 |
34 | # reflectivity plot
35 | data = np.ma.array(ref)
36 | data[np.isnan(data)] = np.ma.masked
37 |
38 | # Convert az,range to x,y
39 | xlocs = ref_range * np.sin(np.deg2rad(az[:, np.newaxis]))
40 | ylocs = ref_range * np.cos(np.deg2rad(az[:, np.newaxis]))
41 |
42 | # Plot the data
43 | axes.pcolormesh(xlocs, ylocs, data, cmap='viridis')
44 | axes.set_aspect('equal', 'datalim')
45 | axes.set_xlim(-150, 150)
46 | axes.set_ylim(-150, 150)
47 | #plt.axis('off')
48 | plt.show()
49 | return  # the code below is unreachable and kept only as an example of rasterizing the canvas
50 | # redraw the plot
51 | fig.canvas.draw()
52 |
53 | # Now we can save it to a numpy array.
54 | width, height = fig.get_size_inches() * fig.get_dpi()
55 | data = np.frombuffer(fig.canvas.tostring_rgb(),
56 |                      dtype=np.uint8).reshape(int(height), int(width), 3)
57 | print(data.shape)
58 | plt.show()
59 | plt.imshow(data)
60 | plt.savefig('test.png', cmap='gray')
61 |
62 | '''
63 |
64 | Example code for plotting ref and rho
65 |
66 | fig, axes = plt.subplots(1, 2, figsize=(15, 8))
67 | add_metpy_logo(fig, 190, 85, size='large')
68 | for var_data, var_range, ax in zip((ref, rho), (ref_range, rho_range), axes):
69 | # Turn into an array, then mask
70 | data = np.ma.array(var_data)
71 | data[np.isnan(data)] = np.ma.masked
72 |
73 | # Convert az,range to x,y
74 | xlocs = var_range * np.sin(np.deg2rad(az[:, np.newaxis]))
75 | ylocs = var_range * np.cos(np.deg2rad(az[:, np.newaxis]))
76 | print((xlocs.shape), (ylocs.shape), (data.shape))
77 | # Plot the data
78 | ax.pcolormesh(xlocs, ylocs, data, cmap='viridis')
79 | ax.set_aspect('equal', 'datalim')
80 | ax.set_xlim(-150, 150)
81 | ax.set_ylim(-150, 150)
82 | plt.savefig('test.png')
83 | #add_timestamp(ax, f.dt, y=0.02, high_contrast=True)
84 |
85 |
86 | plt.show()
87 | '''
88 |
89 | def main():
90 | filename = "KATX20200130_001651_V06"
91 | read_nexRad(filename)
92 |
93 | if __name__ == "__main__":
94 | main()
95 |
--------------------------------------------------------------------------------
/NEXRAD/read_pyart.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | from netCDF4 import Dataset
4 | import pyart
5 |
6 | def main():
7 |
8 | filename = "PHWA20201031_000332_V06"
9 | radar = pyart.io.read_nexrad_archive(filename)
10 |
11 | radar_np = radar.fields['reflectivity']['data']
12 | print(radar_np.shape)
13 | #plt.imshow(radar_np)
14 | #plt.show()
15 |
16 |
17 | display = pyart.graph.RadarDisplay(radar)
18 | fig = plt.figure(figsize=(6, 5))
19 |
20 | # plot super-resolution reflectivity
21 | ax = fig.add_subplot(111)
22 | display.plot('reflectivity', 0, title='NEXRAD Reflectivity',
23 | vmin=-32, vmax=64, colorbar_label='', ax=ax)
24 | display.plot_range_ring(radar.range['data'][-1]/1000., ax=ax)
25 | display.set_limits(xlim=(-200, 200), ylim=(-200, 200), ax=ax)
26 | plt.show()
27 |
28 |
29 | if __name__ == "__main__":
30 | main()
31 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep-Learning-for-Weather-and-Climate-Science
2 |
3 | Nowcasting is weather forecasting over a very short-term, mesoscale period of up to 6 hours. The goal is to give precise and timely predictions of precipitation, storm structure, hail potential, etc. in a local region over a short time period
4 | (e.g., 0-6 hours). These predictions are useful for producing alerts for rainfall, storms, hail, etc., and for providing weather guidance for airports.
5 |
6 | A weather radar's reflectivity is used by scientists to detect precipitation, evaluate storm structure, determine hail potential, etc. A sequence of radar reflectivity maps over a region has a spatiotemporal nature. Weather nowcasting is therefore a spatiotemporal sequence forecasting problem, with the sequence of past reflectivity maps as input and the
7 | sequence of future reflectivity maps as output.
8 |
9 | The LSTM encoder-decoder architecture provides a general framework for sequence-to-sequence learning problems. I have implemented a Convolutional LSTM Encoder-Decoder Network [1] for weather forecasting, with the sequences being maps of
10 | radar reflectivity.
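
Concretely, the sequences move through the network as 5-D tensors. The shapes below follow the conventions used throughout the code (see `encoder_decoder.py`): the encoder reads all but the last input frame, and the last input frame seeds the decoder, which then predicts the future frames one step at a time.
```
# input_ -> (batch_size, time_steps, rows, cols, channels)  past reflectivity maps
# target -> (batch_size, time_steps, rows, cols, channels)  future reflectivity maps
# the encoder consumes input_[:, :-1, ...]; input_[:, -1, ...] is the first decoder input
```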
11 |
12 | ## Weather Forecasting using NEXRAD
13 |
14 | The Next Generation Weather Radar (NEXRAD) [3] system currently comprises 160 sites throughout the United States and select overseas locations. NEXRAD detects precipitation and atmospheric movement or wind. It returns data which, when processed, can be displayed in a mosaic map showing patterns of precipitation and its movement. NEXRAD Level-II
15 | (Base) data include the original three meteorological base data quantities: reflectivity, mean radial velocity, and spectrum width. Data is collected from the radar sites usually at intervals of 4, 5, 6, or 10 minutes, depending upon
16 | the volume coverage. Radar data can be accessed at https://www.ncdc.noaa.gov/nexradinv/.
17 |
18 | Reflectivity is expressed in dBZ. A higher reflectivity value indicates heavy precipitation or hail at that location, while a lower value indicates light precipitation. For example, 65 dBZ means extremely heavy precipitation (410 mm per hour, but likely hail), 50 dBZ means heavy precipitation (51 mm per hour), and 35 dBZ means moderate precipitation of 6.4 mm per hour [2]. So, the reflectivity component of the Level-II data can be used for short-duration weather forecasting.
19 |
20 |
21 |
22 | (a) Available Radar Sites
23 |
24 |
25 | (b) Reflectivity plot of the Seattle radar site
26 |
27 |
28 |
29 |
30 | (c) Reflectivity maps of combined radars
31 |
32 |
33 |
34 |
35 |
36 |
37 | In this project, weather forecasting was done for two regions: Seattle, WA and South Shore,
38 | Hawaii. For each region, radar Level-II data was collected for some duration and reflectivity plots
39 | were extracted. These plots (images) were resized to 100 x 100 using nearest-neighbor
40 | interpolation, converted to grayscale, and then thresholded to binary intensities.
41 | These image sequences were then used for training and testing the models.
42 | Forecasting was done independently for each region. (To keep the dataset simple, the images
43 | were reduced to only two intensities, and the models were trained to predict only the shapes,
44 | not the intensities.)
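
A minimal sketch of this preprocessing, assuming OpenCV and the binarization threshold of 200 used in the training scripts (the `preprocess` helper is illustrative, not part of the repository; crop windows and exact sizes vary per site):
```
import cv2
import numpy as np

# illustrative helper (not in the repo): 100 x 100 resize, grayscale, binary threshold
def preprocess(frame):
    small = cv2.resize(frame, (100, 100), interpolation=cv2.INTER_NEAREST)
    gray = cv2.cvtColor(small, cv2.COLOR_RGB2GRAY) if small.ndim == 3 else small
    return (gray >= 200).astype(np.float32)   # two intensities: 0.0 / 1.0
```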
45 |
46 | ### PHWA-SOUTH SHORE, HAWAII
47 | PHWA is the id of the radar at South Shore, Hawaii. A dataset of 959 sequences with 20 radar
48 | maps (images) in each sequence was created by collecting radar data for around 30 days from
49 | August 2020 to October 2020. The time gap between consecutive frames of a sequence was around 5
50 | minutes. These sequences were split into 700 training sequences, 100 validation sequences,
51 | and 159 test sequences. The following encoder-decoder networks were trained for forecasting.
52 | * 2 Layers with 64, 48 hidden units and (3 x 3) filter size in each layer. The input frames
53 | were reshaped into 50 x 50 x 4 vectors. The average binary crossentropy loss was 0.1492 (a construction sketch for this model follows the list below).
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 | * 4 Layers with 96, 64, 64, 32 hidden units and (3 x 3) filter size in each layer. The input
63 | frames were reshaped into 25 x 25 x 16 vectors. The average binary crossentropy loss was
64 | 0.1790. (Old result)
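
For the first configuration, a minimal sketch of how the model is constructed and trained, mirroring the calls at the bottom of `enc_dec_64_48.py` (2 layers with 64 and 48 hidden units, 3 x 3 filters, batch size 16, frames reshaped to 50 x 50 x 4):
```
# X, Y -> (total, 10, 50, 50, 4) after the patch reshape in load_dataset()
model = EncoderDecoder(
    num_layers=2,
    unit_list=[64, 48],
    filter_sz=[(3, 3), (3, 3)],
    batch_sz=16,
    image_sz=(50, 50),
    checkpoint_dir='./training_checkpoints',
)
model.train(X[:700], Y[:700], epochs=400, X=X, Y=Y)
```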
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 | ### KATX-SEATTLE, WA
73 | The radar id at Seattle, WA is KATX. A dataset of 499 sequences with 20 radar maps in each
74 | sequence was created by collecting radar data for around 30 days from January 2020 to April
75 | 2020. The time gap between consecutive frames of a sequence was around 10 minutes. These sequences
76 | were split into 350 training sequences, 75 validation sequences, and 74 test sequences.
77 | * 4 Layers with 96, 96, 32, 32 hidden units and (3 x 3) filter size in each layer. The input
78 | frames were reshaped into 25 x 25 x 16 vectors. The average binary crossentropy loss was
79 | 0.1698 on the test data.
80 |
81 |
82 |
83 |
84 |
85 |
86 | ## Predictions on other Datasets
87 | * [Moving-MNIST](https://github.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/tree/master/Moving-MNIST)
88 | * [Keras-next-frame-tutorial](https://github.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/tree/master/next-frame-tutorial)
89 |
90 | ## References
91 | [1] https://arxiv.org/abs/1506.04214
92 | [2] https://en.wikipedia.org/wiki/Weather_radar
93 | [3] https://www.ncdc.noaa.gov/data-access/radar-data/nexrad
94 | [4] https://www.tensorflow.org/tutorials/text/nmt_with_attention
95 |
--------------------------------------------------------------------------------
/btp_report.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/btp_report.pdf
--------------------------------------------------------------------------------
/decoder.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | class Decoder(tf.keras.Model):
4 |
5 | # unit_list -> list of units in each layer
6 | # filter_sz -> list of filter sizes for each layer
7 | # keep parameters same as Encoder
8 | def __init__(self, dec_layers, unit_list, filter_sz, out_channel):
9 | super(Decoder, self).__init__()
10 |
11 | self.dec_layers = dec_layers
12 | self.unit_list = unit_list
13 | self.filter_sz = filter_sz
14 | self.conv_lstm = []
15 | self.batch_norm = []
16 |
17 | # volume convolution over each time step's output
18 | # 1 x 1 convolution producing out_channel output channels
19 | self.conv_nn = tf.keras.layers.Conv2D(filters=out_channel,
20 | kernel_size=(1, 1),
21 | padding="same",
22 | activation="sigmoid",
23 | data_format="channels_last")
24 |
25 | # ConvLSTM layers and Batch Normalization
26 | for layer in range(self.dec_layers):
27 | lstm = tf.keras.layers.ConvLSTM2D(filters=self.unit_list[layer],
28 | kernel_size=self.filter_sz[layer],
29 | padding="same",
30 | return_state=True,
31 | data_format="channels_last")
32 |
33 | # norm = tf.keras.layers.BatchNormalization()
34 | # self.batch_norm.append(norm)
35 | self.conv_lstm.append(lstm)
36 |
37 | # input_.shape -> (batch_size, time_steps, rows, cols, channels)
38 | def call(self, input_, states, training=True):
39 |
40 | new_states = []
41 | for layer in range(self.dec_layers):
42 | output, hidden_state, cell_state = self.conv_lstm[layer](
43 | input_,
44 | initial_state=states[layer]
45 | )
46 | new_states.append([hidden_state, cell_state])
47 | # input_ = self.batch_norm[layer](output, training=training)
48 | # input_ = tf.expand_dims(input_, 1)
49 | input_ = tf.expand_dims(output, 1)
50 |
51 | frames = self.conv_nn(output)
52 | return frames, new_states
53 |
--------------------------------------------------------------------------------
/encoder.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | class Encoder(tf.keras.Model):
4 |
5 | # unit_list -> list of units in each layer
6 | # filter_sz -> list of filter sizes for each layer
7 | def __init__(self, enc_layers, unit_list, filter_sz, image_sz, batch_sz):
8 | super(Encoder, self).__init__()
9 |
10 | self.enc_layers = enc_layers
11 | self.unit_list = unit_list
12 | self.filter_sz = filter_sz
13 | self.image_sz = image_sz
14 | self.batch_sz = batch_sz
15 | self.conv_lstm = []
16 | self.batch_norm = []
17 |
18 | for layer in range(self.enc_layers):
19 | lstm = tf.keras.layers.ConvLSTM2D(filters=self.unit_list[layer],
20 | kernel_size=self.filter_sz[layer],
21 | padding="same",
22 | return_sequences=True,
23 | return_state=True,
24 | data_format="channels_last")
25 |
26 | #if layer != self.enc_layers - 1:
27 | # norm = tf.keras.layers.BatchNormalization()
28 | # self.batch_norm.append(norm)
29 | self.conv_lstm.append(lstm)
30 |
31 |
32 | def initialize_states(self, layer, batch_sz):
33 | return [tf.zeros([batch_sz, self.image_sz[0], self.image_sz[1], self.unit_list[layer]]),
34 | tf.zeros([batch_sz, self.image_sz[0], self.image_sz[1], self.unit_list[layer]])]
35 |
36 |
37 | # Encoder doesn't need states input
38 | # x.shape -> (batch_size, time_steps, rows, cols, channels)
39 | def call(self, input_, batch_sz, training=True):
40 |
41 | states = []
42 | for layer in range(self.enc_layers):
43 | outputs, hidden_state, cell_state = self.conv_lstm[layer](
44 | input_,
45 | initial_state = self.initialize_states(layer, batch_sz)
46 | )
47 | input_ = outputs
48 |
49 | # No batch normalization for now
50 | # if layer != self.enc_layers - 1:
51 | # input_ = self.batch_norm[layer](outputs, training=training)
52 |
53 | states.append([hidden_state, cell_state])
54 |
55 | return states
56 |
--------------------------------------------------------------------------------
/encoder_decoder.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import time
4 | import os
5 | from encoder import Encoder
6 | from decoder import Decoder
7 | from test import test_model
8 |
9 | # Builds an encoder-decoder
10 | class EncoderDecoder:
11 | def __init__(
12 | self,
13 | num_layers,
14 | unit_list,
15 | filter_sz,
16 | batch_sz,
17 | image_sz,
18 | checkpoint_dir,
19 | ):
20 | self.num_layers = num_layers
21 | self.batch_sz = batch_sz
22 | self.image_sz = image_sz
23 | self.encoder = Encoder(num_layers, unit_list, filter_sz, image_sz, batch_sz)
24 | self.decoder = Decoder(num_layers, unit_list, filter_sz, image_sz[2])
25 | self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)
26 | # self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
27 | self.checkpoint_dir = checkpoint_dir
28 | self.checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
29 | self.checkpoint = tf.train.Checkpoint(
30 | optimizer=self.optimizer,
31 | encoder=self.encoder,
32 | decoder=self.decoder
33 | )
34 |
35 | # Binary crossentropy
36 | # -(T * logP + (1 - T) * log(1 - P))
37 | self.loss_object = tf.keras.losses.BinaryCrossentropy()
38 | # self.loss_object = tf.keras.losses.CategoricalCrossentropy()
39 | # self.loss_object = tf.keras.losses.CategoricalCrossentropy(
40 | # reduction=tf.keras.losses.Reduction.SUM
41 | #)
42 |
43 | def __loss_function(self, real_frame, pred_frame):
44 | return tf.reduce_mean(self.loss_object(real_frame, pred_frame))
45 |
46 | # input_ -> (batch_size, time_steps, rows, cols, channels)
47 | # target -> (batch_size, time_steps, rows, cols, channels)
48 | def __train_step(self, input_, target):
49 | batch_loss = 0
50 | start_pred = input_.shape[1] - 1
51 |
52 | with tf.GradientTape() as tape:
53 |
54 | dec_states = self.encoder(input_[:, :start_pred, :, :, :], self.batch_sz, True)
55 | dec_input = tf.expand_dims(input_[:, start_pred, :, :, :], 1)
56 |
57 | # Teacher forcing
58 | for t in range(0, target.shape[1]):
59 | prediction, dec_states = self.decoder(dec_input, dec_states)
60 |
61 | batch_loss += self.__loss_function(target[:, t, :, :, :], prediction)
62 |
63 | # using teacher forcing
64 | dec_input = tf.expand_dims(target[:, t, :, :, :], 1)
65 |
66 |
67 | variables = self.encoder.trainable_variables + self.decoder.trainable_variables
68 | gradients = tape.gradient(batch_loss, variables)
69 | self.optimizer.apply_gradients(zip(gradients, variables))
70 | return (batch_loss / int(target.shape[1]))
71 |
72 | # inputX - > (total, time_steps, rows, cols, channels)
73 | # targetY -> (total, time_steps, rows, cols, channels)
74 | def train(self, inputX, targetY, epochs, valX, valY, X, Y):
75 | init_time = time.time()
76 | for epoch in range(epochs):
77 | start = time.time()
78 | total_loss = 0
79 | total_batch = inputX.shape[0] // self.batch_sz
80 | #print(total_batch)
81 |
82 | for batch in range(total_batch):
83 | index = batch * self.batch_sz
84 | input_ = inputX[index:index + self.batch_sz, :, :, :, :]
85 | target = targetY[index:index + self.batch_sz, :, :, :, :]
86 |
87 | # print(input_.shape, target.shape)
88 |
89 | batch_loss = self.__train_step(input_, target)
90 | total_loss += batch_loss
91 |
92 | # saving (checkpoint) the model every 10 epochs
93 | if epoch % 10 == 0:
94 | self.checkpoint.save(file_prefix = self.checkpoint_prefix)
95 | val_loss = self.evaluate(valX, valY)
96 | print('Epoch {} Evaluation Loss {:.4f}'.format(epoch + 1, val_loss))
97 | # if epoch % 50 == 0:
98 | test_model(self, X, Y)
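| # stop training once total wall-clock time exceeds 8 hours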
99 | if (time.time() - init_time) / 3600.0 > 8:
100 | break
101 |
103 | print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / total_batch))
104 | print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
105 |
106 | def restore(self):
107 | self.checkpoint.restore(tf.train.latest_checkpoint(self.checkpoint_dir))
108 |
109 | # input_ -> (batch_size, time_steps, rows, cols, channels)
110 | # target -> (batch_size, time_steps, rows, cols, channels)
111 | # valid -> validation
112 | def __eval_step(self, input_, target, valid):
113 |
114 | batch_loss = 0
115 | start_pred = input_.shape[1] - 1
116 |
117 | dec_states = self.encoder(input_[:, :start_pred, :, :, :], self.batch_sz, True)
118 | dec_input = tf.expand_dims(input_[:, start_pred, :, :, :], 1)
119 |
120 | for t in range(0, target.shape[1]):
121 | prediction, dec_states = self.decoder(dec_input, dec_states)
122 | batch_loss += self.__loss_function(target[:, t, :, :, :], prediction)
123 |
124 | # if evaluating on validation set
125 | if valid:
126 | # using teacher forcing
127 | dec_input = tf.expand_dims(target[:, t, :, :, :], 1)
128 | else:
129 | # evaluating on testing set
130 | dec_input = tf.expand_dims(prediction, 1)
131 |
132 | return (batch_loss / int(target.shape[1]))
133 |
134 | # input -> (time_steps, rows, cols, channels)
135 | def predict(self, input_, output_seq):
136 | input_ = tf.expand_dims(input_, 0)
137 | start_pred = input_.shape[1] - 1
138 | dec_states = self.encoder(input_[:, :start_pred, :, :, :], 1, False)
139 | dec_input = tf.expand_dims(input_[:,-1, :, :, :], 1)
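| # the last input frame primes the decoder; predicted frames are then fed back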
140 |
141 | predictions = []
142 |
143 | for t in range(output_seq):
144 | prediction, dec_states = self.decoder(dec_input, dec_states, False)
145 | dec_input = tf.expand_dims(prediction, 0)
146 | predictions.append(prediction.numpy().reshape(self.image_sz))
147 |
148 | return np.array(predictions)
149 |
150 | def evaluate(self, inputX, outputY, valid=True):
151 |
152 | total_loss = 0
153 | total_batch = inputX.shape[0] // self.batch_sz
154 |
155 | for batch in range(total_batch):
156 | index = batch * self.batch_sz
157 | input_ = inputX[index:index + self.batch_sz, :, :, :, :]
158 | target = outputY[index:index + self.batch_sz, :, :, :, :]
159 |
160 | batch_loss = self.__eval_step(input_, target, valid)
161 | total_loss += batch_loss
162 |
164 | return total_loss / total_batch
165 |
--------------------------------------------------------------------------------
/lstm-keras/conv-lstm-one.py:
--------------------------------------------------------------------------------
1 | from tensorflow import keras
2 | from tensorflow.keras import layers
3 | import numpy as np
4 | #import pylab as plt
5 | import matplotlib.pyplot as plt
6 |
7 | def load_data(filename):
8 | data = np.load(filename)
9 | #print (data[:100, :10, :, :].shape)
10 | print(data.shape)
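| # first 10 frames are the input, the next 10 frames are the target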
11 | X = data[:, :10, :, :].reshape(1000, 10, 64, 64, 1)
12 | Y = data[:, 10:20, :, :].reshape(1000, 10, 64, 64, 1)
13 | return (X, Y)
14 |
15 | if __name__ == "__main__":
16 |
17 | model = keras.Sequential(
18 | [
19 | keras.Input(
20 | shape=(10, 64, 64, 1)
21 | ),
22 | layers.ConvLSTM2D(
23 | filters=40, kernel_size=(3, 3), padding="same", return_sequences=True
24 | ),
25 | layers.Conv2D(
26 | filters=1, kernel_size=(3, 3), activation="sigmoid", padding="same"
27 | ),
28 | ]
29 | )
30 |
31 | model.compile(loss="binary_crossentropy", optimizer="adadelta")
32 | model.summary()
33 |
34 | X, Y = load_data("../train1000.npy")
35 | print(X.shape, Y.shape)
36 | model.fit(X[:200], Y[:200], batch_size=10, epochs=5, verbose=2, validation_split=0.1)
37 |
38 | # predict
39 | pred = model.predict(X[300:301, :, :, :]).reshape(10, 64, 64)
40 | actual = Y[300:301, :, :, :].reshape(10, 64, 64)
41 |
42 | fig, ax = plt.subplots(2, 10)
43 | i = 0
44 | for row in ax:
45 | for col in range(len(row)):
46 | if i == 1:
47 | row[col].imshow(pred[col], cmap="gray")
48 | else:
49 | row[col].imshow(actual[col], cmap="gray")
50 | i += 1
51 |
52 | plt.show()
53 | #print(result.shape)
54 | '''
55 | fig, ax = plt.subplots(2, 10)
56 | for row in ax:
57 | for col in range(len(row)):
58 | row[col].imshow(X[15][col], cmap="gray")
59 | plt.show()
60 | '''
61 |
--------------------------------------------------------------------------------
/lstm-keras/images/mnist-one-layer-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/lstm-keras/images/mnist-one-layer-2.png
--------------------------------------------------------------------------------
/lstm-keras/images/mnist-one-layer-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/lstm-keras/images/mnist-one-layer-3.png
--------------------------------------------------------------------------------
/lstm-keras/images/mnist-one-layer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/lstm-keras/images/mnist-one-layer.png
--------------------------------------------------------------------------------
/lstm-keras/lstm-encoder-decoder.py:
--------------------------------------------------------------------------------
1 | from numpy import array
2 | from keras.models import Model
3 | from keras.layers import Input, LSTM, Dense, RepeatVector
4 |
5 | def createData(n):
6 | X = [x + 1 for x in range(n)]
7 | Y = [y * 15 for y in X]
8 |
9 | #print(X, Y)
10 | return (X, Y)
11 |
12 | # encoder decoder model
13 | # 50 - 50
14 | if __name__ == "__main__":
15 |
16 | X, Y = createData(20)
17 | X = array(X).reshape(20, 1, 1) # samples, time-steps, features
18 | Y = array(Y)
19 |
20 | # encoder LSTM
21 | encoder_inputs = Input(shape=(1, 1)) # time-steps, features
22 | encoder = LSTM(50, return_state=True)
23 | encoder_outputs, state_h, state_c = encoder(encoder_inputs)
24 | encoder_states = [state_h, state_c]
25 | # decoder LSTM
26 | # decoder_inputs = Inputs(shape=())
27 | # using repeat vector for now
28 | # repeats the encoder_outputs n (50) times
29 | '''
30 | decoder_inputs = RepeatVector(50)(encoder_outputs)
31 | #decoder_inputs = Input(shape=(1, 1))
32 | decoder_lstm = LSTM(50, return_state=True)
33 | decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
34 | initial_state=encoder_states)
35 | '''
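| # linear output for regression (a softmax over a single unit would always output 1)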
36 | decoder_dense = Dense(1, activation='linear')
37 | decoder_outputs = decoder_dense(encoder_outputs)
38 |
39 | model = Model(encoder_inputs, decoder_outputs)
40 | model.compile(optimizer='adam', loss='mse')
41 | print(model.summary())
42 | model.fit(X, Y, epochs=10, validation_split=0.2, batch_size=5)
43 |
44 | test = array([30]).reshape((1, 1, 1))
45 | output = model.predict(test, verbose=0)
46 |
47 | print(output)
48 |
--------------------------------------------------------------------------------
/lstm-keras/lstm-one-one-50.py:
--------------------------------------------------------------------------------
1 | from numpy import array
2 | from keras.models import Sequential
3 | from keras.layers.core import Activation, Dropout, Dense
4 | from keras.layers import LSTM
5 |
6 | def createData(n):
7 | X = [x + 1 for x in range(n)]
8 | Y = [y * 15 for y in X]
9 |
10 | #print(X, Y)
11 | return (X, Y)
12 |
13 | if __name__ == "__main__":
14 |
15 | X, Y = createData(20)
16 | X = array(X).reshape(20, 1, 1) # samples, time-steps, features
17 | Y = array(Y)
18 |
19 | model = Sequential()
20 | model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(1, 1)))
21 | model.add(LSTM(32, return_sequences=True))
22 | #model.add(Dense(1))
23 | model.compile(optimizer='adam', loss='mse')
24 | print(model.summary())
25 |
26 | #model.fit(X, Y, epochs=1000, validation_split=0.2, batch_size=5)
27 |
28 | test = array([30]).reshape((1, 1, 1))
29 | output = model.predict(test, verbose=0)
30 |
31 | print(output)
32 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | from encoder_decoder import EncoderDecoder
4 | from test import test_model
5 |
6 | def load_dataset(path, filename):
7 | train_data = np.load(path + filename)
8 | # 2 x 2 patches: each frame is stored as a 50 x 50 grid of patches with 4 channels
9 | train_data = train_data.reshape(train_data.shape[0], train_data.shape[1], 50, 50, 4)
10 |
11 | train_data[train_data < 200] = 0
12 | train_data[train_data >= 200] = 1
13 | #train_data = train_data / 255.0
14 | print(train_data.shape)
15 |
16 | X = train_data[:, :10, :, :, :]
17 | Y = train_data[:, 10:20, :, :, :]
18 |
19 | X = tf.convert_to_tensor(X, dtype=tf.float32)
20 | Y = tf.convert_to_tensor(Y, dtype=tf.float32)
21 | return (X, Y)
22 |
23 | def main():
24 |
25 | X, Y = load_dataset("../input/nexraddata/", 'data.npy')
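| # 2 layers with 64 and 48 filters (3 x 3 kernels), batch size 16,
| # frame shape taken from the data, checkpoints in ./training_checkpoints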
26 | model = EncoderDecoder(
27 | 2,
28 | [64, 48], [(3, 3), (3, 3)],
29 | 16,
30 | (X.shape[2], X.shape[3], X.shape[4]),
31 | './training_checkpoints'
32 | )
33 | # model.restore()
34 | model.train(X[:700], Y[:700], 400, X[700:800], Y[700:800], X, Y)
35 |
36 | test_model(model, X, Y)
37 |
38 | if __name__ == "__main__":
39 | main()
40 |
--------------------------------------------------------------------------------
/next-frame-tutorial/README.md:
--------------------------------------------------------------------------------
1 | # Next-frame prediction with Conv-LSTM
2 | A simple artificial dataset is generated in which 3 to 7 squares move linearly over time inside a 40 x 40 image frame. Each sample is a sequence of 20 such frames. The dataset generation code can be found at https://keras.io/examples/vision/conv_lstm/.
3 |
4 | ## Stacked Conv-LSTM
5 | The model has 4 stacked Conv-LSTM layers with 40 hidden units (filters of size 3 x 3) in each layer, and each Conv-LSTM layer is followed by a batch normalization layer that makes the network more stable and faster to train. The final layer is a single-filter 3D convolutional layer with filter size (3 x 3 x 3) and sigmoid activation, producing output frames with intensities between 0 and 1. The model is many-to-one: given an input sequence of any length, it predicts the next frame of that sequence. It was trained to minimize the binary cross-entropy loss using the Adadelta optimizer.
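| 
| In outline, the stack can be sketched as below (a condensed view of the model
| built in `next-frame.py`; see that file for the full script):
| 
| ```python
| from keras.models import Sequential
| from keras.layers import ConvLSTM2D, BatchNormalization, Conv3D
| 
| seq = Sequential()
| # first Conv-LSTM layer fixes the frame shape: (time, rows, cols, channels)
| seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same',
|                    return_sequences=True, input_shape=(None, 40, 40, 1)))
| seq.add(BatchNormalization())
| # three more identical Conv-LSTM + batch normalization blocks
| for _ in range(3):
|     seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same',
|                        return_sequences=True))
|     seq.add(BatchNormalization())
| # sigmoid 3D convolution maps the hidden sequence back to 1-channel frames
| seq.add(Conv3D(filters=1, kernel_size=(3, 3, 3), activation='sigmoid',
|                padding='same'))
| seq.compile(loss='binary_crossentropy', optimizer='adadelta')
| ```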
6 |
7 | To run this model, set the number of epochs to 50. If it is set higher, make sure the model doesn't overfit.
8 |
9 | `python3 next-frame.py`
11 | ### Training
12 | The model was trained on 950 samples (with 50 validation samples) for 50 epochs.
13 | ```
14 | Train on 950 samples, validate on 50 samples
15 | Epoch 50/50
16 | 900/900 [==============================] - 30s 33ms/step - loss: 0.0055 - val_loss: 0.0053
17 | CPU times: user 18min 27s, sys: 8min 21s, total: 26min 48s
18 | ```
19 |
20 | ### Result
21 | On a test sample, the next 10 frames were predicted from the first 10 frames. Since the above model is many-to-one, many-to-many prediction is done by feeding each predicted frame back into the network along with the original input sequence.
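| 
| A minimal sketch of this feedback loop (mirroring `prediction()` in
| `next-frame.py`; `seq` is the trained model and `track` initially holds the
| 10 seed frames with shape (10, 40, 40, 1)):
| 
| ```python
| import numpy as np
| 
| for _ in range(10):
|     # predict the sequence shifted by one and keep only the newest frame
|     new_pos = seq.predict(track[np.newaxis, ...])      # (1, t, 40, 40, 1)
|     track = np.concatenate((track, new_pos[:, -1]), axis=0)
| ```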
22 |
23 |
24 |
25 |
26 |
27 | (a) Input frames
28 |
29 | (b) Prediction vs Ground truth
30 |
31 |
32 | ## Encoder-Decoder Model
33 |
34 | A one-layer encoder-decoder model was used for training. The layer (in both the encoder and the decoder) has 128 hidden units (number of filters) with a filter size of (3 x 3). The final layer in the decoder is a 1 x 1 2D convolutional layer with sigmoid activation, producing frames with intensities between 0 and 1. Since it is an encoder-decoder model, the hidden states from the encoder layer are used to initialize the hidden states of the decoder layer, and the loss is propagated from the decoder's prediction loss (the encoder outputs are discarded). The training set is the same as for the above model. The model was trained to minimize the binary cross-entropy loss using the RMSprop optimizer (learning rate = 0.001, rho = 0.9).
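| 
| At prediction time the decoding loop can be sketched as below (condensed from
| `encoder-decoder.py`; `model` is a trained `EncoderDecoder` and `input_` a
| batch of seed frames of shape (1, 10, rows, cols, 1)):
| 
| ```python
| # encode all but the last seed frame; the returned [hidden, cell] states
| # of the encoder layer initialize the decoder layer
| dec_states = model.encoder(input_[:, :-1], False)
| # the last seed frame primes the decoder
| dec_input = tf.expand_dims(input_[:, -1], 1)
| 
| predictions = []
| for _ in range(10):
|     frame, dec_states = model.decoder(dec_input, dec_states, False)
|     # feed the predicted frame back in as the next decoder input
|     dec_input = tf.expand_dims(frame, 1)
|     predictions.append(frame)
| ```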
35 |
36 | To run this model, set the number of epochs to 20 or slightly higher; the model converges quickly.
37 |
38 | `python3 encoder-decoder.py`
40 | ### Training
41 | The model was trained on 1000 samples for 20 epochs with batch size of 8.
42 | ```
43 | Epoch 19 Loss 0.0001
44 | Time taken for 1 epoch 37.519431352615356 sec
45 |
46 | ```
47 |
48 | ### Result
49 | The model was used to predict a sequence of 10 output frames from a sequence of 10 input frames.
50 |
51 |
52 |
53 |
54 |
55 | (a) Input frames
56 |
57 | (b) Ground truth vs Prediction
58 |
59 |
60 |
61 |
62 |
63 |
64 | (a) Input frames
65 |
66 | (b) Ground truth vs Prediction
67 |
68 |
69 | ## References
70 | [1] https://keras.io/examples/vision/conv_lstm/
71 | [2] https://www.tensorflow.org/tutorials/text/nmt_with_attention
72 |
--------------------------------------------------------------------------------
/next-frame-tutorial/encoder-decoder.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | # Generate movies with 3 to 7 moving squares inside.
7 | # visit : https://keras.io/examples/vision/conv_lstm/ for details
8 | # @param n_samples number of samples to generate
9 | # @param n_frames number of sequences in each sample
10 | def generate_movies(n_samples=1200, n_frames=20):
11 | row = 80
12 | col = 80
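| # frames are generated at 80 x 80 and cropped to 40 x 40 at the end so that
| # squares can move in and out of the visible window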
13 | # only the shifted movies are needed in this script
14 | shifted_movies = np.zeros((n_samples, n_frames, row, col, 1), dtype=float)
16 |
17 | for i in range(n_samples):
18 | # Add 3 to 7 moving squares
19 | n = np.random.randint(3, 8)
20 |
21 | for j in range(n):
22 | # Initial position
23 | xstart = np.random.randint(20, 60)
24 | ystart = np.random.randint(20, 60)
25 | # Direction of motion
26 | directionx = np.random.randint(0, 3) - 1
27 | directiony = np.random.randint(0, 3) - 1
28 |
29 | # Size of the square
30 | w = np.random.randint(2, 4)
31 |
32 | for t in range(n_frames):
36 | # Shift the ground truth by 1
37 | x_shift = xstart + directionx * (t + 1)
38 | y_shift = ystart + directiony * (t + 1)
39 | shifted_movies[i, t, x_shift - w: x_shift + w,
40 | y_shift - w: y_shift + w, 0] += 1
41 |
42 | # Cut to a 40x40 window
43 | shifted_movies = shifted_movies[::, ::, 20:60, 20:60, ::]
44 | shifted_movies[shifted_movies >= 1] = 1
45 | return shifted_movies
46 |
47 |
48 | # Encoder model to encapsulate the input sequences
49 | class Encoder(tf.keras.Model):
50 |
51 | # @param enc_layers number of conv lstm layers
52 | # @param unit_list list of hidden units in each conv lstm layer
53 | # @param filter_sz list of filter sizes for each layer
54 | def __init__(self, enc_layers, unit_list, filter_sz):
55 | super(Encoder, self).__init__()
56 |
57 | self.enc_layers = enc_layers
58 | self.unit_list = unit_list
59 | self.filter_sz = filter_sz
60 | self.conv_lstm = []
61 | self.batch_norm = []
62 |
63 | for layer in range(self.enc_layers):
64 | # conv lstm layer
65 | lstm = tf.keras.layers.ConvLSTM2D(filters=self.unit_list[layer],
66 | kernel_size=self.filter_sz[layer],
67 | padding="same",
68 | return_sequences=True,
69 | return_state=True,
70 | data_format="channels_last")
71 |
72 | # batch normalization layer after each conv lstm except after the last layer
73 | if layer != self.enc_layers - 1:
74 | norm = tf.keras.layers.BatchNormalization()
75 | self.batch_norm.append(norm)
76 | self.conv_lstm.append(lstm)
77 |
78 | # Encoder doesn't need states input
79 | # input_.shape -> (batch_size, time_steps, rows, cols, channels)
80 | # @return list of pairs of states from each layer
81 | def call(self, input_, training=True):
82 |
83 | states = []
84 | for layer in range(self.enc_layers):
85 | outputs, hidden_state, cell_state = self.conv_lstm[layer](input_)
86 |
87 | if layer != self.enc_layers - 1:
88 | input_ = self.batch_norm[layer](outputs, training=training)
89 |
90 | states.append([hidden_state, cell_state])
91 |
92 | return states
93 |
94 |
95 | # Decode the state inputs from encoder and predicts the sequence output
96 | class Decoder(tf.keras.Model):
97 |
98 | # @param dec_layers number of conv lstm layers
99 | # @param unit_list -> list of hidden units in each conv lstm layer
100 | # @param filter_sz -> list of filter sizes for each conv lstm layer
101 | # Note : keep parameters same as Encoder
102 | def __init__(self, dec_layers, unit_list, filter_sz):
103 | super(Decoder, self).__init__()
104 |
105 | self.dec_layers = dec_layers
106 | self.unit_list = unit_list
107 | self.filter_sz = filter_sz
108 | self.conv_lstm = []
109 | self.batch_norm = []
110 |
111 | # 2D convolution for the time step outputs
112 | # 1 x 1 CNN
113 | self.conv_nn = tf.keras.layers.Conv2D(filters=1,
114 | kernel_size=(1, 1),
115 | padding="same",
116 | activation='sigmoid',
117 | data_format="channels_last")
118 |
119 | # ConvLSTM layers and Batch Normalization
120 | for layer in range(self.dec_layers):
121 | lstm = tf.keras.layers.ConvLSTM2D(filters=self.unit_list[layer],
122 | kernel_size=self.filter_sz[layer],
123 | padding="same",
124 | return_state=True,
125 | data_format="channels_last")
126 |
127 | # batch normalization layer after each conv lstm layer
128 | norm = tf.keras.layers.BatchNormalization()
129 | self.batch_norm.append(norm)
130 | self.conv_lstm.append(lstm)
131 |
132 | # input_.shape -> (batch_size, time_steps, rows, cols, channels)
133 | # @param states hidden and cell states to be fed to the Decoder
134 | # @return predicted frame and list of states from each layer
135 | def call(self, input_, states, training=True):
136 |
137 | new_states = []
138 | for layer in range(self.dec_layers):
139 | output, hidden_state, cell_state = self.conv_lstm[layer](
140 | input_,
141 | initial_state=states[layer]
142 | )
143 | new_states.append([hidden_state, cell_state])
144 | input_ = self.batch_norm[layer](output, training=training)
145 | input_ = tf.expand_dims(input_, 1)
146 |
147 | frames = self.conv_nn(output)
148 | return frames, new_states
149 |
150 | '''
151 | def initialize_states(self):
152 | return [tf.zeros([self.batch_sz, self.image_sz[0], self.image_sz[1], self.units]),
153 | tf.zeros([self.batch_sz, self.image_sz[0], self.image_sz[1], self.units])]
154 | '''
155 |
156 | # Builds an Encoder-Decoder model
157 | class EncoderDecoder:
158 |
159 | # @param num_layers number of layers in the model
160 | # @param unit_list list of hidden states size for each layer
161 | # @param filter_sz list of filter sizes for each layer
162 | # @param batch_sz batch size
163 | # @param image_sz image size (gray scale)
164 | # Note : Keep encoder and decoder layer parameters same
165 | def __init__(self, num_layers, unit_list, filter_sz, batch_sz, image_sz):
166 | self.num_layers = num_layers
167 | # encoder layer defined
168 | self.encoder = Encoder(num_layers, unit_list, filter_sz)
169 | # decoder layer defined
170 | self.decoder = Decoder(num_layers, unit_list, filter_sz)
171 | self.batch_sz = batch_sz
172 | # RMS optimizer
173 | self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)
174 | self.image_sz = image_sz
175 |
176 | # Binary crossentropy loss (average)
177 | # - Sigma [ T * logP + (1 - T) * log(1 - P) ]
178 | self.loss_object = tf.keras.losses.BinaryCrossentropy()
179 |
180 | # @return average loss
181 | def loss_function(self, real_frame, pred_frame):
182 | return self.loss_object(real_frame, pred_frame)
183 |
184 | # Trains for one iteration
185 | # @param input_ input sequences
186 | # @param target target sequences
187 | # input_.shape -> (batch_size, time_steps, rows, cols, channels)
188 | # target .shape-> (batch_size, time_steps, rows, cols, channels)
189 |
190 | def train_step(self, input_, target):
191 | batch_loss = 0
192 |
193 | with tf.GradientTape() as tape:
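| # the first 9 input frames provide context; the 10th frame primes the decoder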
194 | dec_states = self.encoder(input_[:, :9, :, :, :])
195 | dec_input = tf.expand_dims(input_[:, 9, :, :, :], 1)
196 |
197 | # Teacher forcing
198 | for t in range(0, target.shape[1]):
199 | prediction, dec_states = self.decoder(dec_input, dec_states)
200 |
201 | batch_loss += self.loss_function(target[:, t, :, :, :], prediction)
202 |
203 | # using teacher forcing
204 | dec_input = tf.expand_dims(target[:, t, :, :, :], 1)
205 |
206 |
207 | variables = self.encoder.trainable_variables + self.decoder.trainable_variables
208 | # back propagates the gradient
209 | gradients = tape.gradient(batch_loss, variables)
210 | self.optimizer.apply_gradients(zip(gradients, variables))
211 |
212 | # batch_loss (average loss over each target output)
213 | return batch_loss / int(target.shape[1])
214 |
215 | # Trains the model for epochs number of iterations
216 | #
217 | # @param inputX total training samples inputs
218 | # @param inputY total training samples outputs
219 | # inputX.shape - > (total, time_steps, rows, cols, channels)
220 | # targetY.shape -> (total, time_steps, rows, cols, channels)
221 | def train(self, inputX, targetY, epochs):
222 |
223 | assert(inputX.shape == targetY.shape)
224 |
225 | for epoch in range(epochs):
226 | start = time.time()
227 | total_loss = 0
228 | total_batch = inputX.shape[0] // self.batch_sz
229 | #print(total_batch)
230 |
231 | for batch in range(total_batch):
232 | index = batch * self.batch_sz
233 | input_ = inputX[index:index + self.batch_sz, :, :, :, :]
234 | target = targetY[index:index + self.batch_sz, :, :, :, :]
235 |
236 | # print(input_.shape, target.shape)
237 |
238 | batch_loss = self.train_step(input_, target)
239 | total_loss += batch_loss
240 |
241 | if epoch % 2 == 0:
242 | print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / total_batch))
243 | print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
244 |
245 | # predicts for the given input sequences
246 | # @param output_seq number of output frame predictions
247 | # input_.shape -> (time_steps, rows, cols, channels)
248 | def predict(self, input_, output_seq):
249 | input_ = tf.expand_dims(input_, 0)
250 | dec_states = self.encoder(input_[:, :9, :, :, :], False)
251 | dec_input = tf.expand_dims(input_[:, -1, :, :, :], 1)
252 |
253 | predictions = []
254 |
255 | for t in range(output_seq):
256 | prediction, dec_states = self.decoder(dec_input, dec_states, False)
257 | # feeding back the predicted frame as the input frame
258 | dec_input = tf.expand_dims(prediction, 0)
259 | predictions.append(prediction.numpy().reshape(self.image_sz))
260 |
261 | return np.array(predictions)
262 |
263 |
264 | # plots the actual vs predicted frame
265 | def plot_result(input_, actual, predict):
266 |
267 | for i in range(input_.shape[0]):
268 | plt.imshow(input_[i])
269 | plt.title("Actual_" + str(i + 1))
270 | plt.show()
271 |
272 | for i in range(actual.shape[0]):
273 | plt.subplot(121), plt.imshow(actual[i]),
274 | plt.title("Actual_" + str(i + 1 + input_.shape[0]))
275 | plt.subplot(122), plt.imshow(predict[i]),
276 | plt.title("Predicted_" + str(i + 1 + input_.shape[0]))
277 | plt.show()
278 |
279 | def main():
280 |
281 |
282 | shifted_movies = generate_movies(n_samples=1200)
283 | print(shifted_movies.shape)
284 |
285 | X = shifted_movies[:, :10, :, :, :]
286 | Y = shifted_movies[:, 10:, :, :, :]
287 |
288 | # defines the model
289 | model = EncoderDecoder(1, [128], [(3, 3)], 8, (X.shape[2], X.shape[3]))
290 | # training on first 1000 samples
291 | # samples from 1000 - 1199 are used as test set
292 | model.train(X[:1000], Y[:1000], 20)
293 |
294 | # predictions
295 | y1 = model.predict(X[1100], 10)
296 | y2 = model.predict(X[1005], 10)
297 | plot_result(X[1100].reshape(10, 40, 40), Y[1100].reshape(10, 40, 40), y1)
298 | plot_result(X[1005].reshape(10, 40, 40), Y[1005].reshape(10, 40, 40), y2)
299 |
300 |
301 |
302 | if __name__ == "__main__":
303 | main()
304 |
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_1.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_2.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_3.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_4.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_5.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_6.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_7.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_8.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_9.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/actual0_fin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/actual0_fin.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/input.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/input.gif
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output.gif
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_1.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_2.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_3.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_4.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_5.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_6.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_7.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_8.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_9.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec0/output0_fin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec0/output0_fin.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_1.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_2.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_3.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_4.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_5.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_6.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_7.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_8.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_9.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/actual1_fin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/actual1_fin.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/input.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/input.gif
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output.gif
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_1.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_2.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_3.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_4.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_5.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_6.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_7.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_8.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_9.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/enc_dec1/output1_fin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/enc_dec1/output1_fin.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual0.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual1.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual2.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual3.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual4.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual5.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual6.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual7.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual8.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/actual9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/actual9.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/encoder.drawio:
--------------------------------------------------------------------------------
1 | 7Z3Lkps4FIafppdJIcBgL/uWzCJdlaqeSjKrFGNkmwlGPVhO2/P0I4y4SbTBWEJqoBeJESDgfEdI/9GRfWPdbw+fY+9l84R8GN6Yhn+4sR5uTHMxB+TfpOCYFszALC1Yx4GfFoGi4Dn4D9JCg5buAx/uKgdihEIcvFQLlyiK4BJXyrw4Rq/Vw1YorF71xVtDruB56YV86ffAx5u0dD4zivI/YLDeZFcGBt2z9bKDacFu4/notVRkPd5Y9zFCOP20PdzDMLFdZpf0vE9v7M1vLIYRbnPC18Pyy89vr/aTv3xY/Yp/+ber6AOt5bcX7ukD05vFx8wCMdpHPkwqMW6su9dNgOHzi7dM9r4S5KRsg7ch2QLk4yoIw3sUovh0rgX+9gAkT3q3wzH6BUt7DMN5vP2UnIEiXCpfnf5IOb0xGGN4ePOJQW5H4n8QbSGOj+SQ7IQ5NT31PSuj9lqQzNxxU4Lo0MM86jvrvObCvOQDtfAF1raGbG1yFb2sbQ/a2rr59mzI1rZ0821HrrVXnjM3jDpr37vk2Q251raBZtZ2B21tRzNrz4ds7Zluvr0YtLV1821gNpsbRv5tolzI1jL0drtgWbUwPAT4RwLj44xu/UXRJJ8fDuWNI91ILwJ9TuwwliQ3gvbxEjY5DG/xkkUzI5ctmpXFMPRw8Lt6G3Vmplf4igJygwVQ9w2gWRXp7dOzymqIrciZVysymIqwF68h5io6Uc8f+wpHaKEFVDqCMsDA/ZiddTVjUpcL1GJuIULGiNl2EsxG/mc6YpAn9bpn6u0bfwtVNEr8QBJ+oBf+FjJtjPgtSw7+pF6d8LfQjWPEb87l4E/q1Ql/CyE7SvySWr+pWetvoazHiB9Iav1Ar9ZvSg6s6DW1Ztqqp9aMIZubnVtTb+5BTxyzk2vKze20CSOQN/4z3UQx3qA1irzwsSi9qwIpjvmC0AvF8A/E+EizMLw9RlVIbbuWFlFEiz5A+hY+d6Bbj6p1L3Sdn5tD9nN2WlO5n1tTVLSLYM65CRbMYNHvmMmaoqVdBHNn/A2CuXf8U7S0i2Dujl+z1j9FS7sI5s74GwRz7/iHnWUDdBtqDTrNhk1qUm/uYefZ6Obd2fUHam7dvNtuEQ7SPK3JNupNriavqXOnzuY19d2L2y0CJmMcxM2YjAdLEG4m4cFSC38K39RSash26ky/Idupd/xT+KYe0/lsp+74z2c79Y5/Ct900e82OyoTpN9zxdkX/inbqUv0rjP+huhd7/inbKcusfvu+DVr/VO2Uy2mhpm7zvgbZu76xp9Za8J/0dCvM/6GoV/v+AXGf0AZfu4K0uM/ptJ1bdySZxZg+1EBE0hye/YEgfEfGZ6gbBiQqaOMi7PI16Zd3vUbjXXJxiww0jMozAtxmLnsLAWYBUZ0LsFMYMbHH+WN0lnJZnHaaevy/qBz3mFPw8ba10XR3xt2R5+qfXW8Xa9s/xIYMhrSa8Q25eDnpsUV4xeY8TMo/K4c/Nw0vWL8AiOGQ8I/k9T6ubQBxfgFRgyHhB8wY0jLvUIqsF9cVlOXbMwCI4NDwswqwhTN9a2c+2bAhnol48/eORP+80pRFH7uqwoV49c8MqjLCF8Ufu7b/BTjNyf8bUb4ovBzXy+oGP8UJmw1wheGX7PW3ya8o8+yZd4bBEfxrhnJczNE/Y/kHcnfOuz4c9exb2qS6q3bmW2dzuhvhQ7IBlLKFv1LXhCl2NyObuaWvCBKrbnZgYF6c0teEKXY3Lp5tytQhitaoeNo9c3DOdFrV+jMmXokd+KuQEE+pEyt8wt0utM+u0Cnb/YC1fiQ2Desz+kMv2F9Tt/0p9VZbygrSfTP52j2Tf+y2bYIRYn29r3d5jQAA6IkdV/hVaa37ppPadsNFcnGdtns2XvHxljb7rw0TjG2+WXD7QkbPdFtqEg2NhGD4zx3sUhX/Ku8T37uotLURZvRN92dgQ1mmz07w2Wj5ffehll927nHXCh+9V42zH3v2BaiXr2qsV2Waj5ho/djKu4xRWRwS+4xVYaUFqU/Qf1iUm11XrHv5X7zy/K233lbZZtY555xplqUXJZv/d6xiRIlyrGNKnIjDptqLSki73moPaMjp2d0VPeMCz6AcI+i31+e/3wipaF3hDH5H3COgOEBV9FXp61poy7PcdMiLwzWUeI/hCyp3LpL5q2DpRfe0h3bwPdPeVR18+dMbhXZGeCqE133MzFc0hI/DW7XuBmr9IVNgy94TV/Dh2+oI+GT5x8o48OL94QPCvc4SHL+CkjjAMRmEIKaH7DuFxAv01ext4U/x/NO4zsZ5a2GF+EplNG8yExeEyuHwovkFEo0FiiWhlB4CbwjQ0L884RmLGBsQzcsvMTN28rdaHoWa8G2F+VceA1bcNmOhcvM1o0LyH7MtBbMaHp9mwNTk2bdM5gWGQSFVcAbdiuBqhptt/H8U7io+O2J7/RRk0uLeQ0ZjCisEe3ArLGqtOR1kCnyklWfsYdJh218g0uM4t1YXN6asS5f9+MfPfs8L9onOtkLSbVgB0bdxLoTJuZPFsBUuDj/7lG248PutJ7ylhwA5i+HYif5tE7+f4yWyE9CMWll5O7S+tK9miKvvEAL/kCUDmXiNTNDOf66uVoR+B/ghJ8bkLi94SebMUqA5Ps+kwfePBEmyRH/Aw==
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/input.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/input.gif
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o0.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o1.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o2.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o3.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o4.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o5.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o6.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o7.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o8.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/o9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/o9.png
--------------------------------------------------------------------------------
/next-frame-tutorial/images/stack/output.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iamrakesh28/Deep-Learning-for-Weather-and-Climate-Science/a36b0a09a23b361e0e6ca1e6fdc06eeb62f06160/next-frame-tutorial/images/stack/output.gif
--------------------------------------------------------------------------------
/next-frame-tutorial/next-frame.py:
--------------------------------------------------------------------------------
1 | from keras.models import Sequential
2 | from keras.layers.convolutional import Conv3D
3 | from keras.layers.convolutional_recurrent import ConvLSTM2D
4 | from keras.layers.normalization import BatchNormalization
5 | import numpy as np
6 | import pylab as plt
7 |
8 | # Artificial data generation
9 |
10 | # Generate movies with 3 to 7 moving squares inside.
11 | # The squares are of size 4x4 or 6x6 pixels and move linearly over time.
12 | # For convenience we first create movies with bigger width and height (80x80)
13 | # and at the end we select a 40x40 window.
14 |
15 | def generate_movies(n_samples=1200, n_frames=20):
16 | row = 80
17 | col = 80
18 | orginal_movies = np.zeros((n_samples, n_frames, row, col, 1), dtype=float)
19 | shifted_movies = np.zeros((n_samples, n_frames, row, col, 1), dtype=float)
20 |
21 | for i in range(n_samples):
22 | # Add 3 to 7 moving squares
23 | n = np.random.randint(3, 8)
24 |
25 | for j in range(n):
26 | # Initial position
27 | xstart = np.random.randint(20, 60)
28 | ystart = np.random.randint(20, 60)
29 | # Direction of motion
30 | directionx = np.random.randint(0, 3) - 1
31 | directiony = np.random.randint(0, 3) - 1
32 |
33 | # Size of the square
34 | w = np.random.randint(2, 4)
35 |
36 | for t in range(n_frames):
37 | x_shift = xstart + directionx * t
38 | y_shift = ystart + directiony * t
39 | orginal_movies[
40 | i, t, x_shift - w : x_shift + w, y_shift - w : y_shift + w, 0
41 | ] += 1
42 |
43 | # Shift the ground truth by 1
44 | x_shift = xstart + directionx * (t + 1)
45 | y_shift = ystart + directiony * (t + 1)
46 | shifted_movies[
47 | i, t, x_shift - w : x_shift + w, y_shift - w : y_shift + w, 0
48 | ] += 1
49 |
50 | # Cut to a 40x40 window
51 | orginal_movies = orginal_movies[::, ::, 20:60, 20:60, ::]
52 | shifted_movies = shifted_movies[::, ::, 20:60, 20:60, ::]
53 | orginal_movies[orginal_movies >= 1] = 1
54 | shifted_movies[shifted_movies >= 1] = 1
55 | return orginal_movies, shifted_movies
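
# Quick sanity check of the generator (a minimal sketch, not part of the
# original script; the expected shape follows from the 40x40 crop above):
#   original, shifted = generate_movies(n_samples=2, n_frames=20)
#   assert original.shape == shifted.shape == (2, 20, 40, 40, 1)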


# Testing the network on one movie:
# feed it the first 10 frames, then predict the next 10 positions.

def prediction(seq, which, original_movies, shifted_movies):
    # Take the first 10 frames of the chosen movie as the seed sequence.
    # track has shape (10, 40, 40, 1); np.newaxis adds a batch axis so the
    # array is accepted by the seq model.
    track = original_movies[which][:10, ::, ::, ::]

    for j in range(10):
        # The model returns one output frame per input frame:
        # (1, len(track), 40, 40, 1)
        new_pos = seq.predict(track[np.newaxis, ::, ::, ::, ::])
        # Keep only the last predicted frame: (1, 40, 40, 1)
        new = new_pos[::, -1, ::, ::, ::]
        # Append it, growing the first dimension by 1 in each loop cycle
        track = np.concatenate((track, new), axis=0)

    # Compare the predictions to the ground truth
    track2 = original_movies[which][10:, ::, ::, ::]
    for i in range(20):
        if i < 10:
            plt.title("Actual_" + str(i + 1))
            plt.imshow(track[i, :, :, 0])
            plt.show()
            continue
        plt.subplot(121), plt.imshow(track[i, :, :, 0]), plt.title("Predicted_" + str(i + 1))
        plt.subplot(122), plt.imshow(track2[i - 10, :, :, 0]), plt.title("Actual_" + str(i + 1))
        plt.show()
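
# Shape illustration for the np.newaxis trick above (a minimal sketch, not
# part of the original script):
#   track = np.zeros((10, 40, 40, 1))
#   track[np.newaxis].shape  # -> (1, 10, 40, 40, 1), i.e. a batch of one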


if __name__ == "__main__":

    # Model
    # We build a network that takes as input movies of shape
    # (n_frames, width, height, channels) and returns a movie of identical shape.

    seq = Sequential()
    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       input_shape=(None, 40, 40, 1),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    # A final 3D convolution collapses the 40 feature maps into one
    # per-pixel sigmoid probability for every frame.
    seq.add(Conv3D(filters=1, kernel_size=(3, 3, 3),
                   activation='sigmoid',
                   padding='same', data_format='channels_last'))
    seq.compile(loss='binary_crossentropy', optimizer='adadelta')

    seq.summary()

    original_movies, shifted_movies = generate_movies(n_samples=1200)

    # Train the network on the first 1000 samples (10% kept for validation)
    seq.fit(original_movies[:1000], shifted_movies[:1000],
            epochs=50, validation_split=0.1)

    # Predict on a sample the network never saw during training
    prediction(seq, 1004, original_movies, shifted_movies)
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
from utility import restore_patch, plot_result

def test_model(model, X, Y):
    # Evaluate on the held-out samples (indices 800 and above)
    # e1 = model.evaluate(X[700:800], Y[700:800], True)
    test_loss = model.evaluate(X[800:], Y[800:], False)
    print('Test Loss {:.4f}'.format(test_loss))

    # Predict 10 future frames for sample 50 and for three samples
    # from the held-out range (915, 936, 956)
    y1 = model.predict(X[50], 10)
    y2 = model.predict(X[915], 10)
    y3 = model.predict(X[936], 10)
    y4 = model.predict(X[956], 10)

    # Undo the 2x2 patch packing before plotting
    plot_result(
        restore_patch(X[50].numpy(), (2, 2)),
        restore_patch(Y[50].numpy(), (2, 2)),
        restore_patch(y1, (2, 2))
    )

    plot_result(
        restore_patch(X[915].numpy(), (2, 2)),
        restore_patch(Y[915].numpy(), (2, 2)),
        restore_patch(y2, (2, 2))
    )

    plot_result(
        restore_patch(X[936].numpy(), (2, 2)),
        restore_patch(Y[936].numpy(), (2, 2)),
        restore_patch(y3, (2, 2))
    )

    plot_result(
        restore_patch(X[956].numpy(), (2, 2)),
        restore_patch(Y[956].numpy(), (2, 2)),
        restore_patch(y4, (2, 2))
    )
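
# Usage sketch (hypothetical, not part of the repository: assumes a trained
# encoder-decoder model whose predict(input_seq, n_steps) returns n_steps
# future frames, and patched tensors X, Y built with utility.reshape_patch):
#   test_model(model, X, Y)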
--------------------------------------------------------------------------------
/utility.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt

def plot_result(input_, actual, predict):
    # Show the input frames on their own
    for i in range(input_.shape[0]):
        plt.imshow(input_[i])
        plt.title("Actual_" + str(i + 1))
        plt.show()

    # Then show ground truth and prediction side by side
    for i in range(actual.shape[0]):
        plt.subplot(121), plt.imshow(actual[i]),
        plt.title("Actual_" + str(i + 1 + input_.shape[0]))
        plt.subplot(122), plt.imshow(predict[i]),
        plt.title("Predicted_" + str(i + 1 + input_.shape[0]))
        plt.show()

def reshape_patch(data, patch_sz):
    # Split every frame into non-overlapping patch_sz patches and flatten
    # each patch into a channel vector:
    # (samples, frames, H, W) -> (samples, frames, H/p0, W/p1, p0*p1)
    data_patch = []
    for sample in range(data.shape[0]):
        data_patch.append([])

        for frame in range(data.shape[1]):
            data_patch[sample].append([])
            rows = data.shape[2] // patch_sz[0]

            for row in range(rows):
                data_patch[sample][frame].append([])
                cols = data.shape[3] // patch_sz[1]

                for col in range(cols):
                    patch = data[sample][frame][
                        row * patch_sz[0] : (row + 1) * patch_sz[0],
                        col * patch_sz[1] : (col + 1) * patch_sz[1]
                    ]

                    # list() is enough here; patch.tolist() would also deep-convert
                    # every element to a Python float, which we don't need
                    data_patch[sample][frame][row].append(list(patch.reshape(patch_sz[0] * patch_sz[1])))

    return np.array(data_patch)

def restore_patch(data, patch_sz):
    # Inverse of reshape_patch for a single sample:
    # (frames, H/p0, W/p1, p0*p1) -> (frames, H, W)
    data_restore = np.zeros((data.shape[0], data.shape[1] * patch_sz[0], data.shape[2] * patch_sz[1]))

    for frame in range(data.shape[0]):
        for row in range(data.shape[1]):
            for col in range(data.shape[2]):
                patch = data[frame][row][col].reshape(patch_sz)
                data_restore[frame][
                    row * patch_sz[0] : (row + 1) * patch_sz[0],
                    col * patch_sz[1] : (col + 1) * patch_sz[1]
                ] = patch

    return data_restore

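# Round-trip sanity check (a minimal sketch, not part of the repository):
#   x = np.random.rand(1, 2, 64, 64)      # (samples, frames, H, W)
#   p = reshape_patch(x, (2, 2))          # -> (1, 2, 32, 32, 4)
#   back = restore_patch(p[0], (2, 2))    # -> (2, 64, 64)
#   assert np.allclose(back, x[0])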
--------------------------------------------------------------------------------