├── Lab_PyTorch_1.py ├── Lab_PyTorch_2.py ├── Lab_PyTorch_3.py ├── dataset.rar ├── dataset_class_gen.py ├── dataset_dogs.zip ├── dataset_gen_reg.zip ├── dataset_seg.zip ├── images_style_transfer.zip ├── neuro_net_10_1.py ├── neuro_net_10_2.py ├── neuro_net_10_3.py ├── neuro_net_12.py ├── neuro_net_15_sgd.py ├── neuro_net_16.py ├── neuro_net_19.py ├── neuro_net_20.py ├── neuro_net_21.py ├── neuro_net_23.py ├── neuro_net_24_Dropout.py ├── neuro_net_24_L2.py ├── neuro_net_25_bm.py ├── neuro_net_29.py ├── neuro_net_29_view.py ├── neuro_net_32_style_transfer.py ├── neuro_net_36_transfer_lr.py ├── neuro_net_38_unet.py ├── neuro_net_41_rnn.py ├── neuro_net_43_rnn_words.py ├── neuro_net_45_rnn_bidirect.py ├── neuro_net_46_lstm_bidirect.py ├── neuro_net_47_gru_words.py ├── neuro_net_48_autoencoder.py ├── neuro_net_48_homotop.py ├── neuro_net_49_autoencoder_1.py ├── neuro_net_49_autoencoder_2.py ├── neuro_net_50_vae.py ├── neuro_net_52_gan.py ├── neuro_net_9.py ├── neuro_net_9gpu.py ├── solves ├── 1.10.4 ├── 1.10.5 ├── 1.10.6 ├── 1.10.7 ├── 1.11.1 ├── 1.11.2 ├── 1.11.3 ├── 1.11.4 ├── 1.11.5 ├── 1.11.6 ├── 1.2.6 ├── 1.2.7 ├── 1.2.8 ├── 1.2.9 ├── 1.4.10 ├── 1.4.3 ├── 1.4.4 ├── 1.4.6 ├── 1.4.9 ├── 1.5.1 ├── 1.5.10 ├── 1.5.13 ├── 1.5.2 ├── 1.5.3 ├── 1.5.5 ├── 1.5.8 ├── 1.6.1 ├── 1.6.10 ├── 1.6.11 ├── 1.6.2 ├── 1.6.4 ├── 1.6.5 ├── 1.6.6 ├── 1.6.7 ├── 1.6.8 ├── 1.6.9 ├── 1.7.1 ├── 1.7.10 ├── 1.7.2 ├── 1.7.3 ├── 1.7.4 ├── 1.7.8 ├── 1.7.9 ├── 1.8.1 ├── 1.8.10 ├── 1.8.2 ├── 1.8.4 ├── 1.8.5 ├── 1.8.7 ├── 1.8.8 ├── 1.8.9 ├── 1.9.5 ├── 1.9.6 ├── 1.9.7 ├── 15.11 ├── 2.1.5 ├── 2.1.6.4 ├── 2.14.3 ├── 2.14.6 ├── 2.14.7 ├── 2.15.4 ├── 2.15.6 ├── 2.16.1 ├── 2.16.10 ├── 2.16.2 ├── 2.16.3 ├── 2.16.4 ├── 2.16.5 ├── 2.16.6 ├── 2.16.8 ├── 3.1.10 ├── 3.1.3 ├── 3.1.5 ├── 3.1.6 ├── 3.1.9 ├── 3.10.3 ├── 3.10.4 ├── 3.10.5 ├── 3.10.6 ├── 3.11.5 ├── 3.11.6 ├── 3.11.8 ├── 3.12.1 ├── 3.12.2 ├── 3.2.4 ├── 3.2.5 ├── 3.2.7 ├── 3.2.8 ├── 3.2.9 ├── 3.3.2 ├── 3.3.3 ├── 3.3.4 ├── 3.3.5 ├── 3.4.5 ├── 3.4.7 ├── 3.7.6 ├── 3.7.7 ├── 3.7.8 ├── 3.8.2 ├── 3.8.3 ├── 3.8.4 ├── 3.9.1 ├── 3.9.2 ├── 3.9.3 ├── 3.9.4 ├── 4.1.7 ├── 4.1.8 ├── 4.2.4 ├── 4.2.5 ├── 4.2.8 ├── 4.3.1 ├── 4.3.2 ├── 4.3.3 ├── 4.3.4 ├── 4.3.5 ├── 4.4.2 ├── 4.4.5 ├── 4.5.3 ├── 4.5.4 ├── 4.5.5 ├── 4.5.6 ├── 4.6.10 ├── 4.6.7 ├── 4.6.8 ├── 4.6.9 ├── 4.7.2 ├── 4.7.3 ├── 4.7.4 ├── 4.7.5 ├── 4.7.6 ├── 4.7.7 ├── 4.8.6 ├── 4.8.7 ├── 4.8.8 ├── 4.9.7 ├── 5.1.2 ├── 5.1.5 ├── 5.1.6 ├── 5.3.1 ├── 5.3.2 ├── 5.3.3 └── 5.3.4 ├── tests ├── 1.11.3 ├── 1.11.4 ├── 1.11.5 ├── 1.11.6 ├── 1.2.6 ├── 1.2.7 ├── 1.2.8 ├── 1.2.9 ├── 1.4.10 ├── 1.4.9 ├── 1.6.10 ├── 1.6.11 ├── 1.6.2 ├── 1.6.4 ├── 1.6.5 ├── 1.6.7 ├── 1.6.8 ├── 1.6.9 ├── 1.7.10 ├── 1.7.2 ├── 1.7.3 ├── 1.7.4 ├── 1.7.8 ├── 1.7.9 ├── 1.8.10 ├── 1.8.2 ├── 1.8.4 ├── 1.8.7 ├── 1.8.8 ├── 1.8.9 ├── 1.9.5 ├── 1.9.7 ├── 2.4.4 ├── 2.4.5 ├── 2.4.6 ├── 2.4.7 ├── 2.6.3 └── 3.5.7 ├── text_2 ├── train_data_false └── train_data_true /Lab_PyTorch_1.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import torch.optim as optim 5 | from random import randint 6 | import matplotlib.pyplot as plt 7 | 8 | class NetGirl(nn.Module): 9 | def __init__(self, input_dim, num_hidden, output_dim): 10 | super().__init__() 11 | self.layer1 = nn.Linear(input_dim, num_hidden) 12 | self.layer2 = nn.Linear(num_hidden, output_dim) 13 | 14 | def forward(self, x): 15 | x = self.layer1(x) 16 | x = F.tanh(x) 17 | x = self.layer2(x) 18 | x=F.linear(x,x) 19 | 20 | return x 
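# Note on the forward pass above: `x = F.linear(x, x)` passes the layer-2 activations in as both the
# input and the weight of F.linear(input, weight), so for this shape it just computes the dot product
# of x with itself rather than a learned transform, and F.tanh is deprecated in favour of torch.tanh.
# A minimal sketch of the usual head for this MSE-regression lab (an assumption about the intended
# behaviour, not a confirmed fix from the author) would be:
#
#     def forward(self, x):
#         x = torch.tanh(self.layer1(x))   # hidden layer with tanh activation
#         return self.layer2(x)            # raw linear output fed to MSELoss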
21 | 22 | model = NetGirl(2, 3, 1) 23 | 24 | print(model) 25 | 26 | gen_p = model.parameters() # возвращает генератор с набором параметров 27 | print(list(model.parameters())) # отображение списка параметров 28 | 29 | x_train = torch.FloatTensor([(1, 1), (1,2), (1, 3), (1, 5), 30 | (1,7), (1, 9), (2, 3), (2, 4),(2,5)]) 31 | y_train = torch.FloatTensor([2, 3, 4, 6, 8, 10, 5, 6,7]) 32 | total = len(y_train) 33 | 34 | optimizer = optim.RMSprop(params=model.parameters(), lr=0.01) 35 | loss_func = torch.nn.MSELoss() 36 | model.train() 37 | 38 | num_epochs = 1000 39 | losses = [] 40 | 41 | for epoch in range(num_epochs): 42 | k = randint(0, total - 1) 43 | y = model(x_train[k]) 44 | y=y.squeeze() 45 | loss = loss_func(y, y_train[k]) 46 | 47 | optimizer.zero_grad() 48 | loss.backward() 49 | optimizer.step() 50 | 51 | if (epoch + 1) % 10 == 0: 52 | print(f'Эпоха [{epoch + 1}/{num_epochs}], Потери: {loss.item():.4f}') 53 | 54 | losses.append(loss.item()) 55 | 56 | model.eval() 57 | 58 | for x, d in zip(x_train, y_train): 59 | y = model(x) 60 | print(f"Выходное значение НС: {y.data} => {d}") 61 | 62 | # Построение функции потерь 63 | 64 | plt.plot(losses) 65 | plt.xlabel('Epoch') 66 | plt.ylabel('Loss') 67 | plt.title('Training Loss') 68 | plt.show() -------------------------------------------------------------------------------- /Lab_PyTorch_2.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | import torchvision 5 | from PIL import Image 6 | import matplotlib.pyplot as plt 7 | 8 | import torch 9 | import torch.utils.data as data 10 | import torchvision.transforms.v2 as tfs 11 | import torch.nn as nn 12 | import torch.optim as optim 13 | from tqdm import tqdm 14 | 15 | 16 | 17 | 18 | class DigitDataset(data.Dataset): 19 | def __init__(self, path, train=True, transform=None): 20 | self.path = os.path.join(path, "train" if train else "test") 21 | self.transform = transform 22 | 23 | with open(os.path.join(path, "format.json"), "r") as fp: 24 | self.format = json.load(fp) 25 | 26 | self.length = 0 27 | self.files = [] 28 | self.targets = torch.eye(10) 29 | 30 | for _dir, _target in self.format.items(): 31 | path = os.path.join(self.path, _dir) 32 | list_files = os.listdir(path) 33 | self.length += len(list_files) 34 | self.files.extend(map(lambda _x: (os.path.join(path, _x), _target), list_files)) 35 | 36 | def __getitem__(self, item): 37 | path_file, target = self.files[item] 38 | t = self.targets[target] 39 | img = Image.open(path_file) 40 | 41 | if self.transform: 42 | img = self.transform(img).ravel().float() / 255.0 43 | 44 | return img, t 45 | 46 | def __len__(self): 47 | return self.length 48 | 49 | 50 | class DigitNN(nn.Module): 51 | def __init__(self, input_dim, num_hidden, output_dim): 52 | super().__init__() 53 | self.layer1 = nn.Linear(input_dim, num_hidden) #bias =False 54 | self.layer2 = nn.Linear(num_hidden, output_dim)#bias =False 55 | 56 | def forward(self, x): 57 | x = self.layer1(x) 58 | x = nn.functional.relu(x) 59 | x = self.layer2(x) 60 | return x 61 | 62 | 63 | model = DigitNN(28 * 28, 32, 10) 64 | 65 | to_tensor = tfs.ToImage() # PILToTensor 66 | d_train = DigitDataset("dataset", transform=to_tensor) 67 | #train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 68 | 69 | d_train, d_val = data.random_split(d_train, [0.7, 0.3]) 70 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 71 | train_data_val = data.DataLoader(d_val, batch_size=32, shuffle=False) 72 | 73 | optimizer = 
optim.Adam(params=model.parameters(), lr=0.01) 74 | loss_function = nn.CrossEntropyLoss() 75 | epochs = 2 76 | model.train() 77 | losses = [] 78 | 79 | for _e in range(epochs): 80 | loss_mean = 0 81 | lm_count = 0 82 | 83 | train_tqdm = tqdm(train_data, leave=True) 84 | for x_train, y_train in train_tqdm: 85 | predict = model(x_train) 86 | loss = loss_function(predict, y_train) 87 | 88 | optimizer.zero_grad() 89 | loss.backward() 90 | optimizer.step() 91 | 92 | losses.append(loss.item()) 93 | 94 | lm_count += 1 95 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 96 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 97 | 98 | d_test = DigitDataset("dataset", train=False, transform=to_tensor) 99 | test_data = data.DataLoader(d_test, batch_size=500, shuffle=False) 100 | 101 | Q = 0 102 | 103 | # тестирование обученной НС 104 | model.eval() 105 | 106 | for x_test, y_test in test_data: 107 | with torch.no_grad(): 108 | p = model(x_test) 109 | p = torch.argmax(p, dim=1) 110 | y = torch.argmax(y_test, dim=1) 111 | Q += torch.sum(p == y).item() 112 | 113 | Q /= len(d_test) 114 | print(Q) 115 | 116 | gen_p = model.parameters() # возвращает генератор с набором параметров 117 | print(list(model.parameters())) # отображение списка параметров 118 | 119 | print("Вывод параметров сгенерированной модели с использованием словаря") 120 | params=dict(model.named_parameters()) 121 | print(params) 122 | for key, value in params.items(): 123 | print(key, value) 124 | #Выводим конфигурацию модели 125 | print(model) 126 | 127 | plt.plot(losses) 128 | plt.xlabel('Epoch') 129 | plt.ylabel('Loss') 130 | plt.title('Training Loss') 131 | plt.show() 132 | 133 | -------------------------------------------------------------------------------- /dataset.rar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/selfedu-rus/neuro-pytorch/9bba8ea72b5986b5312106245ba22cadec0e95a5/dataset.rar -------------------------------------------------------------------------------- /dataset_class_gen.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import torch 4 | import torchvision 5 | import torchvision.transforms.v2 as tfs 6 | 7 | transform = tfs.ToPILImage() 8 | 9 | mnist_train = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True) 10 | mnist_test = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=False) 11 | 12 | dir_out = 'dataset' 13 | file_format = 'format.json' 14 | train_data = {'dir': "train", 'data': mnist_train} 15 | test_data = {'dir': "test", 'data': mnist_test} 16 | 17 | if not os.path.exists(dir_out): 18 | os.mkdir(dir_out) 19 | 20 | for info in (train_data, test_data): 21 | os.mkdir(os.path.join(dir_out, info['dir'])) 22 | 23 | for i in range(10): 24 | os.mkdir(os.path.join(dir_out, info['dir'], f"class_{i}")) 25 | 26 | 27 | for info in (train_data, test_data): 28 | for i in range(10): 29 | path = os.path.join(dir_out, info['dir'], f"class_{i}") 30 | cls = info['data'].data[info['data'].targets == i] 31 | 32 | for n, x in enumerate(cls): 33 | x = transform(x) 34 | x.save(os.path.join(path, f"img_{n}.png"), "png") 35 | 36 | targets = dict() 37 | for i in range(10): 38 | targets[f'class_{i}'] = i 39 | 40 | fp = open(os.path.join(dir_out, file_format), "w") 41 | json.dump(targets, fp) 42 | fp.close() 43 | -------------------------------------------------------------------------------- 
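A quick, optional sanity check for the layout produced by dataset_class_gen.py above (assuming the script has been run with its default dir_out and created a local dataset/ folder): it reloads dataset/format.json — the mapping from each class_i directory to its digit label — and counts the saved PNGs per class, which is exactly the structure DigitDataset in Lab_PyTorch_2.py, neuro_net_19.py and neuro_net_20.py expects.

import os
import json

dataset_root = "dataset"                      # same dir_out as in dataset_class_gen.py

with open(os.path.join(dataset_root, "format.json"), "r") as fp:
    targets = json.load(fp)                   # {"class_0": 0, ..., "class_9": 9}

for part in ("train", "test"):
    for class_dir, label in sorted(targets.items(), key=lambda kv: kv[1]):
        path = os.path.join(dataset_root, part, class_dir)
        print(f"{part}/{class_dir} -> label {label}: {len(os.listdir(path))} images")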
/dataset_dogs.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/selfedu-rus/neuro-pytorch/9bba8ea72b5986b5312106245ba22cadec0e95a5/dataset_dogs.zip -------------------------------------------------------------------------------- /dataset_gen_reg.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/selfedu-rus/neuro-pytorch/9bba8ea72b5986b5312106245ba22cadec0e95a5/dataset_gen_reg.zip -------------------------------------------------------------------------------- /dataset_seg.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/selfedu-rus/neuro-pytorch/9bba8ea72b5986b5312106245ba22cadec0e95a5/dataset_seg.zip -------------------------------------------------------------------------------- /images_style_transfer.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/selfedu-rus/neuro-pytorch/9bba8ea72b5986b5312106245ba22cadec0e95a5/images_style_transfer.zip -------------------------------------------------------------------------------- /neuro_net_10_1.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import matplotlib.pyplot as plt 3 | 4 | N = 5 5 | 6 | x1 = torch.rand(N) 7 | x2 = x1 + torch.randint(1, 10, [N]) / 10 8 | C1 = torch.vstack([x1, x2]).mT 9 | 10 | x1 = torch.rand(N) 11 | x2 = x1 - torch.randint(1, 10, [N]) / 10 12 | C2 = torch.vstack([x1, x2]).mT 13 | 14 | f = [0, 1] 15 | 16 | w = torch.FloatTensor([-0.3, 0.3]) 17 | for i in range(N): 18 | x = C1[:][i] 19 | y = torch.dot(w, x) 20 | if y >= 0: 21 | print("Класс C1") 22 | else: 23 | print("Класс C2") 24 | 25 | plt.scatter(C1[:, 0], C1[:, 1], s=10, c='red') 26 | plt.scatter(C2[:, 0], C2[:, 1], s=10, c='blue') 27 | plt.plot(f) 28 | plt.grid() 29 | plt.show() 30 | -------------------------------------------------------------------------------- /neuro_net_10_2.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import matplotlib.pyplot as plt 3 | 4 | N = 5 5 | b = 3 6 | 7 | x1 = torch.rand(N) 8 | x2 = x1 + torch.randint(1, 10, [N]) / 10 + b 9 | C1 = torch.vstack([x1, x2, torch.ones(N)]).mT 10 | 11 | x1 = torch.rand(N) 12 | x2 = x1 - torch.randint(1, 10, [N]) / 10 + b 13 | C2 = torch.vstack([x1, x2, torch.ones(N)]).mT 14 | 15 | f = [0+b, 1+b] 16 | w1 = -0.5 17 | w2 = -w1 18 | w3 = -b * w2 19 | w = torch.FloatTensor([w1, w2, w3]) 20 | 21 | for i in range(N): 22 | x = C2[:][i] 23 | y = torch.dot(w, x) 24 | if y >= 0: 25 | print("Класс C1") 26 | else: 27 | print("Класс C2") 28 | 29 | plt.scatter(C1[:, 0], C1[:, 1], s=10, c='red') 30 | plt.scatter(C2[:, 0], C2[:, 1], s=10, c='blue') 31 | plt.plot(f) 32 | plt.grid() 33 | plt.show() 34 | -------------------------------------------------------------------------------- /neuro_net_10_3.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def act(x): 5 | return 0 if x <= 0 else 1 6 | 7 | 8 | w_hidden = torch.FloatTensor([[1, 1, -1.5], [1, 1, -0.5]]) 9 | w_out = torch.FloatTensor([-1, 1, -0.5]) 10 | 11 | # C1 = [(1,0), (0,1)] 12 | # C2 = [(0,0), (1,1)] 13 | data_x = [0, 0] # входные данные x1, x2 14 | x = torch.FloatTensor(data_x + [1]) 15 | 16 | z_hidden = torch.matmul(w_hidden, x) 17 | print(z_hidden) 18 | u_hidden = torch.FloatTensor([act(x) for x in z_hidden] + 
[1]) 19 | print(u_hidden) 20 | 21 | z_out = torch.dot(w_out, u_hidden) 22 | y = act(z_out) 23 | print(y) 24 | -------------------------------------------------------------------------------- /neuro_net_12.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from random import randint 3 | 4 | 5 | def act(z): 6 | return torch.tanh(z) 7 | 8 | 9 | def df(z): 10 | s = act(z) 11 | return 1 - s * s 12 | 13 | 14 | def go_forward(x_inp, w1, w2): 15 | z1 = torch.mv(w1[:, :3], x_inp) + w1[:, 3] 16 | s = act(z1) 17 | 18 | z2 = torch.dot(w2[:2], s) + w2[2] 19 | y = act(z2) 20 | return y, z1, z2 21 | 22 | 23 | torch.manual_seed(1) 24 | 25 | W1 = torch.rand(8).view(2, 4) - 0.5 26 | W2 = torch.rand(3) - 0.5 27 | 28 | # обучающая выборка (она же полная выборка) 29 | x_train = torch.FloatTensor([(-1, -1, -1), (-1, -1, 1), (-1, 1, -1), (-1, 1, 1), 30 | (1, -1, -1), (1, -1, 1), (1, 1, -1), (1, 1, 1)]) 31 | y_train = torch.FloatTensor([-1, 1, -1, 1, -1, 1, -1, -1]) 32 | 33 | lmd = 0.05 # шаг обучения 34 | N = 1000 # число итераций при обучении 35 | total = len(y_train) # размер обучающей выборки 36 | 37 | for _ in range(N): 38 | k = randint(0, total-1) 39 | x = x_train[k] # случайный выбор образа из обучающей выборки 40 | y, z1, out = go_forward(x, W1, W2) # прямой проход по НС и вычисление выходных значений нейронов 41 | e = y - y_train[k] # производная квадратической функции потерь 42 | delta = e * df(out) # вычисление локального градиента 43 | delta2 = W2[:2] * delta * df(z1) # вектор из 2-х локальных градиентов скрытого слоя 44 | 45 | W2[:2] = W2[:2] - lmd * delta * z1 # корректировка весов связей последнего слоя 46 | W2[2] = W2[2] - lmd * delta # корректировка bias 47 | 48 | # корректировка связей первого слоя 49 | W1[0, :3] = W1[0, :3] - lmd * delta2[0] * x 50 | W1[1, :3] = W1[1, :3] - lmd * delta2[1] * x 51 | 52 | # корректировка bias 53 | W1[0, 3] = W1[0, 3] - lmd * delta2[0] 54 | W1[1, 3] = W1[1, 3] - lmd * delta2[1] 55 | 56 | # тестирование обученной НС 57 | for x, d in zip(x_train, y_train): 58 | y, z1, out = go_forward(x, W1, W2) 59 | print(f"Выходное значение НС: {y} => {d}") 60 | 61 | # результирующие весовые коэффициенты 62 | print(W1) 63 | print(W2) 64 | -------------------------------------------------------------------------------- /neuro_net_15_sgd.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.optim as optim 3 | 4 | from random import randint 5 | import matplotlib.pyplot as plt 6 | 7 | 8 | def model(X, w): 9 | return X @ w 10 | 11 | 12 | N = 2 13 | w = torch.FloatTensor(N).uniform_(-1e-5, 1e-5) 14 | w.requires_grad_(True) 15 | x = torch.arange(0, 3, 0.1) 16 | 17 | y_train = 0.5 * x + 0.2 * torch.sin(2*x) - 3.0 18 | x_train = torch.tensor([[_x ** _n for _n in range(N)] for _x in x]) 19 | 20 | total = len(x) 21 | lr = torch.tensor([0.1, 0.01]) 22 | loss_func = torch.nn.L1Loss() 23 | optimizer = optim.Adam(params=[w], lr=0.01) 24 | 25 | for _ in range(1000): 26 | k = randint(0, total-1) 27 | y = model(x_train[k], w) 28 | loss = loss_func(y, y_train[k]) 29 | 30 | loss.backward() 31 | # w.data = w.data - lr * w.grad 32 | # w.grad.zero_() 33 | optimizer.step() 34 | optimizer.zero_grad() 35 | 36 | print(w) 37 | predict = model(x_train, w) 38 | 39 | plt.plot(x, y_train.numpy()) 40 | plt.plot(x, predict.data.numpy()) 41 | plt.grid() 42 | plt.show() 43 | -------------------------------------------------------------------------------- /neuro_net_16.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | import torch.nn.functional as F 5 | 6 | from random import randint 7 | import matplotlib.pyplot as plt 8 | 9 | 10 | class NetGirl(nn.Module): 11 | def __init__(self, input_dim, num_hidden, output_dim): 12 | super().__init__() 13 | self.layer1 = nn.Linear(input_dim, num_hidden) 14 | self.layer2 = nn.Linear(num_hidden, output_dim) 15 | 16 | def forward(self, x): 17 | x = self.layer1(x) 18 | x = F.tanh(x) 19 | x = self.layer2(x) 20 | x = F.tanh(x) 21 | return x 22 | 23 | 24 | model = NetGirl(3, 2, 1) 25 | # print(model) 26 | # print(list(model.parameters())) 27 | 28 | # обучающая выборка (она же полная выборка) 29 | x_train = torch.FloatTensor([(-1, -1, -1), (-1, -1, 1), (-1, 1, -1), (-1, 1, 1), 30 | (1, -1, -1), (1, -1, 1), (1, 1, -1), (1, 1, 1)]) 31 | y_train = torch.FloatTensor([-1, 1, -1, 1, -1, 1, -1, -1]) 32 | total = len(y_train) 33 | 34 | optimizer = optim.RMSprop(params=model.parameters(), lr=0.01) 35 | loss_func = torch.nn.MSELoss() 36 | 37 | model.train() 38 | 39 | for _ in range(1000): 40 | k = randint(0, total-1) 41 | y = model(x_train[k]) 42 | loss = loss_func(y, y_train[k]) 43 | 44 | optimizer.zero_grad() 45 | loss.backward() 46 | optimizer.step() 47 | 48 | model.eval() 49 | 50 | # тестирование обученной НС 51 | for x, d in zip(x_train, y_train): 52 | y = model(x) 53 | print(f"Выходное значение НС: {y.data} => {d}") 54 | -------------------------------------------------------------------------------- /neuro_net_19.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | from PIL import Image 5 | 6 | import torch 7 | import torch.nn as nn 8 | import torch.utils.data as data 9 | import torchvision 10 | import torchvision.transforms.v2 as tfs 11 | 12 | 13 | class DigitDataset(data.Dataset): 14 | def __init__(self, path, train=True, transform=None): 15 | self.path = os.path.join(path, "train" if train else "test") 16 | self.transform = transform 17 | 18 | with open(os.path.join(self.path, "format.json"), "r") as fp: 19 | self.format = json.load(fp) 20 | 21 | self.length = 0 22 | self.files = [] 23 | self.targets = torch.eye(10) 24 | 25 | for _dir, _target in self.format.items(): 26 | path = os.path.join(self.path, _dir) 27 | list_files = os.listdir(path) 28 | self.length += len(list_files) 29 | self.files.extend(map(lambda _x: (os.path.join(path, _x), _target), list_files)) 30 | 31 | def __getitem__(self, item): 32 | path_file, target = self.files[item] 33 | t = self.targets[target] 34 | img = Image.open(path_file) 35 | 36 | if self.transform: 37 | img = self.transform(img).ravel().float() / 255.0 38 | 39 | return img, t 40 | 41 | def __len__(self): 42 | return self.length 43 | 44 | 45 | to_tensor = tfs.ToImage() # PILToTensor 46 | d_train = DigitDataset("dataset", transform=to_tensor) 47 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 48 | 49 | it = iter(train_data) 50 | x, y = next(it) 51 | print(len(d_train)) 52 | -------------------------------------------------------------------------------- /neuro_net_20.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from PIL import Image 4 | 5 | import torch 6 | import torch.utils.data as data 7 | import torchvision.transforms.v2 as tfs 8 | import torch.nn as nn 9 | import torch.optim as optim 10 | from tqdm import tqdm 11 | 12 
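# Note on the DigitDataset constructor below: `self.path = os.path.join(self.path, ...)` reads
# self.path before it has been assigned and raises AttributeError; judging by the otherwise
# identical class in neuro_net_19.py and Lab_PyTorch_2.py, the intended line is presumably
#     self.path = os.path.join(path, "train" if train else "test")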
| 13 | class DigitDataset(data.Dataset): 14 | def __init__(self, path, train=True, transform=None): 15 | self.path = os.path.join(self.path, "train" if train else "test") 16 | self.transform = transform 17 | 18 | with open(os.path.join(path, "format.json"), "r") as fp: 19 | self.format = json.load(fp) 20 | 21 | self.length = 0 22 | self.files = [] 23 | self.targets = torch.eye(10) 24 | 25 | for _dir, _target in self.format.items(): 26 | path = os.path.join(self.path, _dir) 27 | list_files = os.listdir(path) 28 | self.length += len(list_files) 29 | self.files.extend(map(lambda _x: (os.path.join(path, _x), _target), list_files)) 30 | 31 | def __getitem__(self, item): 32 | path_file, target = self.files[item] 33 | t = self.targets[target] 34 | img = Image.open(path_file) 35 | 36 | if self.transform: 37 | img = self.transform(img).ravel().float() / 255.0 38 | 39 | return img, t 40 | 41 | def __len__(self): 42 | return self.length 43 | 44 | 45 | class DigitNN(nn.Module): 46 | def __init__(self, input_dim, num_hidden, output_dim): 47 | super().__init__() 48 | self.layer1 = nn.Linear(input_dim, num_hidden) 49 | self.layer2 = nn.Linear(num_hidden, output_dim) 50 | 51 | def forward(self, x): 52 | x = self.layer1(x) 53 | x = nn.functional.relu(x) 54 | x = self.layer2(x) 55 | return x 56 | 57 | 58 | model = DigitNN(28 * 28, 32, 10) 59 | 60 | to_tensor = tfs.ToImage() # PILToTensor 61 | d_train = DigitDataset("dataset", transform=to_tensor) 62 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 63 | 64 | optimizer = optim.Adam(params=model.parameters(), lr=0.01) 65 | loss_function = nn.CrossEntropyLoss() 66 | epochs = 2 67 | model.train() 68 | 69 | for _e in range(epochs): 70 | loss_mean = 0 71 | lm_count = 0 72 | 73 | train_tqdm = tqdm(train_data, leave=True) 74 | for x_train, y_train in train_tqdm: 75 | predict = model(x_train) 76 | loss = loss_function(predict, y_train) 77 | 78 | optimizer.zero_grad() 79 | loss.backward() 80 | optimizer.step() 81 | 82 | lm_count += 1 83 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 84 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 85 | 86 | d_test = DigitDataset("dataset", train=False, transform=to_tensor) 87 | test_data = data.DataLoader(d_test, batch_size=500, shuffle=False) 88 | 89 | Q = 0 90 | 91 | # тестирование обученной НС 92 | model.eval() 93 | 94 | for x_test, y_test in test_data: 95 | with torch.no_grad(): 96 | p = model(x_test) 97 | p = torch.argmax(p, dim=1) 98 | y = torch.argmax(y_test, dim=1) 99 | Q += torch.sum(p == y).item() 100 | 101 | Q /= len(d_test) 102 | print(Q) 103 | -------------------------------------------------------------------------------- /neuro_net_21.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from PIL import Image 4 | 5 | import torch 6 | import torch.utils.data as data 7 | import torchvision.transforms.v2 as tfs 8 | import torch.nn as nn 9 | import torch.optim as optim 10 | from tqdm import tqdm 11 | from torchvision.datasets import ImageFolder 12 | 13 | 14 | class RavelTransform(nn.Module): 15 | def forward(self, item): 16 | return item.ravel() 17 | 18 | 19 | class DigitNN(nn.Module): 20 | def __init__(self, input_dim, num_hidden, output_dim): 21 | super().__init__() 22 | self.layer1 = nn.Linear(input_dim, num_hidden) 23 | self.layer2 = nn.Linear(num_hidden, output_dim) 24 | 25 | def forward(self, x): 26 | x = self.layer1(x) 27 | x = nn.functional.relu(x) 28 | x = self.layer2(x) 
29 | return x 30 | 31 | 32 | model = DigitNN(28 * 28, 32, 10) 33 | 34 | transforms = tfs.Compose([tfs.ToImage(), tfs.Grayscale(), 35 | tfs.ToDtype(torch.float32, scale=True), 36 | RavelTransform(), 37 | ]) 38 | d_train = ImageFolder("dataset/train", transform=transforms) 39 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 40 | 41 | optimizer = optim.Adam(params=model.parameters(), lr=0.01) 42 | loss_function = nn.CrossEntropyLoss() 43 | epochs = 2 44 | model.train() 45 | 46 | for _e in range(epochs): 47 | loss_mean = 0 48 | lm_count = 0 49 | 50 | train_tqdm = tqdm(train_data, leave=True) 51 | for x_train, y_train in train_tqdm: 52 | predict = model(x_train) 53 | loss = loss_function(predict, y_train) 54 | 55 | optimizer.zero_grad() 56 | loss.backward() 57 | optimizer.step() 58 | 59 | lm_count += 1 60 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 61 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 62 | 63 | d_test = ImageFolder("dataset/test", transform=transforms) 64 | test_data = data.DataLoader(d_test, batch_size=500, shuffle=False) 65 | 66 | Q = 0 67 | 68 | # тестирование обученной НС 69 | model.eval() 70 | 71 | for x_test, y_test in test_data: 72 | with torch.no_grad(): 73 | p = model(x_test) 74 | p = torch.argmax(p, dim=1) 75 | Q += torch.sum(p == y_test).item() 76 | 77 | Q /= len(d_test) 78 | print(Q) 79 | -------------------------------------------------------------------------------- /neuro_net_23.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | import torch 4 | import torch.utils.data as data 5 | import torchvision.transforms.v2 as tfs 6 | import torchvision 7 | import torch.nn as nn 8 | import torch.optim as optim 9 | from tqdm import tqdm 10 | from torchvision.datasets import ImageFolder 11 | 12 | 13 | class RavelTransform(nn.Module): 14 | def forward(self, item): 15 | return item.ravel() 16 | 17 | 18 | class DigitNN(nn.Module): 19 | def __init__(self, input_dim, num_hidden, output_dim): 20 | super().__init__() 21 | self.layer1 = nn.Linear(input_dim, num_hidden) 22 | self.layer2 = nn.Linear(num_hidden, output_dim) 23 | 24 | def forward(self, x): 25 | x = self.layer1(x) 26 | x = nn.functional.relu(x) 27 | x = self.layer2(x) 28 | return x 29 | 30 | 31 | model = DigitNN(28 * 28, 32, 10) 32 | 33 | transforms = tfs.Compose([tfs.ToImage(), tfs.Grayscale(), 34 | tfs.ToDtype(torch.float32, scale=True), 35 | RavelTransform(), 36 | ]) 37 | 38 | dataset_mnist = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True, transform=transforms) 39 | d_train, d_val = data.random_split(dataset_mnist, [0.7, 0.3]) 40 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 41 | train_data_val = data.DataLoader(d_val, batch_size=32, shuffle=False) 42 | 43 | 44 | optimizer = optim.Adam(params=model.parameters(), lr=0.01) 45 | loss_function = nn.CrossEntropyLoss() 46 | epochs = 20 47 | 48 | loss_lst_val = [] # список значений потерь при валидации 49 | loss_lst = [] # список значений потерь при обучении 50 | 51 | for _e in range(epochs): 52 | model.train() 53 | loss_mean = 0 54 | lm_count = 0 55 | 56 | train_tqdm = tqdm(train_data, leave=False) 57 | for x_train, y_train in train_tqdm: 58 | predict = model(x_train) 59 | loss = loss_function(predict, y_train) 60 | 61 | optimizer.zero_grad() 62 | loss.backward() 63 | optimizer.step() 64 | 65 | lm_count += 1 66 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * 
loss_mean 67 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 68 | 69 | # валидация модели 70 | model.eval() 71 | Q_val = 0 72 | count_val = 0 73 | 74 | for x_val, y_val in train_data_val: 75 | with torch.no_grad(): 76 | p = model(x_val) 77 | loss = loss_function(p, y_val) 78 | Q_val += loss.item() 79 | count_val += 1 80 | 81 | Q_val /= count_val 82 | 83 | loss_lst.append(loss_mean) 84 | loss_lst_val.append(Q_val) 85 | 86 | print(f" | loss_mean={loss_mean:.3f}, Q_val={Q_val:.3f}") 87 | 88 | d_test = ImageFolder("dataset/test", transform=transforms) 89 | test_data = data.DataLoader(d_test, batch_size=500, shuffle=False) 90 | 91 | Q = 0 92 | 93 | # тестирование обученной НС 94 | model.eval() 95 | 96 | for x_test, y_test in test_data: 97 | with torch.no_grad(): 98 | p = model(x_test) 99 | p = torch.argmax(p, dim=1) 100 | Q += torch.sum(p == y_test).item() 101 | 102 | Q /= len(d_test) 103 | print(Q) 104 | 105 | # вывод графиков 106 | plt.plot(loss_lst) 107 | plt.plot(loss_lst_val) 108 | plt.grid() 109 | plt.show() 110 | -------------------------------------------------------------------------------- /neuro_net_24_Dropout.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | import torch 4 | import torch.utils.data as data 5 | import torchvision.transforms.v2 as tfs 6 | import torchvision 7 | import torch.nn as nn 8 | import torch.optim as optim 9 | from tqdm import tqdm 10 | from torchvision.datasets import ImageFolder 11 | 12 | 13 | class RavelTransform(nn.Module): 14 | def forward(self, item): 15 | return item.ravel() 16 | 17 | 18 | class DigitNN(nn.Module): 19 | def __init__(self, input_dim, num_hidden, output_dim): 20 | super().__init__() 21 | self.layer1 = nn.Linear(input_dim, num_hidden) 22 | self.layer2 = nn.Linear(num_hidden, output_dim) 23 | self.dropout_1 = nn.Dropout1d(0.3) 24 | 25 | def forward(self, x): 26 | x = self.layer1(x) 27 | x = nn.functional.relu(x) 28 | x = self.dropout_1(x) 29 | x = self.layer2(x) 30 | return x 31 | 32 | 33 | model = DigitNN(28 * 28, 128, 10) 34 | 35 | transforms = tfs.Compose([tfs.ToImage(), tfs.Grayscale(), 36 | tfs.ToDtype(torch.float32, scale=True), 37 | RavelTransform(), 38 | ]) 39 | 40 | dataset_mnist = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True, transform=transforms) 41 | d_train, d_val = data.random_split(dataset_mnist, [0.7, 0.3]) 42 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 43 | train_data_val = data.DataLoader(d_val, batch_size=32, shuffle=False) 44 | 45 | 46 | optimizer = optim.Adam(params=model.parameters(), lr=0.01) # , weight_decay=0.001) 47 | loss_function = nn.CrossEntropyLoss() 48 | epochs = 20 49 | 50 | loss_lst_val = [] # список значений потерь при валидации 51 | loss_lst = [] # список значений потерь при обучении 52 | 53 | for _e in range(epochs): 54 | model.train() 55 | loss_mean = 0 56 | lm_count = 0 57 | 58 | train_tqdm = tqdm(train_data, leave=False) 59 | for x_train, y_train in train_tqdm: 60 | predict = model(x_train) 61 | loss = loss_function(predict, y_train) 62 | 63 | optimizer.zero_grad() 64 | loss.backward() 65 | optimizer.step() 66 | 67 | lm_count += 1 68 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 69 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 70 | 71 | # валидация модели 72 | model.eval() 73 | Q_val = 0 74 | count_val = 0 75 | 76 | for x_val, y_val in train_data_val: 77 | with 
torch.no_grad(): 78 | p = model(x_val) 79 | loss = loss_function(p, y_val) 80 | Q_val += loss.item() 81 | count_val += 1 82 | 83 | Q_val /= count_val 84 | 85 | loss_lst.append(loss_mean) 86 | loss_lst_val.append(Q_val) 87 | 88 | print(f" | loss_mean={loss_mean:.3f}, Q_val={Q_val:.3f}") 89 | 90 | d_test = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=False, transform=transforms) 91 | test_data = data.DataLoader(d_test, batch_size=500, shuffle=False) 92 | 93 | Q = 0 94 | 95 | # тестирование обученной НС 96 | model.eval() 97 | 98 | for x_test, y_test in test_data: 99 | with torch.no_grad(): 100 | p = model(x_test) 101 | p = torch.argmax(p, dim=1) 102 | Q += torch.sum(p == y_test).item() 103 | 104 | Q /= len(d_test) 105 | print(Q) 106 | 107 | # вывод графиков 108 | plt.plot(loss_lst) 109 | plt.plot(loss_lst_val) 110 | plt.grid() 111 | plt.show() 112 | -------------------------------------------------------------------------------- /neuro_net_24_L2.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | import torch 4 | import torch.utils.data as data 5 | import torchvision.transforms.v2 as tfs 6 | import torchvision 7 | import torch.nn as nn 8 | import torch.optim as optim 9 | from tqdm import tqdm 10 | from torchvision.datasets import ImageFolder 11 | 12 | 13 | class RavelTransform(nn.Module): 14 | def forward(self, item): 15 | return item.ravel() 16 | 17 | 18 | class DigitNN(nn.Module): 19 | def __init__(self, input_dim, num_hidden, output_dim): 20 | super().__init__() 21 | self.layer1 = nn.Linear(input_dim, num_hidden) 22 | self.layer2 = nn.Linear(num_hidden, output_dim) 23 | 24 | def forward(self, x): 25 | x = self.layer1(x) 26 | x = nn.functional.relu(x) 27 | x = self.layer2(x) 28 | return x 29 | 30 | 31 | model = DigitNN(28 * 28, 128, 10) 32 | 33 | transforms = tfs.Compose([tfs.ToImage(), tfs.Grayscale(), 34 | tfs.ToDtype(torch.float32, scale=True), 35 | RavelTransform(), 36 | ]) 37 | 38 | dataset_mnist = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True, transform=transforms) 39 | d_train, d_val = data.random_split(dataset_mnist, [0.7, 0.3]) 40 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 41 | train_data_val = data.DataLoader(d_val, batch_size=32, shuffle=False) 42 | 43 | 44 | optimizer = optim.Adam(params=model.parameters(), lr=0.01, weight_decay=0.001) 45 | loss_function = nn.CrossEntropyLoss() 46 | epochs = 20 47 | 48 | loss_lst_val = [] # список значений потерь при валидации 49 | loss_lst = [] # список значений потерь при обучении 50 | 51 | for _e in range(epochs): 52 | model.train() 53 | loss_mean = 0 54 | lm_count = 0 55 | 56 | train_tqdm = tqdm(train_data, leave=False) 57 | for x_train, y_train in train_tqdm: 58 | predict = model(x_train) 59 | loss = loss_function(predict, y_train) 60 | 61 | optimizer.zero_grad() 62 | loss.backward() 63 | optimizer.step() 64 | 65 | lm_count += 1 66 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 67 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 68 | 69 | # валидация модели 70 | model.eval() 71 | Q_val = 0 72 | count_val = 0 73 | 74 | for x_val, y_val in train_data_val: 75 | with torch.no_grad(): 76 | p = model(x_val) 77 | loss = loss_function(p, y_val) 78 | Q_val += loss.item() 79 | count_val += 1 80 | 81 | Q_val /= count_val 82 | 83 | loss_lst.append(loss_mean) 84 | loss_lst_val.append(Q_val) 85 | 86 | print(f" | loss_mean={loss_mean:.3f}, 
Q_val={Q_val:.3f}") 87 | 88 | d_test = ImageFolder("dataset/test", transform=transforms) 89 | test_data = data.DataLoader(d_test, batch_size=500, shuffle=False) 90 | 91 | Q = 0 92 | 93 | # тестирование обученной НС 94 | model.eval() 95 | 96 | for x_test, y_test in test_data: 97 | with torch.no_grad(): 98 | p = model(x_test) 99 | p = torch.argmax(p, dim=1) 100 | Q += torch.sum(p == y_test).item() 101 | 102 | Q /= len(d_test) 103 | print(Q) 104 | 105 | # вывод графиков 106 | plt.plot(loss_lst) 107 | plt.plot(loss_lst_val) 108 | plt.grid() 109 | plt.show() 110 | -------------------------------------------------------------------------------- /neuro_net_25_bm.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | import torch 4 | import torch.utils.data as data 5 | import torchvision.transforms.v2 as tfs 6 | import torchvision 7 | import torch.nn as nn 8 | import torch.optim as optim 9 | from tqdm import tqdm 10 | from torchvision.datasets import ImageFolder 11 | 12 | 13 | class RavelTransform(nn.Module): 14 | def forward(self, item): 15 | return item.ravel() 16 | 17 | 18 | class DigitNN(nn.Module): 19 | def __init__(self, input_dim, num_hidden, output_dim): 20 | super().__init__() 21 | self.layer1 = nn.Linear(input_dim, num_hidden, bias=False) 22 | self.layer2 = nn.Linear(num_hidden, output_dim) 23 | self.bm_1 = nn.BatchNorm1d(num_hidden) 24 | 25 | def forward(self, x): 26 | x = self.layer1(x) 27 | x = nn.functional.relu(x) 28 | x = self.bm_1(x) 29 | x = self.layer2(x) 30 | return x 31 | 32 | 33 | model = DigitNN(28 * 28, 128, 10) 34 | 35 | transforms = tfs.Compose([tfs.ToImage(), tfs.Grayscale(), 36 | tfs.ToDtype(torch.float32, scale=True), 37 | RavelTransform(), 38 | ]) 39 | 40 | dataset_mnist = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True, transform=transforms) 41 | d_train, d_val = data.random_split(dataset_mnist, [0.7, 0.3]) 42 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 43 | train_data_val = data.DataLoader(d_val, batch_size=32, shuffle=False) 44 | 45 | 46 | optimizer = optim.Adam(params=model.parameters(), lr=0.01) # , weight_decay=0.001) 47 | loss_function = nn.CrossEntropyLoss() 48 | epochs = 20 49 | 50 | loss_lst_val = [] # список значений потерь при валидации 51 | loss_lst = [] # список значений потерь при обучении 52 | 53 | for _e in range(epochs): 54 | model.train() 55 | loss_mean = 0 56 | lm_count = 0 57 | 58 | train_tqdm = tqdm(train_data, leave=False) 59 | for x_train, y_train in train_tqdm: 60 | predict = model(x_train) 61 | loss = loss_function(predict, y_train) 62 | 63 | optimizer.zero_grad() 64 | loss.backward() 65 | optimizer.step() 66 | 67 | lm_count += 1 68 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 69 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 70 | 71 | # валидация модели 72 | model.eval() 73 | Q_val = 0 74 | count_val = 0 75 | 76 | for x_val, y_val in train_data_val: 77 | with torch.no_grad(): 78 | p = model(x_val) 79 | loss = loss_function(p, y_val) 80 | Q_val += loss.item() 81 | count_val += 1 82 | 83 | Q_val /= count_val 84 | 85 | loss_lst.append(loss_mean) 86 | loss_lst_val.append(Q_val) 87 | 88 | print(f" | loss_mean={loss_mean:.3f}, Q_val={Q_val:.3f}") 89 | 90 | d_test = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=False, transform=transforms) 91 | test_data = data.DataLoader(d_test, batch_size=500, shuffle=False) 92 | 93 | Q = 0 94 | 95 | # 
тестирование обученной НС 96 | model.eval() 97 | 98 | for x_test, y_test in test_data: 99 | with torch.no_grad(): 100 | p = model(x_test) 101 | p = torch.argmax(p, dim=1) 102 | Q += torch.sum(p == y_test).item() 103 | 104 | Q /= len(d_test) 105 | print(Q) 106 | 107 | # вывод графиков 108 | plt.plot(loss_lst) 109 | plt.plot(loss_lst_val) 110 | plt.grid() 111 | plt.show() 112 | -------------------------------------------------------------------------------- /neuro_net_29.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from PIL import Image 4 | 5 | import torch 6 | import torch.utils.data as data 7 | import torchvision.transforms.v2 as tfs 8 | import torch.nn as nn 9 | import torch.optim as optim 10 | from tqdm import tqdm 11 | 12 | 13 | class SunDataset(data.Dataset): 14 | def __init__(self, path, train=True, transform=None): 15 | self.path = os.path.join(path, "train" if train else "test") 16 | self.transform = transform 17 | 18 | with open(os.path.join(self.path, "format.json"), "r") as fp: 19 | self.format = json.load(fp) 20 | 21 | self.length = len(self.format) 22 | self.files = tuple(self.format.keys()) 23 | self.targets = tuple(self.format.values()) 24 | 25 | def __getitem__(self, item): 26 | path_file = os.path.join(self.path, self.files[item]) 27 | img = Image.open(path_file).convert('RGB') 28 | 29 | if self.transform: 30 | img = self.transform(img) 31 | 32 | return img, torch.tensor(self.targets[item], dtype=torch.float32) 33 | 34 | def __len__(self): 35 | return self.length 36 | 37 | 38 | model = nn.Sequential( 39 | nn.Conv2d(3, 32, 3, padding='same'), 40 | nn.ReLU(), 41 | nn.MaxPool2d(2), 42 | nn.Conv2d(32, 8, 3, padding='same'), 43 | nn.ReLU(), 44 | nn.MaxPool2d(2), 45 | nn.Conv2d(8, 4, 3, padding='same'), 46 | nn.ReLU(), 47 | nn.MaxPool2d(2), 48 | nn.Flatten(), 49 | nn.Linear(4096, 128), 50 | nn.ReLU(), 51 | nn.Linear(128, 2) 52 | ) 53 | 54 | transforms = tfs.Compose([tfs.ToImage(), tfs.ToDtype(torch.float32, scale=True)]) 55 | d_train = SunDataset("dataset_reg", transform=transforms) 56 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 57 | 58 | optimizer = optim.Adam(params=model.parameters(), lr=0.001, weight_decay=0.001) 59 | loss_function = nn.MSELoss() 60 | epochs = 5 61 | model.train() 62 | 63 | for _e in range(epochs): 64 | loss_mean = 0 65 | lm_count = 0 66 | 67 | train_tqdm = tqdm(train_data, leave=True) 68 | for x_train, y_train in train_tqdm: 69 | predict = model(x_train) 70 | loss = loss_function(predict, y_train) 71 | 72 | optimizer.zero_grad() 73 | loss.backward() 74 | optimizer.step() 75 | 76 | lm_count += 1 77 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 78 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 79 | 80 | st = model.state_dict() 81 | torch.save(st, 'model_sun_2.tar') 82 | 83 | d_test = SunDataset("dataset_reg", train=False, transform=transforms) 84 | test_data = data.DataLoader(d_test, batch_size=50, shuffle=False) 85 | 86 | # тестирование обученной НС 87 | Q = 0 88 | count = 0 89 | model.eval() 90 | 91 | test_tqdm = tqdm(test_data, leave=True) 92 | for x_test, y_test in test_tqdm: 93 | with torch.no_grad(): 94 | p = model(x_test) 95 | Q += loss_function(p, y_test).item() 96 | count += 1 97 | 98 | Q /= count 99 | print(Q) 100 | -------------------------------------------------------------------------------- /neuro_net_29_view.py: 
-------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import json 3 | import os 4 | 5 | import matplotlib.pyplot as plt 6 | import torch 7 | import torch.nn as nn 8 | import torchvision.transforms.v2 as tfs 9 | 10 | # model = nn.Sequential( 11 | # nn.Conv2d(3, 32, 3, padding='same'), 12 | # nn.ReLU(), 13 | # nn.MaxPool2d(2), 14 | # nn.Conv2d(32, 16, 3, padding='same'), 15 | # nn.ReLU(), 16 | # nn.MaxPool2d(2), 17 | # nn.Conv2d(16, 8, 3, padding='same'), 18 | # nn.ReLU(), 19 | # nn.MaxPool2d(2), 20 | # nn.Conv2d(8, 4, 3, padding='same'), 21 | # nn.ReLU(), 22 | # nn.MaxPool2d(2), 23 | # nn.Flatten(), 24 | # nn.Linear(1024, 256), 25 | # nn.ReLU(), 26 | # nn.Linear(256, 2) 27 | # ) 28 | 29 | model = nn.Sequential( 30 | nn.Conv2d(3, 32, 3, padding='same'), 31 | nn.ReLU(), 32 | nn.MaxPool2d(2), 33 | nn.Conv2d(32, 8, 3, padding='same'), 34 | nn.ReLU(), 35 | nn.MaxPool2d(2), 36 | nn.Conv2d(8, 4, 3, padding='same'), 37 | nn.ReLU(), 38 | nn.MaxPool2d(2), 39 | nn.Flatten(), 40 | nn.Linear(4096, 128), 41 | nn.ReLU(), 42 | nn.Linear(128, 2) 43 | ) 44 | 45 | path = 'dataset_reg/test/' 46 | num_img = 100 47 | 48 | st = torch.load('model_sun_2.tar', weights_only=False) 49 | model.load_state_dict(st) 50 | 51 | with open(os.path.join(path, "format.json"), "r") as fp: 52 | format = json.load(fp) 53 | 54 | transforms = tfs.Compose([tfs.ToImage(), tfs.ToDtype(torch.float32, scale=True)]) 55 | img = Image.open(os.path.join(path, f'sun_reg_{num_img}.png')).convert('RGB') 56 | img_t = transforms(img).unsqueeze(0) 57 | 58 | model.eval() 59 | predict = model(img_t) 60 | print(predict) 61 | print(tuple(format.values())[num_img-1]) 62 | p = predict.detach().squeeze().numpy() 63 | 64 | plt.imshow(img) 65 | plt.scatter(p[0], p[1], s=20, c='r') 66 | plt.show() 67 | -------------------------------------------------------------------------------- /neuro_net_32_style_transfer.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | 5 | import torch 6 | from torchvision import models 7 | import torchvision.transforms.v2 as tfs_v2 8 | import torch.nn as nn 9 | import torch.optim as optim 10 | 11 | 12 | class ModelStyle(nn.Module): 13 | def __init__(self): 14 | super().__init__() 15 | _model = models.vgg19(weights=models.VGG19_Weights.DEFAULT) 16 | self.mf = _model.features 17 | self.mf.requires_grad_(False) 18 | self.requires_grad_(False) 19 | self.mf.eval() 20 | self.idx_out = (0, 5, 10, 19, 28, 34) 21 | self.num_style_layers = len(self.idx_out) - 1 # последний слой для контента 22 | 23 | def forward(self, x): 24 | outputs = [] 25 | for indx, layer in enumerate(self.mf): 26 | x = layer(x) 27 | if indx in self.idx_out: 28 | outputs.append(x.squeeze(0)) 29 | 30 | return outputs 31 | 32 | 33 | def get_content_loss(base_content, target): 34 | return torch.mean( torch.square(base_content - target) ) 35 | 36 | 37 | def gram_matrix(x): 38 | channels = x.size(dim=0) 39 | g = x.view(channels, -1) 40 | gram = torch.mm(g, g.mT) / g.size(dim=1) 41 | return gram 42 | 43 | 44 | def get_style_loss(base_style, gram_target): 45 | style_weights = [1.0, 0.8, 0.5, 0.3, 0.1] 46 | 47 | _loss = 0 48 | i = 0 49 | for base, target in zip(base_style, gram_target): 50 | gram_style = gram_matrix(base) 51 | _loss += style_weights[i] * torch.mean(torch.square(gram_style - target)) 52 | i += 1 53 | 54 | return _loss 55 | 56 | 57 | img = Image.open('img.jpg').convert('RGB') 58 | 
img_style = Image.open('img_style.jpg').convert('RGB') 59 | 60 | # transforms = models.VGG19_Weights.DEFAULT.transforms() 61 | transforms = tfs_v2.Compose([tfs_v2.ToImage(), 62 | tfs_v2.ToDtype(torch.float32, scale=True), 63 | ]) 64 | 65 | img = transforms(img).unsqueeze(0) 66 | img_style = transforms(img_style).unsqueeze(0) 67 | img_create = img.clone() 68 | img_create.requires_grad_(True) 69 | 70 | model = ModelStyle() 71 | outputs_img = model(img) 72 | outputs_img_style = model(img_style) 73 | 74 | gram_matrix_style = [gram_matrix(x) for x in outputs_img_style[:model.num_style_layers]] 75 | content_weight = 1 76 | style_weight = 1000 77 | best_loss = -1 78 | epochs = 100 79 | 80 | optimizer = optim.Adam(params=[img_create], lr=0.01) 81 | best_img = img_create.clone() 82 | 83 | for _e in range(epochs): 84 | outputs_img_create = model(img_create) 85 | 86 | loss_content = get_content_loss(outputs_img_create[-1], outputs_img[-1]) 87 | loss_style = get_style_loss(outputs_img_create, gram_matrix_style) 88 | loss = content_weight * loss_content + style_weight * loss_style 89 | 90 | optimizer.zero_grad() 91 | loss.backward() 92 | optimizer.step() 93 | 94 | img_create.data.clamp_(0, 1) 95 | 96 | if loss < best_loss or best_loss < 0: 97 | best_loss = loss 98 | best_img = img_create.clone() 99 | 100 | print(f'Iteration: {_e}, loss: {loss.item(): .4f}') 101 | 102 | x = best_img.detach().squeeze() 103 | low, hi = torch.amin(x), torch.amax(x) 104 | x = (x - low) / (hi - low) * 255.0 105 | x = x.permute(1, 2, 0) 106 | x = x.numpy() 107 | x = np.clip(x, 0, 255).astype('uint8') 108 | 109 | image = Image.fromarray(x, 'RGB') 110 | image.save("result.jpg") 111 | 112 | print(best_loss) 113 | plt.imshow(x) 114 | plt.show() 115 | -------------------------------------------------------------------------------- /neuro_net_36_transfer_lr.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from PIL import Image 4 | 5 | import torch 6 | import torch.utils.data as data 7 | from torchvision import models 8 | import torchvision.transforms.v2 as tfs_v2 9 | import torch.nn as nn 10 | import torch.optim as optim 11 | from tqdm import tqdm 12 | 13 | 14 | class DogDataset(data.Dataset): 15 | def __init__(self, path, train=True, transform=None): 16 | self.path = os.path.join(path, "train" if train else "test") 17 | self.transform = transform 18 | 19 | with open(os.path.join(self.path, "format.json"), "r") as fp: 20 | self.format = json.load(fp) 21 | 22 | self.length = 0 23 | self.files = [] 24 | self.targets = torch.eye(10) 25 | 26 | for _dir, _target in self.format.items(): 27 | path = os.path.join(self.path, _dir) 28 | list_files = os.listdir(path) 29 | self.length += len(list_files) 30 | self.files.extend(map(lambda _x: (os.path.join(path, _x), _target), list_files)) 31 | 32 | def __getitem__(self, item): 33 | path_file, target = self.files[item] 34 | t = self.targets[target] 35 | img = Image.open(path_file) 36 | 37 | if self.transform: 38 | img = self.transform(img) 39 | 40 | return img, t 41 | 42 | def __len__(self): 43 | return self.length 44 | 45 | 46 | resnet_weights = models.ResNet50_Weights.DEFAULT 47 | transforms = resnet_weights.transforms() 48 | 49 | model = models.resnet50(weights=resnet_weights) 50 | model.requires_grad_(False) 51 | model.fc = nn.Linear(512*4, 10) 52 | model.fc.requires_grad_(True) 53 | 54 | # transforms = tfs_v2.Compose([tfs_v2.ToImage(), tfs_v2.ToDtype(torch.float32, scale=True)]) 55 | d_train = 
DogDataset(r"C:\datasets\dogs", transform=transforms) 56 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 57 | 58 | optimizer = optim.Adam(params=model.fc.parameters(), lr=0.001, weight_decay=0.001) 59 | loss_function = nn.CrossEntropyLoss() 60 | epochs = 3 61 | model.train() 62 | 63 | for _e in range(epochs): 64 | loss_mean = 0 65 | lm_count = 0 66 | 67 | train_tqdm = tqdm(train_data, leave=True) 68 | for x_train, y_train in train_tqdm: 69 | predict = model(x_train) 70 | loss = loss_function(predict, y_train) 71 | 72 | optimizer.zero_grad() 73 | loss.backward() 74 | optimizer.step() 75 | 76 | lm_count += 1 77 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 78 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 79 | 80 | st = model.state_dict() 81 | torch.save(st, 'model_transfer_resnet.tar') 82 | # st = torch.load('model_transfer_resnet.tar', weights_only=False) 83 | # model.load_state_dict(st) 84 | 85 | d_test = DogDataset(r"C:\datasets\dogs", train=False, transform=transforms) 86 | test_data = data.DataLoader(d_test, batch_size=50, shuffle=False) 87 | 88 | # тестирование обученной НС 89 | Q = 0 90 | P = 0 91 | count = 0 92 | model.eval() 93 | 94 | test_tqdm = tqdm(test_data, leave=True) 95 | for x_test, y_test in test_tqdm: 96 | with torch.no_grad(): 97 | p = model(x_test) 98 | p2 = torch.argmax(p, dim=1) 99 | y = torch.argmax(y_test, dim=1) 100 | P += torch.sum(p2 == y).item() 101 | Q += loss_function(p, y_test).item() 102 | count += 1 103 | 104 | Q /= count 105 | P /= len(d_test) 106 | print(Q) 107 | print(P) 108 | -------------------------------------------------------------------------------- /neuro_net_41_rnn.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import re 4 | 5 | from PIL import Image 6 | import matplotlib.pyplot as plt 7 | 8 | from tqdm import tqdm 9 | import torch 10 | from torch.utils.data import BatchSampler, SequentialSampler 11 | import torch.utils.data as data 12 | import torchvision 13 | from torchvision import models 14 | import torchvision.transforms.v2 as tfs_v2 15 | import torch.nn as nn 16 | import torch.optim as optim 17 | 18 | 19 | class CharsDataset(data.Dataset): 20 | def __init__(self, path, prev_chars=3): 21 | self.prev_chars = prev_chars 22 | 23 | with open(path, 'r', encoding='utf-8') as f: 24 | self.text = f.read() 25 | self.text = self.text.replace('\ufeff', '') # убираем первый невидимый символ 26 | self.text = re.sub(r'[^А-яA-z0-9.,?;: ]', '', self.text) # заменяем все неразрешенные символы на пустые символы 27 | 28 | self.text = self.text.lower() 29 | self.alphabet = set(self.text) 30 | self.int_to_alpha = dict(enumerate(sorted(self.alphabet))) 31 | self.alpha_to_int = {b: a for a, b in self.int_to_alpha.items()} 32 | # self.alphabet = {'а': 0, 'б': 1, 'в': 2, 'г': 3, 'д': 4, 'е': 5, 'ё': 6, 'ж': 7, 'з': 8, 'и': 9, 33 | # 'й': 10, 'к': 11, 'л': 12, 'м': 13, 'н': 14, 'о': 15, 'п': 16, 'р': 17, 'с': 18, 34 | # 'т': 19, 'у': 20, 'ф': 21, 'х': 22, 'ц': 23, 'ч': 24, 'ш': 25, 'щ': 26, 'ъ': 27, 35 | # 'ы': 28, 'ь': 29, 'э': 30, 'ю': 31, 'я': 32, ' ': 33, '.': 34, '!': 35, '?': 36} 36 | self.num_characters = len(self.alphabet) 37 | self.onehots = torch.eye(self.num_characters) 38 | 39 | def __getitem__(self, item): 40 | _data = torch.vstack([self.onehots[self.alpha_to_int[self.text[x]]] for x in range(item, item+self.prev_chars)]) 41 | ch = self.text[item+self.prev_chars] 42 | t = self.alpha_to_int[ch] 
43 | return _data, t 44 | 45 | def __len__(self): 46 | return len(self.text) - 1 - self.prev_chars 47 | 48 | 49 | class TextRNN(nn.Module): 50 | def __init__(self, in_features, out_features): 51 | super().__init__() 52 | self.hidden_size = 64 53 | self.in_features = in_features 54 | self.out_features = out_features 55 | 56 | self.rnn = nn.RNN(in_features, self.hidden_size, batch_first=True) 57 | self.out = nn.Linear(self.hidden_size, out_features) 58 | 59 | def forward(self, x): 60 | x, h = self.rnn(x) 61 | y = self.out(h) 62 | return y 63 | 64 | 65 | d_train = CharsDataset("train_data_true", prev_chars=10) 66 | train_data = data.DataLoader(d_train, batch_size=8, shuffle=False) 67 | 68 | model = TextRNN(d_train.num_characters, d_train.num_characters) 69 | 70 | optimizer = optim.Adam(params=model.parameters(), lr=0.001) 71 | loss_func = nn.CrossEntropyLoss() 72 | 73 | epochs = 100 74 | model.train() 75 | 76 | for _e in range(epochs): 77 | loss_mean = 0 78 | lm_count = 0 79 | 80 | train_tqdm = tqdm(train_data, leave=True) 81 | for x_train, y_train in train_tqdm: 82 | predict = model(x_train).squeeze(0) 83 | loss = loss_func(predict, y_train.long()) 84 | 85 | optimizer.zero_grad() 86 | loss.backward() 87 | optimizer.step() 88 | 89 | lm_count += 1 90 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 91 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 92 | 93 | st = model.state_dict() 94 | torch.save(st, 'model_rnn_1.tar') 95 | 96 | # st = torch.load('model_rnn_1.tar', weights_only=False) 97 | # model.load_state_dict(st) 98 | 99 | model.eval() 100 | predict = "Мой дядя самых".lower() 101 | total = 40 102 | 103 | for _ in range(total): 104 | _data = torch.vstack([d_train.onehots[d_train.alpha_to_int[predict[-x]]] for x in range(d_train.prev_chars, 0, -1)]) 105 | p = model(_data.unsqueeze(0)).squeeze(0) 106 | indx = torch.argmax(p, dim=1) 107 | predict += d_train.int_to_alpha[indx.item()] 108 | 109 | print(predict) 110 | -------------------------------------------------------------------------------- /neuro_net_43_rnn_words.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import re 4 | 5 | from navec import Navec 6 | from tqdm import tqdm 7 | import torch 8 | import torch.utils.data as data 9 | import torchvision 10 | from torchvision import models 11 | import torchvision.transforms.v2 as tfs_v2 12 | import torch.nn as nn 13 | import torch.optim as optim 14 | 15 | 16 | class WordsDataset(data.Dataset): 17 | def __init__(self, path, navec_emb, prev_words=3): 18 | self.prev_words = prev_words 19 | self.navec_emb = navec_emb 20 | 21 | with open(path, 'r', encoding='utf-8') as f: 22 | self.text = f.read() 23 | self.text = self.text.replace('\ufeff', '') # убираем первый невидимый символ 24 | self.text = self.text.replace('\n', ' ') 25 | self.text = re.sub(r'[^А-яA-z- ]', '', self.text) # удаляем все неразрешенные символы 26 | 27 | self.words = self.text.lower().split() 28 | self.words = [word for word in self.words if word in self.navec_emb] # оставляем слова, которые есть в словаре 29 | vocab = set(self.words) 30 | 31 | self.int_to_word = dict(enumerate(vocab)) 32 | self.word_to_int = {b: a for a, b in self.int_to_word.items()} 33 | self.vocab_size = len(vocab) 34 | 35 | def __getitem__(self, item): 36 | _data = torch.vstack([torch.tensor(self.navec_emb[self.words[x]]) for x in range(item, item+self.prev_words)]) 37 | word = self.words[item+self.prev_words] 38 | t = 
self.word_to_int[word] 39 | return _data, t 40 | 41 | def __len__(self): 42 | return len(self.words) - 1 - self.prev_words 43 | 44 | 45 | class WordsRNN(nn.Module): 46 | def __init__(self, in_features, out_features): 47 | super().__init__() 48 | self.hidden_size = 256 49 | self.in_features = in_features 50 | self.out_features = out_features 51 | 52 | self.rnn = nn.RNN(in_features, self.hidden_size, batch_first=True) 53 | self.out = nn.Linear(self.hidden_size, out_features) 54 | 55 | def forward(self, x): 56 | x, h = self.rnn(x) 57 | y = self.out(h) 58 | return y 59 | 60 | 61 | path = 'navec_hudlit_v1_12B_500K_300d_100q.tar' 62 | navec = Navec.load(path) 63 | 64 | d_train = WordsDataset("text_2", navec, prev_words=3) 65 | train_data = data.DataLoader(d_train, batch_size=8, shuffle=False) 66 | 67 | model = WordsRNN(300, d_train.vocab_size) 68 | 69 | optimizer = optim.Adam(params=model.parameters(), lr=0.001, weight_decay=0.0001) 70 | loss_func = nn.CrossEntropyLoss() 71 | 72 | epochs = 20 73 | model.train() 74 | 75 | for _e in range(epochs): 76 | loss_mean = 0 77 | lm_count = 0 78 | 79 | train_tqdm = tqdm(train_data, leave=True) 80 | for x_train, y_train in train_tqdm: 81 | predict = model(x_train).squeeze(0) 82 | loss = loss_func(predict, y_train.long()) 83 | 84 | optimizer.zero_grad() 85 | loss.backward() 86 | optimizer.step() 87 | 88 | lm_count += 1 89 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 90 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 91 | 92 | st = model.state_dict() 93 | torch.save(st, 'model_rnn_words.tar') 94 | 95 | model.eval() 96 | predict = "подумал встал и снова лег".lower().split() 97 | total = 10 98 | 99 | for _ in range(total): 100 | _data = torch.vstack([torch.tensor(d_train.navec_emb[predict[-x]]) for x in range(d_train.prev_words, 0, -1)]) 101 | p = model(_data.unsqueeze(0)).squeeze(0) 102 | indx = torch.argmax(p, dim=1) 103 | predict.append(d_train.int_to_word[indx.item()]) 104 | 105 | print(" ".join(predict)) 106 | -------------------------------------------------------------------------------- /neuro_net_45_rnn_bidirect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import re 4 | 5 | from navec import Navec 6 | from tqdm import tqdm 7 | import torch 8 | import torch.utils.data as data 9 | import torchvision 10 | from torchvision import models 11 | import torchvision.transforms.v2 as tfs_v2 12 | import torch.nn as nn 13 | import torch.optim as optim 14 | 15 | 16 | class PhraseDataset(data.Dataset): 17 | def __init__(self, path_true, path_false, navec_emb, batch_size=8): 18 | self.navec_emb = navec_emb 19 | self.batch_size = batch_size 20 | 21 | with open(path_true, 'r', encoding='utf-8') as f: 22 | phrase_true = f.readlines() 23 | self._clear_phrase(phrase_true) 24 | 25 | with open(path_false, 'r', encoding='utf-8') as f: 26 | phrase_false = f.readlines() 27 | self._clear_phrase(phrase_false) 28 | 29 | self.phrase_lst = [(_x, 0) for _x in phrase_true] + [(_x, 1) for _x in phrase_false] 30 | self.phrase_lst.sort(key=lambda _x: len(_x[0])) 31 | self.dataset_len = len(self.phrase_lst) 32 | 33 | def _clear_phrase(self, p_lst): 34 | for _i, _p in enumerate(p_lst): 35 | _p = _p.lower().replace('\ufeff', '').strip() 36 | _p = re.sub(r'[^А-яA-z- ]', '', _p) 37 | _words = _p.split() 38 | _words = [w for w in _words if w in self.navec_emb] 39 | p_lst[_i] = _words 40 | 41 | def __getitem__(self, item): 42 | item *= self.batch_size 43 
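# PhraseDataset assembles whole mini-batches itself (which is why the DataLoader below is created
# with batch_size=1): `item` indexes a batch of self.batch_size phrases, the phrase list is
# pre-sorted by length, and shorter phrases in the slice are padded with torch.zeros(300)
# up to the longest phrase of that slice.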
| item_last = item + self.batch_size 44 | if item_last > self.dataset_len: 45 | item_last = self.dataset_len 46 | 47 | _data = [] 48 | _target = [] 49 | max_length = len(self.phrase_lst[item_last-1][0]) 50 | 51 | for i in range(item, item_last): 52 | words_emb = [] 53 | phrase = self.phrase_lst[i] 54 | length = len(phrase[0]) 55 | 56 | for k in range(max_length): 57 | t = torch.tensor(self.navec_emb[phrase[0][k]], dtype=torch.float32) if k < length else torch.zeros(300) 58 | words_emb.append(t) 59 | 60 | _data.append(torch.vstack(words_emb)) 61 | _target.append(torch.tensor(phrase[1], dtype=torch.float32)) 62 | 63 | _data_batch = torch.stack(_data) 64 | _target = torch.vstack(_target) 65 | return _data_batch, _target 66 | 67 | def __len__(self): 68 | last = 0 if self.dataset_len % self.batch_size == 0 else 1 69 | return self.dataset_len // self.batch_size + last 70 | 71 | 72 | class WordsRNN(nn.Module): 73 | def __init__(self, in_features, out_features): 74 | super().__init__() 75 | self.hidden_size = 16 76 | self.in_features = in_features 77 | self.out_features = out_features 78 | 79 | self.rnn = nn.RNN(in_features, self.hidden_size, batch_first=True, bidirectional=True) 80 | self.out = nn.Linear(self.hidden_size * 2, out_features) 81 | 82 | def forward(self, x): 83 | x, h = self.rnn(x) 84 | hh = torch.cat((h[-2, :, :], h[-1, :, :]), dim=1) 85 | y = self.out(hh) 86 | return y 87 | 88 | 89 | path = 'navec_hudlit_v1_12B_500K_300d_100q.tar' 90 | navec = Navec.load(path) 91 | 92 | d_train = PhraseDataset("train_data_true", "train_data_false", navec) 93 | train_data = data.DataLoader(d_train, batch_size=1, shuffle=True) 94 | 95 | model = WordsRNN(300, 1) 96 | 97 | optimizer = optim.Adam(params=model.parameters(), lr=0.001, weight_decay=0.001) 98 | loss_func = nn.BCEWithLogitsLoss() 99 | 100 | epochs = 20 101 | model.train() 102 | 103 | for _e in range(epochs): 104 | loss_mean = 0 105 | lm_count = 0 106 | 107 | train_tqdm = tqdm(train_data, leave=True) 108 | for x_train, y_train in train_tqdm: 109 | predict = model(x_train.squeeze(0)).squeeze(0) 110 | loss = loss_func(predict, y_train.squeeze(0)) 111 | 112 | optimizer.zero_grad() 113 | loss.backward() 114 | optimizer.step() 115 | 116 | lm_count += 1 117 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 118 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 119 | 120 | st = model.state_dict() 121 | torch.save(st, 'model_rnn_bidir.tar') 122 | 123 | model.eval() 124 | 125 | phrase = "Сегодня пасмурная погода" 126 | phrase_lst = phrase.lower().split() 127 | phrase_lst = [torch.tensor(navec[w]) for w in phrase_lst if w in navec] 128 | _data_batch = torch.stack(phrase_lst) 129 | predict = model(_data_batch.unsqueeze(0)).squeeze(0) 130 | p = torch.nn.functional.sigmoid(predict).item() 131 | print(p) 132 | print(phrase, ":", "положительное" if p < 0.5 else "отрицательное") 133 | -------------------------------------------------------------------------------- /neuro_net_47_gru_words.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import re 4 | 5 | from navec import Navec 6 | from tqdm import tqdm 7 | import torch 8 | import torch.utils.data as data 9 | import torchvision 10 | from torchvision import models 11 | import torchvision.transforms.v2 as tfs_v2 12 | import torch.nn as nn 13 | import torch.optim as optim 14 | 15 | 16 | class WordsDataset(data.Dataset): 17 | def __init__(self, path, navec_emb, prev_words=3): 18 | 
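# Dataset for next-word prediction: each sample is a (prev_words x 300) stack of
# navec embeddings of consecutive words, and the target is the vocabulary index
# of the word that follows them (see __getitem__ below).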
self.prev_words = prev_words 19 | self.navec_emb = navec_emb 20 | 21 | with open(path, 'r', encoding='utf-8') as f: 22 | self.text = f.read() 23 | self.text = self.text.replace('\ufeff', '') # убираем первый невидимый символ 24 | self.text = self.text.replace('\n', ' ') 25 | self.text = re.sub(r'[^А-яA-z- ]', '', self.text) # удаляем все неразрешенные символы 26 | 27 | self.words = self.text.lower().split() 28 | self.words = [word for word in self.words if word in self.navec_emb] # оставляем слова, которые есть в словаре 29 | vocab = set(self.words) 30 | 31 | self.int_to_word = dict(enumerate(vocab)) 32 | self.word_to_int = {b: a for a, b in self.int_to_word.items()} 33 | self.vocab_size = len(vocab) 34 | 35 | def __getitem__(self, item): 36 | _data = torch.vstack([torch.tensor(self.navec_emb[self.words[x]]) for x in range(item, item+self.prev_words)]) 37 | word = self.words[item+self.prev_words] 38 | t = self.word_to_int[word] 39 | return _data, t 40 | 41 | def __len__(self): 42 | return len(self.words) - 1 - self.prev_words 43 | 44 | 45 | class WordsRNN(nn.Module): 46 | def __init__(self, in_features, out_features): 47 | super().__init__() 48 | self.hidden_size = 64 49 | self.in_features = in_features 50 | self.out_features = out_features 51 | 52 | self.rnn = nn.GRU(in_features, self.hidden_size, batch_first=True) 53 | self.out = nn.Linear(self.hidden_size, out_features) 54 | 55 | def forward(self, x): 56 | x, h = self.rnn(x) 57 | y = self.out(h) 58 | return y 59 | 60 | 61 | path = 'navec_hudlit_v1_12B_500K_300d_100q.tar' 62 | navec = Navec.load(path) 63 | 64 | d_train = WordsDataset("text_2", navec, prev_words=3) 65 | train_data = data.DataLoader(d_train, batch_size=8, shuffle=False) 66 | 67 | model = WordsRNN(300, d_train.vocab_size) 68 | 69 | optimizer = optim.Adam(params=model.parameters(), lr=0.001, weight_decay=0.0001) 70 | loss_func = nn.CrossEntropyLoss() 71 | 72 | epochs = 20 73 | model.train() 74 | 75 | for _e in range(epochs): 76 | loss_mean = 0 77 | lm_count = 0 78 | 79 | train_tqdm = tqdm(train_data, leave=True) 80 | for x_train, y_train in train_tqdm: 81 | predict = model(x_train).squeeze(0) 82 | loss = loss_func(predict, y_train.long()) 83 | 84 | optimizer.zero_grad() 85 | loss.backward() 86 | optimizer.step() 87 | 88 | lm_count += 1 89 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 90 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 91 | 92 | st = model.state_dict() 93 | torch.save(st, 'model_rnn_words.tar') 94 | 95 | model.eval() 96 | predict = "подумал встал и снова лег".lower().split() 97 | total = 10 98 | 99 | for _ in range(total): 100 | _data = torch.vstack([torch.tensor(d_train.navec_emb[predict[-x]]) for x in range(d_train.prev_words, 0, -1)]) 101 | p = model(_data.unsqueeze(0)).squeeze(0) 102 | indx = torch.argmax(p, dim=1) 103 | predict.append(d_train.int_to_word[indx.item()]) 104 | 105 | print(" ".join(predict)) 106 | -------------------------------------------------------------------------------- /neuro_net_48_autoencoder.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | 5 | from tqdm import tqdm 6 | import torch 7 | import torch.utils.data as data 8 | import torchvision 9 | import torchvision.transforms.v2 as tfs_v2 10 | import torch.nn as nn 11 | import torch.optim as optim 12 | 13 | 14 | class AutoEncoderMNIST(nn.Module): 15 | def __init__(self, input_dim, output_dim, hidden_dim): 16 | 
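# Fully connected autoencoder: the encoder compresses the flattened 28x28 image
# (input_dim = 784) through 128- and 64-unit ELU layers down to hidden_dim values;
# the decoder mirrors this back to output_dim with a final Sigmoid, and the
# training loop below minimizes the MSE between the reconstruction and the input itself.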
super().__init__() 17 | self.hidden_dim = hidden_dim 18 | self.encoder = nn.Sequential( 19 | nn.Linear(input_dim, 128), 20 | nn.ELU(inplace=True), 21 | nn.Linear(128, 64), 22 | nn.ELU(inplace=True), 23 | nn.Linear(64, self.hidden_dim) 24 | ) 25 | 26 | self.decoder = nn.Sequential( 27 | nn.Linear(self.hidden_dim, 64), 28 | nn.ELU(inplace=True), 29 | nn.Linear(64, 128), 30 | nn.ELU(inplace=True), 31 | nn.Linear(128, output_dim), 32 | nn.Sigmoid() 33 | ) 34 | 35 | def forward(self, x): 36 | h = self.encoder(x) 37 | x = self.decoder(h) 38 | 39 | return x, h 40 | 41 | 42 | model = AutoEncoderMNIST(784, 784, 28) 43 | transforms = tfs_v2.Compose([tfs_v2.ToImage(), tfs_v2.ToDtype(dtype=torch.float32, scale=True), 44 | tfs_v2.Lambda(lambda _img: _img.ravel())]) 45 | 46 | d_train = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True, transform=transforms) 47 | train_data = data.DataLoader(d_train, batch_size=100, shuffle=True) 48 | 49 | optimizer = optim.Adam(params=model.parameters(), lr=0.001) 50 | loss_func = nn.MSELoss() 51 | 52 | epochs = 5 53 | model.train() 54 | 55 | for _e in range(epochs): 56 | loss_mean = 0 57 | lm_count = 0 58 | 59 | train_tqdm = tqdm(train_data, leave=True) 60 | for x_train, y_train in train_tqdm: 61 | predict, _ = model(x_train) 62 | loss = loss_func(predict, x_train) 63 | 64 | optimizer.zero_grad() 65 | loss.backward() 66 | optimizer.step() 67 | 68 | lm_count += 1 69 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 70 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 71 | 72 | st = model.state_dict() 73 | torch.save(st, 'model_autoencoder.tar') 74 | 75 | n = 10 76 | model.eval() 77 | 78 | plt.figure(figsize=(2*n, 2*2)) 79 | for i in range(n): 80 | img, _ = d_train[i] 81 | predict, _ = model(img.unsqueeze(0)) 82 | 83 | predict = predict.squeeze(0).view(28, 28) 84 | img = img.view(28, 28) 85 | 86 | dec_img = predict.detach().numpy() 87 | img = img.detach().numpy() 88 | 89 | ax = plt.subplot(2, n, i+1) 90 | plt.imshow(img, cmap='gray') 91 | ax.get_xaxis().set_visible(False) 92 | ax.get_yaxis().set_visible(False) 93 | 94 | ax2 = plt.subplot(2, n, i+n+1) 95 | plt.imshow(dec_img, cmap='gray') 96 | ax2.get_xaxis().set_visible(False) 97 | ax2.get_yaxis().set_visible(False) 98 | 99 | plt.show() 100 | -------------------------------------------------------------------------------- /neuro_net_48_homotop.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | import re 5 | 6 | from tqdm import tqdm 7 | import torch 8 | import torch.utils.data as data 9 | import torchvision 10 | from torchvision import models 11 | import torchvision.transforms.v2 as tfs_v2 12 | import torch.nn as nn 13 | import torch.optim as optim 14 | 15 | 16 | class AutoEncoderMNIST(nn.Module): 17 | def __init__(self, input_dim, output_dim, hidden_dim): 18 | super().__init__() 19 | self.hidden_dim = hidden_dim 20 | self.encoder = nn.Sequential( 21 | nn.Linear(input_dim, 128), 22 | nn.ELU(inplace=True), 23 | nn.Linear(128, 64), 24 | nn.ELU(inplace=True), 25 | nn.Linear(64, self.hidden_dim) 26 | ) 27 | 28 | self.decoder = nn.Sequential( 29 | nn.Linear(self.hidden_dim, 64), 30 | nn.ELU(inplace=True), 31 | nn.Linear(64, 128), 32 | nn.ELU(inplace=True), 33 | nn.Linear(128, output_dim), 34 | nn.Sigmoid() 35 | ) 36 | 37 | def forward(self, x): 38 | h = self.encoder(x) 39 | x = self.decoder(h) 40 | 41 | return x, h 42 | 43 | 44 | model = 
AutoEncoderMNIST(784, 784, 28) 45 | transforms = tfs_v2.Compose([tfs_v2.ToImage(), tfs_v2.ToDtype(dtype=torch.float32, scale=True), 46 | tfs_v2.Lambda(lambda _img: _img.ravel())]) 47 | 48 | d_train = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True, transform=transforms) 49 | train_data = data.DataLoader(d_train, batch_size=32, shuffle=True) 50 | 51 | optimizer = optim.Adam(params=model.parameters(), lr=0.001) 52 | loss_func = nn.MSELoss() 53 | 54 | epochs = 5 55 | model.train() 56 | 57 | for _e in range(epochs): 58 | loss_mean = 0 59 | lm_count = 0 60 | 61 | train_tqdm = tqdm(train_data, leave=True) 62 | for x_train, y_train in train_tqdm: 63 | predict, _ = model(x_train) 64 | loss = loss_func(predict, x_train) 65 | 66 | optimizer.zero_grad() 67 | loss.backward() 68 | optimizer.step() 69 | 70 | lm_count += 1 71 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 72 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 73 | 74 | # st = model.state_dict() 75 | # torch.save(st, 'model_autoencoder.tar') 76 | 77 | st = torch.load('model_autoencoder.tar', weights_only=True) 78 | model.load_state_dict(st) 79 | 80 | n = 10 81 | model.eval() 82 | 83 | plt.figure(figsize=(2*n, 2*2)) 84 | 85 | # фрагмент для формирования и отображения гомотопии изображений по прямой 86 | frm, to = d_train.data[d_train.targets == 5][10:12] 87 | frm = transforms(frm) 88 | to = transforms(to) 89 | 90 | for i, t in enumerate(np.linspace(0., 1., n)): 91 | img = frm * (1-t) + to * t # Гомотопия по прямой 92 | predict, _ = model(img.unsqueeze(0)) 93 | predict = predict.squeeze(0).view(28, 28) 94 | dec_img = predict.detach().numpy() 95 | img = img.view(28, 28).numpy() 96 | 97 | ax = plt.subplot(2, n, i+1) 98 | plt.imshow(img, cmap='gray') 99 | ax.get_xaxis().set_visible(False) 100 | ax.get_yaxis().set_visible(False) 101 | 102 | ax2 = plt.subplot(2, n, i+n+1) 103 | plt.imshow(dec_img, cmap='gray') 104 | ax2.get_xaxis().set_visible(False) 105 | ax2.get_yaxis().set_visible(False) 106 | 107 | 108 | # результат декодирования n=10 первых изображений выборки 109 | # for i in range(n): 110 | # img, _ = d_train[i] 111 | # predict, _ = model(img.unsqueeze(0)) 112 | # 113 | # predict = predict.squeeze(0).view(28, 28) 114 | # img = img.view(28, 28) 115 | # 116 | # dec_img = predict.detach().numpy() 117 | # img = img.detach().numpy() 118 | # 119 | # ax = plt.subplot(2, n, i+1) 120 | # plt.imshow(img, cmap='gray') 121 | # ax.get_xaxis().set_visible(False) 122 | # ax.get_yaxis().set_visible(False) 123 | # 124 | # ax2 = plt.subplot(2, n, i+n+1) 125 | # plt.imshow(dec_img, cmap='gray') 126 | # ax2.get_xaxis().set_visible(False) 127 | # ax2.get_yaxis().set_visible(False) 128 | 129 | plt.show() 130 | -------------------------------------------------------------------------------- /neuro_net_49_autoencoder_1.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | 5 | from tqdm import tqdm 6 | import torch 7 | import torch.utils.data as data 8 | import torchvision 9 | import torchvision.transforms.v2 as tfs_v2 10 | import torch.nn as nn 11 | import torch.optim as optim 12 | 13 | 14 | class AutoEncoderMNIST(nn.Module): 15 | def __init__(self, input_dim, output_dim, hidden_dim): 16 | super().__init__() 17 | self.hidden_dim = hidden_dim 18 | self.encoder = nn.Sequential( 19 | nn.Linear(input_dim, 128), 20 | nn.ELU(inplace=True), 21 | nn.Linear(128, 64), 22 | 
nn.ELU(inplace=True), 23 | nn.Linear(64, self.hidden_dim) 24 | ) 25 | 26 | self.decoder = nn.Sequential( 27 | nn.Linear(self.hidden_dim, 64), 28 | nn.ELU(inplace=True), 29 | nn.Linear(64, 128), 30 | nn.ELU(inplace=True), 31 | nn.Linear(128, output_dim), 32 | nn.Sigmoid() 33 | ) 34 | 35 | def forward(self, x): 36 | h = self.encoder(x) 37 | x = self.decoder(h) 38 | 39 | return x, h 40 | 41 | 42 | model = AutoEncoderMNIST(784, 784, 2) 43 | transforms = tfs_v2.Compose([tfs_v2.ToImage(), tfs_v2.ToDtype(dtype=torch.float32, scale=True), 44 | tfs_v2.Lambda(lambda _img: _img.ravel())]) 45 | 46 | d_train = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True, transform=transforms) 47 | train_data = data.DataLoader(d_train, batch_size=100, shuffle=True) 48 | 49 | optimizer = optim.Adam(params=model.parameters(), lr=0.001) 50 | loss_func = nn.MSELoss() 51 | 52 | epochs = 0 53 | model.train() 54 | 55 | for _e in range(epochs): 56 | loss_mean = 0 57 | lm_count = 0 58 | 59 | train_tqdm = tqdm(train_data, leave=True) 60 | for x_train, y_train in train_tqdm: 61 | predict, _ = model(x_train) 62 | loss = loss_func(predict, x_train) 63 | 64 | optimizer.zero_grad() 65 | loss.backward() 66 | optimizer.step() 67 | 68 | lm_count += 1 69 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 70 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 71 | 72 | # st = model.state_dict() 73 | # torch.save(st, 'model_vae.tar') 74 | 75 | st = torch.load('model_vae.tar', weights_only=True) 76 | model.load_state_dict(st) 77 | 78 | model.eval() 79 | 80 | # d_test = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=False, transform=transforms) 81 | # x_data = transforms(d_test.data).view(len(d_test), -1) 82 | # 83 | # h = model.encoder(x_data) 84 | # h = h.detach().numpy() 85 | # 86 | # plt.scatter(h[:, 0], h[:, 1]) 87 | # plt.grid() 88 | 89 | h = torch.tensor([-40, -20], dtype=torch.float32) 90 | predict = model.decoder(h.unsqueeze(0)) 91 | predict = predict.detach().squeeze(0).view(28, 28) 92 | dec_img = predict.numpy() 93 | plt.imshow(dec_img, cmap='gray') 94 | 95 | plt.show() 96 | -------------------------------------------------------------------------------- /neuro_net_49_autoencoder_2.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | 5 | from tqdm import tqdm 6 | import torch 7 | import torch.utils.data as data 8 | import torchvision 9 | import torchvision.transforms.v2 as tfs_v2 10 | import torch.nn as nn 11 | import torch.optim as optim 12 | 13 | 14 | class AutoEncoderMNIST(nn.Module): 15 | def __init__(self, input_dim, output_dim, hidden_dim): 16 | super().__init__() 17 | self.hidden_dim = hidden_dim 18 | self.encoder = nn.Sequential( 19 | nn.Linear(input_dim, 128, bias=False), 20 | nn.ELU(inplace=True), 21 | nn.BatchNorm1d(128), 22 | nn.Linear(128, 64, bias=False), 23 | nn.ELU(inplace=True), 24 | nn.BatchNorm1d(64), 25 | nn.Linear(64, self.hidden_dim) 26 | ) 27 | 28 | self.decoder = nn.Sequential( 29 | nn.Linear(self.hidden_dim, 64), 30 | nn.ELU(inplace=True), 31 | nn.Linear(64, 128), 32 | nn.ELU(inplace=True), 33 | nn.Linear(128, output_dim), 34 | nn.Sigmoid() 35 | ) 36 | 37 | def forward(self, x): 38 | h = self.encoder(x) 39 | x = self.decoder(h) 40 | 41 | return x, h 42 | 43 | 44 | model = AutoEncoderMNIST(784, 784, 2) 45 | transforms = tfs_v2.Compose([tfs_v2.ToImage(), tfs_v2.ToDtype(dtype=torch.float32, 
scale=True), 46 | tfs_v2.Lambda(lambda _img: _img.ravel())]) 47 | 48 | d_train = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True, transform=transforms) 49 | train_data = data.DataLoader(d_train, batch_size=100, shuffle=True) 50 | 51 | optimizer = optim.Adam(params=model.parameters(), lr=0.001) 52 | loss_func = nn.MSELoss() 53 | 54 | epochs = 5 55 | model.train() 56 | 57 | for _e in range(epochs): 58 | loss_mean = 0 59 | lm_count = 0 60 | 61 | train_tqdm = tqdm(train_data, leave=True) 62 | for x_train, y_train in train_tqdm: 63 | predict, _ = model(x_train) 64 | loss = loss_func(predict, x_train) 65 | 66 | optimizer.zero_grad() 67 | loss.backward() 68 | optimizer.step() 69 | 70 | lm_count += 1 71 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 72 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 73 | 74 | st = model.state_dict() 75 | torch.save(st, 'model_vae_2.tar') 76 | 77 | # st = torch.load('model_vae.tar', weights_only=True) 78 | # model.load_state_dict(st) 79 | 80 | model.eval() 81 | 82 | d_test = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=False, transform=transforms) 83 | x_data = transforms(d_test.data).view(len(d_test), -1) 84 | 85 | h = model.encoder(x_data) 86 | h = h.detach().numpy() 87 | 88 | plt.scatter(h[:, 0], h[:, 1]) 89 | plt.grid() 90 | 91 | # h = torch.tensor([-40, -20], dtype=torch.float32) 92 | # predict = model.decoder(h.unsqueeze(0)) 93 | # predict = predict.detach().squeeze(0).view(28, 28) 94 | # dec_img = predict.numpy() 95 | # plt.imshow(dec_img, cmap='gray') 96 | 97 | plt.show() 98 | -------------------------------------------------------------------------------- /neuro_net_50_vae.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | 5 | from tqdm import tqdm 6 | import torch 7 | import torch.utils.data as data 8 | import torchvision 9 | import torchvision.transforms.v2 as tfs_v2 10 | import torch.nn as nn 11 | import torch.optim as optim 12 | 13 | 14 | class AutoEncoderMNIST(nn.Module): 15 | def __init__(self, input_dim, output_dim, hidden_dim): 16 | super().__init__() 17 | self.hidden_dim = hidden_dim 18 | self.encoder = nn.Sequential( 19 | nn.Linear(input_dim, 128), 20 | nn.ELU(inplace=True), 21 | nn.BatchNorm1d(128), 22 | nn.Linear(128, 64), 23 | nn.ELU(inplace=True), 24 | nn.BatchNorm1d(64) 25 | ) 26 | 27 | self.h_mean = nn.Linear(64, self.hidden_dim) 28 | self.h_log_var = nn.Linear(64, self.hidden_dim) 29 | 30 | self.decoder = nn.Sequential( 31 | nn.Linear(self.hidden_dim, 64), 32 | nn.ELU(inplace=True), 33 | nn.BatchNorm1d(64), 34 | nn.Linear(64, 128), 35 | nn.ELU(inplace=True), 36 | nn.BatchNorm1d(128), 37 | nn.Linear(128, output_dim), 38 | nn.Sigmoid() 39 | ) 40 | 41 | def forward(self, x): 42 | enc = self.encoder(x) 43 | 44 | h_mean = self.h_mean(enc) 45 | h_log_var = self.h_log_var(enc) 46 | 47 | noise = torch.normal(mean=torch.zeros_like(h_mean), std=torch.ones_like(h_log_var)) 48 | h = noise * torch.exp(h_log_var / 2) + h_mean 49 | x = self.decoder(h) 50 | 51 | return x, h, h_mean, h_log_var 52 | 53 | 54 | class VAELoss(nn.Module): 55 | def forward(self, x, y, h_mean, h_log_var): 56 | img_loss = torch.sum(torch.square(x - y), dim=-1) 57 | kl_loss = -0.5 * torch.sum(1 + h_log_var - torch.square(h_mean) - torch.exp(h_log_var), dim=-1) 58 | return torch.mean(img_loss + kl_loss) 59 | 60 | 61 | model = AutoEncoderMNIST(784, 784, 2) 62 | 
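# The forward pass above uses the reparameterization trick: a latent point is
# sampled as h = h_mean + exp(h_log_var / 2) * noise with noise ~ N(0, I), so the
# sampling stays differentiable with respect to h_mean and h_log_var. VAELoss adds
# to the squared reconstruction error the closed-form KL divergence between
# N(h_mean, exp(h_log_var)) and N(0, I):
#   KL = -0.5 * sum(1 + h_log_var - h_mean^2 - exp(h_log_var))
# The latent dimension is 2, so the latent space can be drawn as the 2D scatter plot below.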
transforms = tfs_v2.Compose([tfs_v2.ToImage(), tfs_v2.ToDtype(dtype=torch.float32, scale=True), 63 | tfs_v2.Lambda(lambda _img: _img.ravel())]) 64 | 65 | d_train = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=True, transform=transforms) 66 | train_data = data.DataLoader(d_train, batch_size=100, shuffle=True) 67 | 68 | optimizer = optim.Adam(params=model.parameters(), lr=0.001) 69 | loss_func = VAELoss() 70 | 71 | epochs = 5 72 | model.train() 73 | 74 | for _e in range(epochs): 75 | loss_mean = 0 76 | lm_count = 0 77 | 78 | train_tqdm = tqdm(train_data, leave=True) 79 | for x_train, y_train in train_tqdm: 80 | predict, _, h_mean, h_log_var = model(x_train) 81 | loss = loss_func(predict, x_train, h_mean, h_log_var) 82 | 83 | optimizer.zero_grad() 84 | loss.backward() 85 | optimizer.step() 86 | 87 | lm_count += 1 88 | loss_mean = 1/lm_count * loss.item() + (1 - 1/lm_count) * loss_mean 89 | train_tqdm.set_description(f"Epoch [{_e+1}/{epochs}], loss_mean={loss_mean:.3f}") 90 | 91 | st = model.state_dict() 92 | torch.save(st, 'model_vae_3.tar') 93 | 94 | # st = torch.load('model_vae.tar', weights_only=True) 95 | # model.load_state_dict(st) 96 | 97 | model.eval() 98 | 99 | d_test = torchvision.datasets.MNIST(r'C:\datasets\mnist', download=True, train=False, transform=transforms) 100 | x_data = transforms(d_test.data).view(len(d_test), -1) 101 | 102 | _, h, _, _ = model(x_data) 103 | h = h.detach().numpy() 104 | 105 | plt.scatter(h[:, 0], h[:, 1]) 106 | plt.grid() 107 | 108 | 109 | n = 5 110 | total = 2*n+1 111 | 112 | plt.figure(figsize=(total, total)) 113 | 114 | num = 1 115 | for i in range(-n, n+1): 116 | for j in range(-n, n+1): 117 | ax = plt.subplot(total, total, num) 118 | num += 1 119 | h = torch.tensor([3*i/n, 3*j/n], dtype=torch.float32) 120 | predict = model.decoder(h.unsqueeze(0)) 121 | predict = predict.detach().squeeze(0).view(28, 28) 122 | dec_img = predict.numpy() 123 | 124 | plt.imshow(dec_img, cmap='gray') 125 | ax.get_xaxis().set_visible(False) 126 | ax.get_yaxis().set_visible(False) 127 | 128 | plt.show() 129 | -------------------------------------------------------------------------------- /neuro_net_52_gan.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/selfedu-rus/neuro-pytorch/9bba8ea72b5986b5312106245ba22cadec0e95a5/neuro_net_52_gan.py -------------------------------------------------------------------------------- /neuro_net_9.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def act(x): 5 | return 0 if x < 0.5 else 1 6 | 7 | 8 | def go(house, rock, attr): 9 | X = torch.tensor([house, rock, attr], dtype=torch.float32) 10 | Wh = torch.tensor([[0.3, 0.3, 0], [0.4, -0.5, 1]]) # матрица 2x3 11 | Wout = torch.tensor([-1.0, 1.0]) # вектор 1х2 12 | 13 | Zh = torch.mv(Wh, X) # вычисляем сумму на входах нейронов скрытого слоя 14 | print(f"Значения сумм на нейронах скрытого слоя: {Zh}") 15 | 16 | Uh = torch.tensor([act(x) for x in Zh], dtype=torch.float32) 17 | print(f"Значения на выходах нейронов скрытого слоя: {Uh}") 18 | 19 | Zout = torch.dot(Wout, Uh) 20 | Y = act(Zout) 21 | print(f"Выходное значение НС: {Y}") 22 | 23 | return Y 24 | 25 | 26 | house = 1 27 | rock = 0 28 | attr = 1 29 | 30 | res = go(house, rock, attr) 31 | if res == 1: 32 | print("Ты мне нравишься") 33 | else: 34 | print("Созвонимся") 35 | -------------------------------------------------------------------------------- /neuro_net_9gpu.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | 3 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 4 | 5 | def act(x): 6 | return 0 if x < 0.5 else 1 7 | 8 | 9 | def go(house, rock, attr): 10 | X = torch.tensor([house, rock, attr], dtype=torch.float32, device=device) 11 | Wh = torch.tensor([[0.3, 0.3, 0], [0.4, -0.5, 1]], device=device) # матрица 2x3 12 | Wout = torch.tensor([-1.0, 1.0], device=device) # вектор 1х2 13 | 14 | Zh = torch.mv(Wh, X) # вычисляем сумму на входах нейронов скрытого слоя 15 | print(f"Значения сумм на нейронах скрытого слоя: {Zh}") 16 | 17 | Uh = torch.tensor([act(x) for x in Zh], dtype=torch.float32, device=device) 18 | print(f"Значения на выходах нейронов скрытого слоя: {Uh}") 19 | 20 | Zout = torch.dot(Wout, Uh) 21 | Y = act(Zout) 22 | print(f"Выходное значение НС: {Y}") 23 | 24 | return Y 25 | 26 | 27 | house = 1 28 | rock = 0 29 | attr = 1 30 | 31 | res = go(house, rock, attr) 32 | if res == 1: 33 | print("Ты мне нравишься") 34 | else: 35 | print("Созвонимся") 36 | -------------------------------------------------------------------------------- /solves/1.10.4: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 4 | 5 | t1 = torch.arange(1, 6, dtype=torch.int32) 6 | t2 = torch.ones(3, 5, dtype=torch.float32) 7 | 8 | t_res = torch.matmul(t2, t1.float()).cpu() 9 | -------------------------------------------------------------------------------- /solves/1.10.5: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 4 | 5 | targets = torch.rand(1000).to(device) 6 | predict = torch.rand(1000).to(device) 7 | 8 | Q = torch.mean((predict - targets) ** 2).cpu() 9 | -------------------------------------------------------------------------------- /solves/1.10.6: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 4 | 5 | w = torch.rand(1, 16) # вектор-строка 6 | X = torch.rand(16, 16) # матрица 16 x 16 7 | I = torch.eye(16) # единичная матрица 16 x 16 8 | lm = 0.5 # значение лямбда 9 | 10 | wT = w.transpose(1, 0) 11 | Q = w @ (X + I * lm) @ wT 12 | -------------------------------------------------------------------------------- /solves/1.10.7: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def sigma(x): 4 | return 1 / (1 + torch.exp(-x)) 5 | 6 | 7 | # все эти переменные в программе не менять, только тензоры разместить на device 8 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 9 | 10 | W = torch.rand(3) * 10 - 5 11 | bias = torch.rand(1) * 100 - 50 12 | 13 | batch_size = 8 # размер мини-батча 14 | X = torch.empty(batch_size, 3).normal_(mean=1.0, std=4.0) 15 | 16 | W = W.to(device) 17 | bias = bias.to(device) 18 | X = X.to(device) 19 | 20 | predict = sigma(torch.matmul(X, W) + bias) 21 | predict = predict.cpu() 22 | -------------------------------------------------------------------------------- /solves/1.11.1: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | w = torch.tensor([6, 3, -2], dtype=torch.float32) 4 | -------------------------------------------------------------------------------- 
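# A minimal sketch (an illustration, not one of the original solves): the hand-wired
# two-layer network from neuro_net_9.py expressed with nn.Linear layers whose weights
# are assigned manually; the 0.5-threshold activation act() is kept as a comparison.
import torch
import torch.nn as nn

hidden = nn.Linear(3, 2, bias=False)
out = nn.Linear(2, 1, bias=False)
with torch.no_grad():
    hidden.weight.copy_(torch.tensor([[0.3, 0.3, 0.0], [0.4, -0.5, 1.0]]))
    out.weight.copy_(torch.tensor([[-1.0, 1.0]]))

x = torch.tensor([1.0, 0.0, 1.0])   # house, rock, attr
u = (hidden(x) >= 0.5).float()      # threshold activation of the hidden layer
y = (out(u) >= 0.5).float()         # network output, 1.0 for this input
print(y.item())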
/solves/1.11.2: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | W1 = torch.tensor([(12, 2, -3), (12, -2, 3)], dtype=torch.float32) 5 | W2 = torch.tensor([-1, 1, 1], dtype=torch.float32) 6 | -------------------------------------------------------------------------------- /solves/1.11.3: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def act_sigma(x): 4 | return x ** 2 5 | 6 | 7 | def act_u(x): 8 | return x 9 | 10 | 11 | # тензор X в программе не менять 12 | batch_size = 16 # количество входных данных 13 | X = torch.tensor(list(map(float, input().split())), dtype=torch.float32).view(batch_size, 2) 14 | 15 | W1 = torch.tensor([(1, 0), (1, 0.5)], dtype=torch.float32) 16 | bias1 = torch.tensor([0, 0], dtype=torch.float32) 17 | W2 = torch.tensor([3, -2], dtype=torch.float32) 18 | bias2 = torch.tensor([7.8], dtype=torch.float32) 19 | 20 | h = torch.matmul(X, W1.transpose(1, 0) + bias1) 21 | h = act_sigma(h) 22 | predict = h @ W2 + bias2 23 | -------------------------------------------------------------------------------- /solves/1.11.4: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def act_sigma(x): 4 | return torch.sin(x) 5 | 6 | 7 | def act_u(x): 8 | return torch.sign(x - 0.5) 9 | 10 | 11 | # тензор X в программе не менять 12 | batch_size = 16 # количество входных данных 13 | X = torch.tensor(list(map(float, input().split())), dtype=torch.float32).view(batch_size, 2) 14 | 15 | W1 = torch.tensor([(0.5 * torch.pi, 0.5 * torch.pi), (0, 0.1)], dtype=torch.float32) 16 | bias1 = torch.tensor([1.5, -1.5 * torch.pi], dtype=torch.float32) 17 | W2 = torch.tensor([2, -3.5], dtype=torch.float32) 18 | bias2 = torch.tensor([0], dtype=torch.float32) 19 | 20 | h = torch.matmul(X, W1.transpose(1, 0)) + bias1 21 | h = act_sigma(h) 22 | predict = act_u(h @ W2 + bias2) 23 | -------------------------------------------------------------------------------- /solves/1.11.5: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # тензор X в программе не менять 4 | batch_size = 32 # количество входных данных 5 | X = torch.tensor(list(map(float, input().split())), dtype=torch.float32).view(batch_size, 2) 6 | 7 | W1 = torch.tensor([(3, -4), (9, -5)], dtype=torch.float32) 8 | bias1 = torch.tensor([18, 12], dtype=torch.float32) 9 | W2 = torch.tensor([1, 1], dtype=torch.float32) 10 | bias2 = torch.tensor([-1], dtype=torch.float32) 11 | 12 | h = torch.matmul(X, W1.transpose(1, 0)) + bias1 13 | h = torch.sign(h) 14 | predict = torch.sign(h @ W2 + bias2) 15 | -------------------------------------------------------------------------------- /solves/1.11.6: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # тензор X в программе не менять 4 | batch_size = 32 # количество входных данных 5 | X = torch.tensor(list(map(float, input().split())), dtype=torch.float32).view(batch_size, 2) 6 | 7 | W1 = torch.tensor([(4, -9), (4, -3), (3, 1)], dtype=torch.float32) 8 | bias1 = torch.tensor([41, 11, 18], dtype=torch.float32) 9 | W2 = torch.tensor([1, 1, 1], dtype=torch.float32) 10 | bias2 = torch.tensor([-2.5], dtype=torch.float32) 11 | 12 | h = torch.matmul(X, W1.transpose(1, 0)) + bias1 13 | h = torch.sign(h) 14 | predict = torch.sign(h @ W2 + bias2) 15 | -------------------------------------------------------------------------------- /solves/1.2.6: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | x = list(map(int, input().split())) # чтение вектора [x1, x2] из входного потока 4 | 5 | W1 = np.array([[0.5, 0.5, -1]]) 6 | b1 = np.array([[2.5]]) 7 | 8 | x = np.array([x + [x[0] * x[1]]]) 9 | y = W1 @ x.T + b1 10 | 11 | print(f"{y.item():.1f}") # вывод выходного значения y с точностью до десятых 12 | -------------------------------------------------------------------------------- /solves/1.2.7: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | x = list(map(int, input().split())) # чтение вектора [x1, x2, x3] из входного потока 4 | 5 | W1 = np.array([[3/2, 3/2, 4]]) 6 | b1 = np.array([[-5]]) 7 | 8 | x = np.array([x]) 9 | y = W1 @ x.T + b1 10 | 11 | print(f"{y.item():.2f}") # вывод выходного значения y с точностью до сотых 12 | -------------------------------------------------------------------------------- /solves/1.2.8: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | x = list(map(int, input().split())) # чтение вектора [x1, x2, x3] из входного потока 4 | 5 | W1 = np.array([[0.3, 0.2, 0.5], [0.5, -0.1, -0.2]]) 6 | W2 = np.array([[-1.5, 0.5]]) 7 | b1 = np.array([[-1.5, 2]]) 8 | b2 = np.array([[-1]]) 9 | 10 | x = np.array([x]) 11 | h = W1 @ x.T + b1.T 12 | u = np.array([[+1 if _x > 0 else -1 for _x in h]]) 13 | y = W2 @ u.T + b2.T 14 | 15 | print(f"{y.item():.1f}") # вывод выходного значения y с точностью до десятых 16 | -------------------------------------------------------------------------------- /solves/1.2.9: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | x = list(map(int, input().split())) # чтение вектора [x1, x2] из входного потока 4 | 5 | W1 = np.array([[1, 1]]) 6 | b1 = np.array([[-5]]) 7 | 8 | x = np.array([[_x * _x for _x in x]]) 9 | y = W1 @ x.T + b1.T 10 | 11 | print(f"{y.item():.1f}") 12 | -------------------------------------------------------------------------------- /solves/1.4.10: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список в программе не менять 4 | 5 | t_indx = torch.tensor(lst, dtype=torch.int64).view(-1, 3) 6 | -------------------------------------------------------------------------------- /solves/1.4.3: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | t = torch.empty(7, 30, 24) 4 | -------------------------------------------------------------------------------- /solves/1.4.4: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | table = torch.tensor([[1, 4], [2, 5], [3, 6]], dtype=torch.int32) 5 | 6 | -------------------------------------------------------------------------------- /solves/1.4.6: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | tr = torch.tensor([[[0.2, -1.2, 1.2], [-1.0, -2.0, 1.0], [0.5, 0.2, -0.1]], [[-1.0, 2.1, 0.1], [0.9, 0.8, -2.0], [1.5, -2.0, 0.1]]], dtype=torch.float32) 4 | 5 | 6 | -------------------------------------------------------------------------------- /solves/1.4.9: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список в программе не менять 4 | 5 | tnsr = 
torch.tensor(lst, dtype=torch.float32) 6 | -------------------------------------------------------------------------------- /solves/1.5.1: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | t1 = torch.zeros(4, 8, 2, dtype=torch.int8) 4 | t2 = torch.ones(1, 9, dtype=torch.int8) 5 | t3 = torch.eye(5, 4, dtype=torch.int8) 6 | t4 = torch.full((2, 7, 1, 5), -5, dtype=torch.int8) 7 | -------------------------------------------------------------------------------- /solves/1.5.10: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | t_indx = torch.LongTensor(list(range(1, 65))).view(2, 32) 4 | -------------------------------------------------------------------------------- /solves/1.5.13: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | t1 = torch.empty(3, 2, 10).fill_(5) 4 | t2 = torch.empty(1, 10, 1, 7, 1).fill_(-1) 5 | 6 | t1 = t1.unsqueeze(0) 7 | t2 = t2.squeeze() 8 | -------------------------------------------------------------------------------- /solves/1.5.2: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | t1 = torch.arange(2, 11, 1, dtype=torch.float32) 4 | t2 = torch.arange(21, 25, 0.5, dtype=torch.float32).view(1, 8) 5 | t3 = torch.linspace(0,-1.8,10).reshape(5,2).float() 6 | -------------------------------------------------------------------------------- /solves/1.5.3: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | t1 = torch.linspace(3, 7, 3, dtype=torch.float32) 5 | t2 = torch.linspace(9, -9, 10, dtype=torch.float32) 6 | t3 = torch.linspace(5, -5, 5, dtype=torch.float32).view(5, 1) 7 | -------------------------------------------------------------------------------- /solves/1.5.5: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | t1 = torch.rand(21, dtype=torch.float32) 5 | t2 = torch.randint(-5, 6, (3, 5), dtype=torch.int16) 6 | t3 = torch.randn(64, 128, dtype=torch.float64) 7 | -------------------------------------------------------------------------------- /solves/1.5.8: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | t1 = torch.empty(3, 10, 2, dtype=torch.float32).uniform_(-2, 10) 4 | t2 = torch.empty(123, dtype=torch.float32).random_(13, 20) 5 | t3 = torch.empty(8, 1024, dtype=torch.float32).normal_(23, 50) 6 | -------------------------------------------------------------------------------- /solves/1.6.1: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | tr = torch.empty(32, dtype=torch.int32) 4 | tr[0] = tr[-1] = -1 5 | -------------------------------------------------------------------------------- /solves/1.6.10: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список lst в программе не менять 4 | 5 | 6 | tr = torch.tensor(lst, dtype=torch.int32) 7 | t_indx = tr[[1, 1, 2, 1, 0]] 8 | -------------------------------------------------------------------------------- /solves/1.6.11: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список lst в программе не менять 4 | 5 | 6 | tr = torch.tensor(lst, dtype=torch.int32) 7 | t_res = tr[-2 <= tr] 
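# together with the next line this keeps only the values in the range [-2, 2];
# an equivalent single-step form (sketch): t_res = tr[(-2 <= tr) & (tr <= 2)]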
8 | t_res = t_res[t_res <= 2] 9 | -------------------------------------------------------------------------------- /solves/1.6.2: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(float, input().split())) # список в программе не менять 4 | 5 | 6 | tr = torch.tensor(lst, dtype=torch.float32) 7 | tr_2 = tr[::2] 8 | print(*map(lambda _x: f"{_x.item():.1f}", tr_2)) 9 | -------------------------------------------------------------------------------- /solves/1.6.4: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | value = int(input()) # переменную value в программе не менять 4 | 5 | tr = torch.zeros(10, dtype=torch.int32) 6 | tr[1::2] = value 7 | print(*map(lambda _x: _x.item(), tr)) 8 | -------------------------------------------------------------------------------- /solves/1.6.5: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # списки lst_1 и lst_2 в программе не менять 4 | lst_1 = list(map(int, input().split())) 5 | lst_2 = list(map(int, input().split())) 6 | 7 | t1 = torch.tensor(lst_1, dtype=torch.int32) 8 | t2 = torch.tensor(lst_2, dtype=torch.int32) 9 | 10 | t2_size = t2.size(0) 11 | t1[1:1+t2_size] = t2 12 | print(*map(lambda _x: _x.item(), t1)) 13 | -------------------------------------------------------------------------------- /solves/1.6.6: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | tr3d = torch.tensor([[(1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12)], 5 | [(10, 20, 30, 40), (50, 60, 70, 80), (90, 100, 110, 120)], 6 | [(-1, -2, -3, -4), (-5, -6, -7, -8), (-9, -10, -11, -12)]], 7 | dtype=torch.int16) 8 | 9 | tm = tr3d[1] 10 | -------------------------------------------------------------------------------- /solves/1.6.7: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список lst в программе не менять 4 | 5 | tr3d = torch.tensor(lst, dtype=torch.int16).view(3, 3, 4) 6 | t_res = tr3d[:, 1, :] 7 | -------------------------------------------------------------------------------- /solves/1.6.8: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список lst в программе не менять 4 | 5 | tr3d = torch.tensor(lst, dtype=torch.int32).view(3, 3, 4) 6 | t_res = tr3d[:, :, 1].permute(1, 0) 7 | -------------------------------------------------------------------------------- /solves/1.6.9: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список lst в программе не менять 4 | 5 | 6 | tr = torch.tensor(lst, dtype=torch.int32) 7 | t_pos = tr[tr > 0] 8 | -------------------------------------------------------------------------------- /solves/1.7.1: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | tr = torch.arange(-5, 2, 0.5) 4 | tr = (tr + 11.0) * 2.5 5 | -------------------------------------------------------------------------------- /solves/1.7.10: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список lst в программе не менять 4 | 5 | targets = torch.tensor(lst, dtype=torch.int32) 6 | n = max(lst)+1 7 | t_onehot = 
torch.eye(n, dtype=torch.int8)[lst] 8 | -------------------------------------------------------------------------------- /solves/1.7.2: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список lst в программе не менять 4 | 5 | tr = torch.tensor(lst, dtype=torch.int32) 6 | tr_res = tr[tr > 5] % 2 7 | -------------------------------------------------------------------------------- /solves/1.7.3: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # списки в программе не менять 4 | lst_1 = list(map(int, input().split())) 5 | lst_2 = list(map(int, input().split())) 6 | 7 | tr_1 = torch.tensor(lst_1, dtype=torch.int32) 8 | tr_2 = torch.tensor(lst_2, dtype=torch.int32) 9 | 10 | sz = tr_1.size(0) if (tr_1.size(0) < tr_2.size(0)) else tr_2.size(0) 11 | tr_1.resize_(sz) 12 | tr_2.resize_(sz) 13 | 14 | tr_1 = tr_1 ** tr_2 15 | -------------------------------------------------------------------------------- /solves/1.7.4: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(float, input().split())) # список lst в программе не менять 4 | 5 | t_mask = torch.ones(16, dtype=torch.int8) 6 | t_mask[1::2] = torch.empty(8, dtype=torch.int8).fill_(-1) 7 | 8 | tr = torch.tensor(lst, dtype=torch.float32) 9 | tr[:16] = tr[:16] * t_mask.float() 10 | print(*map(lambda _x: f"{_x.item():.1f}", tr)) 11 | -------------------------------------------------------------------------------- /solves/1.7.8: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) # список lst в программе не менять 4 | 5 | tr = torch.tensor(lst, dtype=torch.int32).view(2, -1) 6 | t_even = tr[0][tr[0] % 2 == 0] 7 | t_odd = tr[1][tr[1] % 2 != 0] 8 | -------------------------------------------------------------------------------- /solves/1.7.9: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | current_tm = int(input()) # переменную current_tm в программе не менять 4 | 5 | 6 | t_time = torch.empty(4, dtype=torch.int32) 7 | t_time[0] = current_tm 8 | t_time[1] = current_tm // 3600 9 | t_time[2] = current_tm % 3600 // 60 10 | t_time[3] = current_tm % 3600 % 60 11 | -------------------------------------------------------------------------------- /solves/1.8.1: -------------------------------------------------------------------------------- 1 | import torch 2 | t_rnd = torch.randint(-3, 5, (100, ), dtype=torch.float32) # значения этого тензора в программе не менять 3 | 4 | t_mean = t_rnd.mean().item() 5 | t_max = t_rnd[:5].max().item() 6 | t_min = t_rnd[-3:].min().item() 7 | -------------------------------------------------------------------------------- /solves/1.8.10: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # переменные lst и t_box_lst в программе не менять 4 | lst = list(map(float, input().split())) 5 | t_box_lst = torch.tensor(lst, dtype=torch.int32).view(-1, 3) 6 | 7 | t_box_vol = torch.prod(t_box_lst, dim=1) 8 | 9 | t_mean_vol = torch.mean(t_box_vol.float()).item() 10 | t_min_vol = torch.min(t_box_vol[t_box_vol > t_mean_vol]).item() 11 | t_max_vol = torch.max(t_box_vol[t_box_vol < t_mean_vol]).item() 12 | -------------------------------------------------------------------------------- /solves/1.8.2: 
-------------------------------------------------------------------------------- 1 | import torch 2 | 3 | lst = list(map(int, input().split())) 4 | 5 | targets = torch.tensor(lst, dtype=torch.int64) 6 | t_onehot = torch.eye(targets.max()+1)[targets] 7 | 8 | t_bags = t_onehot.sum(dim=0) 9 | pred = t_bags.argmax().item() 10 | -------------------------------------------------------------------------------- /solves/1.8.4: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | a, b, n = map(float, input().split()) # переменные a, b, n в программе не менять 4 | 5 | t_range = torch.linspace(a, b, int(n)) 6 | t_out = 1 / (1 + torch.exp(-t_range)) 7 | t_pred = torch.argmax(t_out).item() 8 | -------------------------------------------------------------------------------- /solves/1.8.5: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | t_out = torch.randn(10, dtype=torch.float32) * 10 - 5 # тензор t_out в программе не менять 4 | 5 | t_pred = torch.tensor([torch.exp(x) / torch.sum(torch.exp(t_out)) for x in t_out]) 6 | t_indx_min = torch.argmin(t_pred).item() 7 | -------------------------------------------------------------------------------- /solves/1.8.7: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # переменные lst и t_salaries в программе не менять 4 | lst = list(map(int, input().split())) 5 | t_salaries = torch.tensor(lst, dtype=torch.int32) 6 | 7 | t_median = torch.median(t_salaries).item() 8 | t_mean = torch.mean(t_salaries.float()).item() 9 | t_std = t_salaries.float().std().item() 10 | 11 | t_low = t_salaries[t_salaries < t_median] 12 | t_hi = t_salaries[t_salaries > t_median] 13 | -------------------------------------------------------------------------------- /solves/1.8.8: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # переменные lst и t_videos в программе не менять 4 | lst = list(map(int, input().split())) 5 | t_videos = torch.tensor(lst, dtype=torch.int32) 6 | 7 | t_median = torch.median(t_videos).item() 8 | t_mean = torch.mean(t_videos.float()).item() 9 | t_disp = t_videos.float().var().item() 10 | 11 | t_low_count = torch.sum(t_videos[t_videos < t_mean]).item() 12 | t_hi_count = torch.sum(t_videos[t_videos > t_mean]).item() 13 | -------------------------------------------------------------------------------- /solves/1.8.9: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # переменные lst и t_rect_lst в программе не менять 4 | lst = list(map(float, input().split())) 5 | t_rect_lst = torch.tensor(lst, dtype=torch.int32).view(-1, 2) 6 | 7 | t_rect_sq = torch.prod(t_rect_lst, dim=1) 8 | t_indx_min = torch.argmin(t_rect_sq) 9 | t_indx_max = torch.argmax(t_rect_sq) 10 | 11 | t_sum_sq = torch.sum(t_rect_sq).item() 12 | 13 | a, b = t_rect_lst[t_indx_min] 14 | t_min_per = (2 * (a + b)).item() 15 | 16 | a, b = t_rect_lst[t_indx_max] 17 | t_max_per = (2 * (a + b)).item() 18 | -------------------------------------------------------------------------------- /solves/1.9.5: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | w = list(map(float, input().split())) 4 | x = list(map(float, input().split())) 5 | 6 | W = torch.tensor([w[1:]], dtype=torch.float32) 7 | bias = torch.tensor(w[0], dtype=torch.float32) 8 | t_inp = torch.tensor(x, dtype=torch.float32) 9 | 10 | y 
= W @ t_inp + bias 11 | print(f"{y.item():.1f}") 12 | -------------------------------------------------------------------------------- /solves/1.9.6: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def sigma(x): 4 | return 1 / (1 + torch.exp(-x)) 5 | 6 | 7 | # все эти переменные в программе не менять 8 | W = torch.rand(3) * 10 - 5 9 | bias = torch.rand(1) * 100 - 50 10 | 11 | batch_size = 8 # размер мини-батча 12 | X = torch.empty(batch_size, 3).normal_(mean=1.0, std=4.0) 13 | 14 | predict = sigma(torch.matmul(X, W) + bias) 15 | -------------------------------------------------------------------------------- /solves/1.9.7: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def sigma(x): 4 | return 1 / (1 + torch.exp(-x)) 5 | 6 | 7 | # значения списков w и g в программе не менять 8 | w = list(map(float, input().split())) 9 | g = list(map(float, input().split())) 10 | 11 | t_inp = torch.rand(3) * 10 12 | 13 | W1 = torch.tensor(w, dtype=torch.float32).view(2, -1) 14 | bias1 = W1[:, 0] 15 | W1 = W1[:, 1:] 16 | W2 = torch.tensor([g[1:]], dtype=torch.float32) 17 | bias2 = torch.tensor(g[0], dtype=torch.float32) 18 | 19 | u = sigma(W1 @ t_inp + bias1) 20 | y = W2 @ u + bias2 21 | -------------------------------------------------------------------------------- /solves/15.11: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | tr = torch.IntTensor(list(range(-10, 10, 2))).view(5, 2) 5 | tr_t = tr.permute(1, 0) 6 | -------------------------------------------------------------------------------- /solves/2.1.5: -------------------------------------------------------------------------------- 1 | def func(x): 2 | return 0.5 * x + 0.2 * x ** 2 - 0.1 * x ** 3 3 | 4 | 5 | def df(x): 6 | return 0.5 + 0.4 * x - 0.3 * x ** 2 7 | 8 | 9 | eta = 0.01 10 | x = -4.0 11 | N = 200 12 | 13 | for _ in range(N): 14 | x = x - eta * df(x) 15 | -------------------------------------------------------------------------------- /solves/2.1.6.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | batch_size=12 5 | x = torch.rand(batch_size, 64) # тензор x в программе не менять 6 | 7 | block_bm_dp = nn.Sequential( 8 | nn.Linear(32, 32, bias=False), 9 | nn.ELU(), 10 | nn.BatchNorm1d(32), 11 | nn.Dropout(0.3), 12 | ) 13 | 14 | model = nn.Sequential() 15 | model.add_module('input', nn.Linear(64, 32)) 16 | model.add_module('act1', nn.ReLU()) 17 | model.add_module('block1', block_bm_dp) 18 | model.add_module('block2', block_bm_dp) 19 | model.add_module('block3', block_bm_dp) 20 | model.add_module('output', nn.Linear(32, 10)) 21 | 22 | model.eval() 23 | predict = model(x) 24 | -------------------------------------------------------------------------------- /solves/2.14.3: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.utils.data as data 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class FuncModel(nn.Module): 7 | def __init__(self, in_features): 8 | super().__init__() 9 | # модель однослойной полносвязной нейронной сети: 10 | # 1-й слой: число входов in_features, число нейронов 1 11 | self.layer = nn.Linear(in_features, 1) 12 | 13 | def forward(self, x): 14 | return self.layer(x) 15 | 16 | 17 | torch.manual_seed(1) 18 | 19 | epochs = 20 # число эпох обучения 20 | batch_size = 8 # размер батча 21 | N = 
6 # порядок модели (N-1) 22 | 23 | torch.manual_seed(1) 24 | 25 | data_x = torch.arange(-4, 4, 0.01) 26 | data_y = 0.1 * data_x + 0.1 * data_x ** 2 - 0.5 * torch.sin(2 * data_x) + torch.cos(4 * data_x) 27 | data_x.unsqueeze_(-1) 28 | X = torch.cat([data_x ** _n for _n in range(N)], dim=1) 29 | ds = data.TensorDataset(X, data_y) 30 | 31 | d_train, d_val = data.random_split(ds, [0.8, 0.2]) 32 | train_data = data.DataLoader(d_train, batch_size=batch_size, shuffle=True) 33 | train_data_val = data.DataLoader(d_val, batch_size=len(d_val), shuffle=False) 34 | 35 | model = FuncModel(N) 36 | optimizer = optim.RMSprop(params=model.parameters(), lr=0.01, weight_decay=10) 37 | loss_func = nn.MSELoss() 38 | 39 | loss_lst_val = [] # список значений потерь при валидации 40 | loss_lst = [] # список значений потерь при обучении 41 | 42 | for _e in range(epochs): 43 | model.train() 44 | loss_mean = 0 45 | lm_count = 0 46 | 47 | for x_train, y_train in train_data: 48 | predict = model(x_train) 49 | loss = loss_func(predict, y_train.unsqueeze(-1)) 50 | 51 | optimizer.zero_grad() 52 | loss.backward() 53 | optimizer.step() 54 | 55 | lm_count += 1 56 | loss_mean = 1 / lm_count * loss.item() + (1 - 1 / lm_count) * loss_mean 57 | 58 | # валидация модели 59 | model.eval() 60 | x_val, y_val = next(iter(train_data_val)) 61 | 62 | with torch.no_grad(): 63 | p = model(x_val) 64 | loss = loss_func(p, y_val.unsqueeze(-1)) 65 | Q_val = loss.item() 66 | 67 | loss_lst.append(loss_mean) 68 | loss_lst_val.append(Q_val) 69 | 70 | model.eval() 71 | p = model(X) 72 | Q = loss_func(p, data_y.unsqueeze(-1)).item() 73 | -------------------------------------------------------------------------------- /solves/2.14.6: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь продолжайте программу 5 | class MyModel(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.layer1 = nn.Linear(13, 32) 9 | self.layer2 = nn.Linear(32, 16) 10 | self.layer3 = nn.Linear(16, 3) 11 | self.dropout1 = nn.Dropout(0.4) 12 | 13 | def forward(self, x): 14 | x = self.layer1(x) 15 | x = nn.functional.relu(x) 16 | x = self.layer2(x) 17 | x = nn.functional.relu(x) 18 | x = self.dropout1(x) 19 | x = self.layer3(x) 20 | return x 21 | 22 | 23 | torch.manual_seed(1) 24 | model = MyModel() 25 | model.eval() 26 | x = torch.rand(13) 27 | predict = model(x.unsqueeze(0)) 28 | -------------------------------------------------------------------------------- /solves/2.14.7: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.utils.data as data 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | 7 | class MyNN(nn.Module): 8 | def __init__(self): 9 | super().__init__() 10 | self.layer1 = nn.Linear(64, 64) 11 | self.layer2 = nn.Linear(64, 32) 12 | self.layer3 = nn.Linear(32, 10) 13 | self.dropout = nn.Dropout1d(0.3) 14 | 15 | def forward(self, x): 16 | x = self.layer1(x) 17 | x = torch.relu(x) 18 | x = self.dropout(x) 19 | 20 | x = self.layer2(x) 21 | x = torch.relu(x) 22 | x = self.dropout(x) 23 | 24 | x = self.layer3(x) 25 | return x 26 | 27 | 28 | model = MyNN() 29 | 30 | ds = data.TensorDataset(_global_var_data_x, _global_var_target) 31 | 32 | d_train, d_test = data.random_split(ds, [0.7, 0.3]) 33 | train_data = data.DataLoader(d_train, batch_size=16, shuffle=True) 34 | test_data = data.DataLoader(d_test, batch_size=len(d_test), shuffle=False) 35 | 36 | optimizer = optim.Adam(params = 
model.parameters(), lr = 0.01, weight_decay=0.1) 37 | loss_func = nn.CrossEntropyLoss() 38 | 39 | epochs = 2 40 | 41 | model.train() 42 | 43 | for _e in range(epochs): 44 | for x_train, y_train in train_data: 45 | predict = model(x_train) 46 | loss = loss_func(predict, y_train) 47 | 48 | optimizer.zero_grad() 49 | loss.backward() 50 | optimizer.step() 51 | 52 | model.eval() 53 | 54 | with torch.no_grad(): 55 | x_test, y_test = next(iter(test_data)) 56 | predict = model(x_test) 57 | predict = torch.argmax(predict, dim=1) 58 | 59 | Q = (predict == y_test).float().mean() 60 | Q = Q.item() 61 | -------------------------------------------------------------------------------- /solves/2.15.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class MyModel(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.layer1 = nn.Linear(10, 64, bias=False) 8 | self.layer2 = nn.Linear(64, 1) 9 | self.bm1 = nn.BatchNorm1d(64) 10 | self.act = nn.ReLU(inplace=True) 11 | 12 | def forward(self, x): 13 | x = self.layer1(x) 14 | x = self.act(x) 15 | x = self.bm1(x) 16 | x = self.layer2(x) 17 | return x 18 | 19 | 20 | model = MyModel() 21 | 22 | batch_size = 16 23 | x = torch.rand(batch_size, 10) # этот тензор в программе не менять 24 | 25 | model.eval() 26 | predict = model(x) 27 | -------------------------------------------------------------------------------- /solves/2.15.6: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.utils.data as data 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | 7 | class DigitNN(nn.Module): 8 | def __init__(self, input_dim): 9 | super().__init__() 10 | self.layer1 = nn.Linear(input_dim, 32, bias=False) 11 | self.layer2 = nn.Linear(32, 20, bias=False) 12 | self.layer3 = nn.Linear(20, 1) 13 | self.bm1 = nn.BatchNorm1d(32) 14 | self.bm2 = nn.BatchNorm1d(20) 15 | 16 | def forward(self, x): 17 | x = self.layer1(x) 18 | x = nn.functional.relu(x) 19 | x = self.bm1(x) 20 | x = self.layer2(x) 21 | x = nn.functional.relu(x) 22 | x = self.bm2(x) 23 | x = self.layer3(x) 24 | return x 25 | 26 | 27 | model = DigitNN(30) 28 | 29 | ds = data.TensorDataset(_global_var_data_x, _global_var_target.float()) 30 | d_train, d_test = data.random_split(ds, [0.7, 0.3]) 31 | train_data = data.DataLoader(d_train, batch_size=16, shuffle=True) 32 | test_data = data.DataLoader(d_test, batch_size=len(d_test), shuffle=False) 33 | 34 | optimizer = optim.Adam(params=model.parameters(), lr=0.01) 35 | loss_func = nn.BCEWithLogitsLoss() 36 | epochs = 5 37 | model.train() 38 | 39 | for _e in range(epochs): 40 | for x_train, y_train in train_data: 41 | predict = model(x_train) 42 | loss = loss_func(predict, y_train.unsqueeze(-1)) 43 | 44 | optimizer.zero_grad() 45 | loss.backward() 46 | optimizer.step() 47 | 48 | # тестирование обученной НС 49 | model.eval() 50 | 51 | x_test, y_test = next(iter(test_data)) 52 | with torch.no_grad(): 53 | p = model(x_test) 54 | Q = torch.sum(torch.sign(p.flatten()) == (2 * y_test.flatten() - 1)).item() 55 | 56 | Q /= len(d_test) 57 | -------------------------------------------------------------------------------- /solves/2.16.1: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | model = nn.Sequential( 5 | nn.Linear(5, 16, bias=False), 6 | nn.ReLU(), 7 | nn.BatchNorm1d(16), 8 | nn.Linear(16, 3) 9 | ) 10 | 11 | batch_size=8 12 | x = 
torch.rand(batch_size, 5) # тензор x в программе не менять 13 | 14 | model.eval() 15 | predict = model(x) 16 | -------------------------------------------------------------------------------- /solves/2.16.10: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class MyModel(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.input = nn.Linear(64, 32) 8 | self.act = nn.ReLU(inplace=True) 9 | self.output = nn.Linear(32, 10) 10 | 11 | self.blocks = nn.ModuleDict({ 12 | 'block_1': nn.Sequential(nn.Linear(32, 32, bias=False), nn.ELU(), nn.BatchNorm1d(32)), 13 | 'block_2': nn.Sequential(nn.Linear(32, 32), nn.ReLU(), nn.Dropout1d(0.4)) 14 | }) 15 | 16 | def forward(self, x, type_block='block_1'): 17 | block = self.blocks[type_block] if type_block in self.blocks else self.blocks['block_1'] 18 | x = self.input(x) 19 | x = self.act(x) 20 | x = block(x) 21 | x = self.output(x) 22 | return x 23 | 24 | 25 | batch_size = 100 26 | x = torch.rand(batch_size, 64) 27 | 28 | model = MyModel() 29 | model.eval() 30 | 31 | predict = model(x, 'block_2') 32 | -------------------------------------------------------------------------------- /solves/2.16.2: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.utils.data as data 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | model = nn.Sequential( 7 | nn.Linear(3, 1), 8 | ) 9 | 10 | _x = torch.arange(-5, 5, 0.1) 11 | data_y = torch.sin(2 * _x) + 0.2 * torch.cos(10 * _x) + 0.1 * _x ** 2 12 | 13 | _x.unsqueeze_(-1) 14 | data_x = torch.cat([_x, _x ** 2, _x ** 3], dim=1) 15 | ds = data.TensorDataset(data_x, data_y) 16 | 17 | batch_size = 8 18 | train_data = data.DataLoader(ds, batch_size, shuffle=True) 19 | 20 | epochs = 20 # число эпох обучения 21 | 22 | optimizer = optim.RMSprop(params=model.parameters(), lr=0.01) 23 | loss_func = torch.nn.MSELoss() 24 | 25 | model.train() 26 | for _e in range(epochs): # итерации по эпохам 27 | for x_train, y_train in train_data: 28 | predict = model(x_train) 29 | loss = loss_func(predict, y_train.unsqueeze(-1)) 30 | 31 | optimizer.zero_grad() 32 | loss.backward() 33 | optimizer.step() 34 | 35 | model.eval() 36 | predict = model(data_x) 37 | Q = loss_func(predict, data_y.unsqueeze(-1)).item() 38 | -------------------------------------------------------------------------------- /solves/2.16.3: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | batch_size=16 5 | x = torch.rand(batch_size, 12) # тензор x в программе не менять 6 | 7 | model = nn.Sequential() 8 | model.add_module('layer1', nn.Linear(12, 24)) 9 | model.add_module('act1', nn.Tanh()) 10 | model.add_module('layer2', nn.Linear(24, 10)) 11 | model.add_module('act2', nn.Tanh()) 12 | model.add_module('out', nn.Linear(10, 1)) 13 | 14 | model.eval() 15 | predict = model(x) 16 | -------------------------------------------------------------------------------- /solves/2.16.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | batch_size=12 5 | x = torch.rand(batch_size, 64) # тензор x в программе не менять 6 | 7 | block_bm_dp = nn.Sequential( 8 | nn.Linear(32, 32, bias=False), 9 | nn.ELU(), 10 | nn.BatchNorm1d(32), 11 | nn.Dropout(0.3), 12 | ) 13 | 14 | model = nn.Sequential() 15 | model.add_module('input', nn.Linear(64, 32)) 16 | 
model.add_module('act1', nn.ReLU()) 17 | model.add_module('block1', block_bm_dp) 18 | model.add_module('block2', block_bm_dp) 19 | model.add_module('block3', block_bm_dp) 20 | model.add_module('output', nn.Linear(32, 10)) 21 | 22 | model.eval() 23 | predict = model(x) 24 | -------------------------------------------------------------------------------- /solves/2.16.5: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class MyModel(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.inp_2 = nn.Sequential( 8 | nn.Linear(12, 12), 9 | nn.Sigmoid() 10 | ) 11 | self.out = nn.Sequential( 12 | nn.Linear(12, 32), 13 | nn.ReLU(), 14 | nn.Linear(32, 1) 15 | ) 16 | self.inp_1 = nn.Sequential( 17 | nn.Linear(7, 12), 18 | nn.Tanh() 19 | ) 20 | 21 | def forward(self, a, b): 22 | x1 = self.inp_1(a) 23 | x2 = self.inp_2(b) 24 | return self.out(x1 + x2) 25 | 26 | 27 | batch_size=12 28 | a = torch.rand(batch_size, 7) # тензоры a, b в программе не менять 29 | b = torch.rand(batch_size, 12) 30 | 31 | model = MyModel() 32 | model.eval() 33 | predict = model(a, b) 34 | -------------------------------------------------------------------------------- /solves/2.16.6: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class MyModel(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.inp = nn.Sequential( 8 | nn.Linear(32, 64, bias=False), 9 | nn.LeakyReLU(), 10 | nn.BatchNorm1d(64) 11 | ) 12 | self.out_1 = nn.Sequential( 13 | nn.Linear(64, 10), 14 | nn.Sigmoid() 15 | ) 16 | self.out_2 = nn.Sequential( 17 | nn.Linear(64, 32), 18 | nn.Tanh() 19 | ) 20 | 21 | def forward(self, x): 22 | x = self.inp(x) 23 | y = self.out_1(x) 24 | t = self.out_2(x) 25 | return y, t 26 | 27 | 28 | batch_size=28 29 | x = torch.rand(batch_size, 32) # тензор x в программе не менять 30 | 31 | model = MyModel() 32 | model.eval() 33 | predict_y, predict_t = model(x) 34 | -------------------------------------------------------------------------------- /solves/2.16.8: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class DeepNetwork(nn.Module): 5 | def __init__(self, n_hidden_layers): 6 | super().__init__() 7 | self.layers = nn.ModuleList( 8 | [nn.Linear(32, 32) for _ in range(n_hidden_layers)] 9 | ) 10 | self.input = nn.Linear(11, 32, bias=False) 11 | self.output = nn.Linear(32, 5) 12 | 13 | def forward(self, x): 14 | x = self.input(x) 15 | x = torch.relu(x) 16 | for layer in self.layers: 17 | x = layer(x) 18 | x = torch.relu(x) 19 | x = self.output(x) 20 | return x 21 | 22 | 23 | n = int(input()) # это значение в программе не менять 24 | 25 | batch_size = 18 26 | x = torch.rand(batch_size, 11) # тензор x в программе не менять 27 | 28 | model = DeepNetwork(n) 29 | model.eval() 30 | predict = model(x) 31 | -------------------------------------------------------------------------------- /solves/3.1.10: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | H, W = 24, 24 # размеры карты признаков: H - число строк; W - число столбцов 4 | kernel_size = (3, 2) # размер окна для Pooling по осям (H, W) 5 | stride = (2, 2) # шаг смещения окна по осям (H, W) 6 | padding = 1 # размер нулевой области вокруг карты признаков (число строк и столбцов с кажой стороны) 7 | 8 | H_out = int((H + 2 * padding - kernel_size[0]) / stride[0] + 1) 
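# The output size above (and W_out on the next line) follows the standard pooling formula:
#   out = floor((in + 2*padding - kernel) / stride) + 1
# e.g. for this task H_out = (24 + 2*1 - 3) // 2 + 1 = 12.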
9 | W_out = int((W + 2 * padding - kernel_size[1]) / stride[1] + 1) 10 | 11 | x = torch.rand((H, W)) # карта признаков (в программе не менять) 12 | 13 | # здесь продолжайте программу 14 | x_img_p = torch.zeros((H + 2*padding, W + 2*padding)) 15 | x_img_p[padding:H+padding, padding:W+padding] = x 16 | 17 | res_pool = torch.empty(H_out, W_out, dtype=torch.float32) 18 | for i in range(0, H_out): 19 | for j in range(0, W_out): 20 | res_pool[i, j] = torch.mean(x_img_p[i*stride[0]:kernel_size[0]+i*stride[0], 21 | j*stride[1]:kernel_size[1]+j*stride[1]]) 22 | -------------------------------------------------------------------------------- /solves/3.1.3: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | C = 3 # число каналов 4 | H, W = 16, 16 # размеры изображения: H - число строк; W - число столбцов 5 | kernel_size = (3, 3) # размер ядра по осям (H, W) 6 | stride = (1, 1) # шаг смещения ядра по осям (H, W) 7 | padding = 0 # размер нулевой области вокруг изображения (число строк и столбцов с кажой стороны) 8 | 9 | H_out = int((H + 2 * padding - kernel_size[0]) / stride[0] + 1) 10 | W_out = int((W + 2 * padding - kernel_size[1]) / stride[1] + 1) 11 | 12 | x_img = torch.randint(0, 255, (C, H, W), dtype=torch.float32) # тензоры x_img и kernel 13 | kernel = torch.rand((C, ) + kernel_size) # в программе не менять 14 | 15 | predict = torch.empty(H_out, W_out, dtype=torch.float32) 16 | 17 | # здесь продолжайте программу 18 | for i in range(0, H_out): 19 | for j in range(0, W_out): 20 | predict[i, j] = torch.sum(x_img[:, i*stride[0]:kernel_size[0]+i*stride[0], 21 | j*stride[1]:kernel_size[1]+j*stride[1]] * kernel) 22 | -------------------------------------------------------------------------------- /solves/3.1.5: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | C = 2 # число каналов 4 | H, W = 16, 12 # размеры изображения: H - число строк; W - число столбцов 5 | kernel_size = (5, 3) # размер ядра по осям (H, W) 6 | stride = (1, 1) # шаг смещения ядра по осям (H, W) 7 | padding = 1 # размер нулевой области вокруг изображения (число строк и столбцов с кажой стороны) 8 | 9 | H_out = int((H + 2 * padding - kernel_size[0]) / stride[0] + 1) 10 | W_out = int((W + 2 * padding - kernel_size[1]) / stride[1] + 1) 11 | 12 | x_img = torch.randint(0, 255, (C, H, W), dtype=torch.float32) # тензоры x_img и kernel 13 | kernel = torch.rand((C, ) + kernel_size) # в программе не менять 14 | 15 | # здесь продолжайте программу 16 | x_img_p = torch.zeros((C, H + 2*padding, W + 2*padding)) 17 | x_img_p[:, padding:H+padding, padding:W+padding] = x_img 18 | 19 | predict = torch.empty(H_out, W_out, dtype=torch.float32) 20 | for i in range(0, H_out): 21 | for j in range(0, W_out): 22 | predict[i, j] = torch.sum(x_img_p[:, i*stride[0]:kernel_size[0]+i*stride[0], 23 | j*stride[1]:kernel_size[1]+j*stride[1]] * kernel) 24 | -------------------------------------------------------------------------------- /solves/3.1.6: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | C = 3 # число каналов 4 | H, W = 16, 12 # размеры изображения: H - число строк; W - число столбцов 5 | kernel_size = (5, 3) # размер ядра по осям (H, W) 6 | stride = (1, 2) # шаг смещения ядра по осям (H, W) 7 | padding = 1 # размер нулевой области вокруг изображения (число строк и столбцов с кажой стороны) 8 | 9 | bias = torch.rand(1) # смещение для фильтра (ядра), коэффициент w0 10 | act = torch.tanh # 
функция активации нейронов (результатов свертки) 11 | 12 | H_out = int((H + 2 * padding - kernel_size[0]) / stride[0] + 1) 13 | W_out = int((W + 2 * padding - kernel_size[1]) / stride[1] + 1) 14 | 15 | x_img = torch.randint(0, 255, (C, H, W), dtype=torch.float32) # тензоры x_img и kernel 16 | kernel = torch.rand((C, ) + kernel_size) # в программе не менять 17 | 18 | # здесь продолжайте программу 19 | x_img_p = torch.zeros((C, H + 2*padding, W + 2*padding)) 20 | x_img_p[:, padding:H+padding, padding:W+padding] = x_img 21 | 22 | predict = torch.empty(H_out, W_out, dtype=torch.float32) 23 | for i in range(0, H_out): 24 | for j in range(0, W_out): 25 | predict[i, j] = torch.sum(x_img_p[:, i*stride[0]:kernel_size[0]+i*stride[0], 26 | j*stride[1]:kernel_size[1]+j*stride[1]] * kernel) 27 | 28 | predict = act(predict) 29 | -------------------------------------------------------------------------------- /solves/3.1.9: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | H, W = 16, 12 # размеры карты признаков: H - число строк; W - число столбцов 4 | kernel_size = (2, 2) # размер окна для Pooling по осям (H, W) 5 | stride = (2, 2) # шаг смещения окна по осям (H, W) 6 | padding = 0 # размер нулевой области вокруг карты признаков (число строк и столбцов с кажой стороны) 7 | 8 | H_out = int((H + 2 * padding - kernel_size[0]) / stride[0] + 1) 9 | W_out = int((W + 2 * padding - kernel_size[1]) / stride[1] + 1) 10 | 11 | x = torch.rand((H, W)) # карта признаков (в программе не менять) 12 | 13 | # здесь продолжайте программу 14 | res_pool = torch.empty(H_out, W_out, dtype=torch.float32) 15 | for i in range(0, H_out): 16 | for j in range(0, W_out): 17 | res_pool[i, j] = torch.max(x[i*stride[0]:kernel_size[0]+i*stride[0], 18 | j*stride[1]:kernel_size[1]+j*stride[1]]) 19 | -------------------------------------------------------------------------------- /solves/3.10.3: -------------------------------------------------------------------------------- 1 | import torch 2 | from torchvision import models 3 | import torchvision.transforms.functional as TF 4 | 5 | # тензор x и img_pil в программе не менять 6 | x = torch.randint(0, 255, (3, 100, 100), dtype=torch.float32) 7 | img_pil = TF.to_pil_image(x) 8 | 9 | resnet_weights = models.ResNet34_Weights.DEFAULT 10 | transforms = resnet_weights.transforms() 11 | 12 | model = models.resnet34() 13 | model.requires_grad_(False) 14 | model.eval() 15 | 16 | inp_img = transforms(img_pil) 17 | results = model(inp_img.unsqueeze(0)) 18 | -------------------------------------------------------------------------------- /solves/3.10.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torchvision import models 4 | import torchvision.transforms.functional as TF 5 | 6 | # тензор x и img_pil в программе не менять 7 | x = torch.randint(0, 255, (3, 250, 250), dtype=torch.float32) 8 | img_pil = TF.to_pil_image(x) 9 | 10 | # здесь продолжайте программу 11 | resnet_weights = models.ResNet50_Weights.DEFAULT 12 | transforms = resnet_weights.transforms() 13 | 14 | model = models.resnet50() 15 | model.requires_grad_(False) 16 | model.fc = nn.Sequential( 17 | nn.Linear(512*4, 128), 18 | nn.ReLU(inplace=True), 19 | nn.Linear(128, 10), 20 | ) 21 | model.fc.requires_grad_(False) 22 | model.eval() 23 | 24 | inp_img = transforms(img_pil) 25 | predict = model(inp_img.unsqueeze(0)) 26 | -------------------------------------------------------------------------------- 
/solves/3.10.5: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torchvision import models 4 | import torchvision.transforms.functional as TF 5 | 6 | # тензор x и img_pil в программе не менять 7 | x = torch.randint(0, 255, (3, 250, 250), dtype=torch.float32) 8 | img_pil = TF.to_pil_image(x) 9 | 10 | # здесь продолжайте программу 11 | resnet_weights = models.ResNet50_Weights.DEFAULT 12 | transforms = resnet_weights.transforms() 13 | 14 | model = models.resnet50() 15 | model.fc = nn.Sequential( 16 | nn.Linear(512*4, 100, bias=False), 17 | nn.ReLU(inplace=True), 18 | nn.BatchNorm1d(100), 19 | nn.Linear(100, 10), 20 | ) 21 | model.eval() 22 | 23 | inp_img = transforms(img_pil) 24 | with torch.no_grad(): 25 | predict = model(inp_img.unsqueeze(0)) 26 | -------------------------------------------------------------------------------- /solves/3.10.6: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.utils.data as data 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | 7 | class FuncModel(nn.Module): 8 | def __init__(self): 9 | super().__init__() 10 | # модель однослойной полносвязной нейронной сети: 11 | # 1-й слой: число входов 5 (x, x^2, x^3, x^4, x^5), число нейронов 1 12 | self.layer = nn.Linear(5, 1) 13 | 14 | def forward(self, x): 15 | x.unsqueeze_(-1) 16 | xx = torch.cat([x, x ** 2, x ** 3, x ** 4, x ** 5], dim=1) 17 | y = self.layer(xx) 18 | return y 19 | 20 | 21 | torch.manual_seed(1) 22 | 23 | model = FuncModel() 24 | 25 | epochs = 20 # число эпох обучения 26 | batch_size = 16 # размер батча 27 | 28 | data_x = torch.arange(-5, 5, 0.05) #тензоры data_x, data_y не менять 29 | data_y = torch.sin(2 * data_x) - 0.3 * torch.cos(8 * data_x) + 0.1 * data_x ** 2 30 | 31 | ds = data.TensorDataset(data_x, data_y) # создание dataset 32 | d_train, d_val = data.random_split(ds, [0.7, 0.3]) 33 | train_data = data.DataLoader(d_train, batch_size=batch_size, shuffle=True) 34 | train_data_val = data.DataLoader(d_val, batch_size=batch_size, shuffle=False) 35 | 36 | optimizer = optim.RMSprop(params=model.parameters(), lr=0.01) 37 | loss_func = nn.MSELoss() 38 | 39 | loss_lst_val = [] # список значений потерь при валидации 40 | loss_lst = [] # список значений потерь при обучении 41 | 42 | for _e in range(epochs): 43 | model.train() 44 | loss_mean = 0 45 | lm_count = 0 46 | 47 | for x_train, y_train in train_data: 48 | predict = model(x_train) 49 | loss = loss_func(predict, y_train.unsqueeze(-1)) 50 | 51 | optimizer.zero_grad() 52 | loss.backward() 53 | optimizer.step() 54 | 55 | lm_count += 1 56 | loss_mean = 1 / lm_count * loss.item() + (1 - 1 / lm_count) * loss_mean 57 | 58 | # валидация модели 59 | model.eval() 60 | Q_val = 0 61 | count_val = 0 62 | 63 | for x_val, y_val in train_data_val: 64 | with torch.no_grad(): 65 | p = model(x_val) 66 | loss = loss_func(p, y_val.unsqueeze(-1)) 67 | Q_val += loss.item() 68 | count_val += 1 69 | 70 | Q_val /= count_val 71 | 72 | loss_lst.append(loss_mean) 73 | loss_lst_val.append(Q_val) 74 | 75 | model.eval() 76 | p = model(data_x) 77 | Q = loss_func(p, data_y.unsqueeze(-1)).item() 78 | -------------------------------------------------------------------------------- /solves/3.11.5: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | result = torch.IntTensor([[-1, 2, -1, 2], [ 1, -2, 1, -2], [-2, 4, -2, 4], [ 2, -4, 2, -4]]) 4 | 
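The result tensors in solves/3.11.5 and 3.11.6 are transposed-convolution outputs worked out by hand. A quick way to sanity-check such answers is to load the same kernel into an nn.ConvTranspose2d layer and compare its output with the manual matrix. The input and kernel below are placeholders (the original task data is not included in this dump), so this is only a verification sketch, not the task's solution.

import torch
import torch.nn as nn

# Placeholder 2x2 input and 2x2 kernel -- substitute the values from the task statement.
x = torch.tensor([[1., -2.],
                  [0.,  3.]]).view(1, 1, 2, 2)
kernel = torch.tensor([[ 1., 0.],
                       [-1., 2.]]).view(1, 1, 2, 2)

layer = nn.ConvTranspose2d(1, 1, kernel_size=2, stride=1, bias=False)
layer.weight.data = kernel            # weight shape is (in_channels, out_channels, kH, kW)

with torch.no_grad():
    out = layer(x)                    # 3x3 map; compare with the hand-computed result
print(out.squeeze())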
-------------------------------------------------------------------------------- /solves/3.11.6: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | result = torch.IntTensor([[ -1., 2., 1., 4., 6.], 4 | [ 1., -2., 3., -4., 2.], 5 | [ 0., -5., -15., 10., 20.], 6 | [ -3., 6., 1., -8., 4.], 7 | [ 9., -3., -24., 4., 16.]]) 8 | 9 | -------------------------------------------------------------------------------- /solves/3.11.8: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | batch_size = 4 5 | H, W = 16, 24 6 | x = torch.rand(batch_size, 3, H, W) # тензор x в программе не менять 7 | 8 | # здесь продолжайте программу 9 | layer = nn.ConvTranspose2d(3, 2, 3, 2) 10 | out = layer(x) 11 | -------------------------------------------------------------------------------- /solves/3.12.1: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class BlockEncode1(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.block = nn.Sequential( 9 | nn.Conv2d(3, 16, 3, padding=1, bias=False), 10 | nn.ReLU(inplace=True), 11 | nn.BatchNorm2d(16), 12 | nn.Conv2d(16, 16, 3, padding=1, bias=False), 13 | nn.ReLU(inplace=True), 14 | nn.BatchNorm2d(16), 15 | ) 16 | 17 | self.mp = nn.MaxPool2d(2) 18 | 19 | def forward(self, x): 20 | y1 = self.block(x) 21 | y2 = self.mp(y1) 22 | return y2, y1 23 | 24 | 25 | model = BlockEncode1() 26 | model.eval() 27 | 28 | x = torch.rand(3, 128, 128) 29 | with torch.no_grad(): 30 | out1, out2 = model(x.unsqueeze(0)) 31 | 32 | -------------------------------------------------------------------------------- /solves/3.12.2: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class BlockDecode(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.tr = nn.ConvTranspose2d(32, 16, 2, 2) 8 | 9 | self.block = nn.Sequential( 10 | nn.Conv2d(32, 16, 3, padding=1, bias=False), 11 | nn.ReLU(inplace=True), 12 | nn.BatchNorm2d(16), 13 | nn.Conv2d(16, 16, 3, padding=1, bias=False), 14 | nn.ReLU(inplace=True), 15 | nn.BatchNorm2d(16), 16 | ) 17 | 18 | self.out = nn.Conv2d(16, 1, 1) 19 | 20 | def forward(self, x, y): 21 | x = self.tr(x) 22 | x = torch.cat([x, y], dim=1) 23 | x = self.block(x) 24 | return self.out(x) 25 | 26 | 27 | # тензоры x, y в программе не менять 28 | batch_size = 2 29 | x = torch.rand(batch_size, 32, 32, 32) 30 | y = torch.rand(batch_size, 16, 64, 64) 31 | 32 | model = BlockDecode() 33 | model.eval() 34 | 35 | with torch.no_grad(): 36 | out = model(x, y) 37 | -------------------------------------------------------------------------------- /solves/3.2.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | C = 5 # число каналов 5 | H, W = 32, 24 # размеры изображения: H - число строк; W - число столбцов 6 | kernel_size = (5, 3) # размер ядра по осям (H, W) 7 | stride = (1, 1) # шаг смещения ядра по осям (H, W) 8 | padding = 0 # размер нулевой области вокруг изображения (число строк и столбцов с кажой стороны) 9 | 10 | 11 | x = torch.randint(0, 255, (C, H, W), dtype=torch.float32) # тензор x в программе не менять 12 | 13 | # здесь продолжайте программу 14 | layer_nn = nn.Conv2d(in_channels=C, out_channels=1, kernel_size=kernel_size, stride=stride, padding=padding) 15 | t_out = 
layer_nn(x.unsqueeze(0)) 16 | -------------------------------------------------------------------------------- /solves/3.2.5: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | C = 3 # число каналов 5 | H, W = 17, 19 # размеры изображения: H - число строк; W - число столбцов 6 | kernel_size = (5, 5) # размер ядра по осям (H, W) 7 | stride = (1, 1) # шаг смещения ядра по осям (H, W) 8 | padding = 2 # размер нулевой области вокруг изображения (число строк и столбцов с каждой стороны) 9 | 10 | batch_size = 8 11 | x = torch.randint(0, 255, (batch_size, C, H, W), dtype=torch.float32) # тензор x в программе не менять 12 | 13 | # здесь продолжайте программу 14 | layer_nn = nn.Conv2d(in_channels=C, out_channels=1, kernel_size=kernel_size, stride=stride, padding=padding) 15 | t_out = layer_nn(x) 16 | -------------------------------------------------------------------------------- /solves/3.2.7: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | H, W = 32, 25 5 | x = torch.randint(0, 255, (H, W), dtype=torch.float32) # тензор x в программе не менять 6 | 7 | # здесь продолжайте программу 8 | lr = nn.MaxPool2d((3, 2)) 9 | t_out = lr(x.view(1, 1, H, W)) 10 | -------------------------------------------------------------------------------- /solves/3.2.8: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | C = 24 # число входных каналов 5 | H, W = 128, 86 # размеры карт признаков 6 | x = torch.randint(0, 255, (C, H, W), dtype=torch.float32) # тензор x в программе не менять 7 | 8 | # здесь продолжайте программу 9 | lr = nn.MaxPool2d((3, 4), (2, 1)) 10 | t_out = lr(x.view(1, C, H, W)) 11 | -------------------------------------------------------------------------------- /solves/3.2.9: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | batch_size = 8 # размер батча 5 | C = 24 # число входных каналов 6 | H, W = 128, 86 # размеры карт признаков 7 | x = torch.randint(0, 255, (batch_size, C, H, W), dtype=torch.float32) # тензор x в программе не менять 8 | 9 | # здесь продолжайте программу 10 | lr = nn.AvgPool2d(3, 2) 11 | t_out = lr(x) 12 | -------------------------------------------------------------------------------- /solves/3.3.2: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | x = torch.rand(1, 16, 16) # тензор x в программе не менять 5 | 6 | model = nn.Sequential( 7 | nn.Conv2d(1, 32, 3, padding=1), 8 | nn.ReLU(inplace=True), 9 | nn.MaxPool2d(2), 10 | nn.Conv2d(32, 16, 3, padding=1), 11 | nn.ReLU(inplace=True), 12 | nn.MaxPool2d(2), 13 | nn.Flatten(), 14 | nn.Linear(256, 5) 15 | ) 16 | 17 | predict = model(x.unsqueeze(0)) 18 | -------------------------------------------------------------------------------- /solves/3.3.3: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.utils.data as data 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | ds = data.TensorDataset(_global_var_data_x, _global_var_target) # обучающие данные 7 | 8 | model = nn.Sequential( 9 | nn.Conv2d(1, 32, 3, padding=1), 10 | nn.ReLU(inplace=True), 11 | nn.MaxPool2d(2), 12 | nn.Conv2d(32, 16, 3, padding=1), 13 | nn.ReLU(inplace=True), 14 | nn.MaxPool2d(2), 15 | 
nn.Flatten(), 16 | nn.Linear(64, 10) 17 | ) 18 | 19 | ds = data.TensorDataset(_global_var_data_x, _global_var_target) 20 | test_data = data.DataLoader(ds, batch_size=len(ds), shuffle=False) 21 | 22 | model.load_state_dict(_global_model_state) 23 | 24 | model.eval() 25 | 26 | x_test, y_test = next(iter(test_data)) 27 | with torch.no_grad(): 28 | p = model(x_test) 29 | p = torch.argmax(p, dim=1) 30 | Q = torch.sum(p.flatten() == y_test.flatten()).item() 31 | 32 | Q /= len(ds) 33 | -------------------------------------------------------------------------------- /solves/3.3.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс ImageNormalize 5 | class ImageNormalize(nn.Module): 6 | def forward(self, x): 7 | a, b = torch.min(x), torch.max(x) 8 | return (x - a) / (b - a) 9 | 10 | 11 | # генерация образов выборки 12 | total = 100 # размер выборки 13 | H, W = 32, 32 # размер изображений 14 | circle = torch.tensor([[0, 0, 0, 255, 255, 255, 255, 0, 0, 0], 15 | [0, 255, 255, 255, 255, 255, 255, 255, 255, 0], 16 | [0, 255, 255, 255, 255, 255, 255, 255, 255, 0], 17 | [255, 255, 255, 255, 255, 255, 255, 255, 255, 255], 18 | [255, 255, 255, 255, 255, 255, 255, 255, 255, 255], 19 | [255, 255, 255, 255, 255, 255, 255, 255, 255, 255], 20 | [255, 255, 255, 255, 255, 255, 255, 255, 255, 255], 21 | [0, 255, 255, 255, 255, 255, 255, 255, 255, 0], 22 | [0, 255, 255, 255, 255, 255, 255, 255, 255, 0], 23 | [0, 0, 0, 255, 255, 255, 255, 0, 0, 0]], dtype=torch.float32) 24 | Hc, Wc = circle.size() 25 | 26 | 27 | def _generate_img(_H, _W, _Hc, _Wc, _x, _y, _circle, _tr): # вспомогательная функция 28 | img = torch.rand(_H, _W) * 20 29 | img[_x:_x+_Hc, _y:_y+Wc] = _circle 30 | return _tr(img.view(1, 1, _H, _W)) 31 | 32 | 33 | transform = ImageNormalize() 34 | data_y = torch.tensor([(torch.randint(0, H-Hc, (1, )), torch.randint(0, W-Wc, (1, ))) for _ in range(total)]) 35 | data_x = torch.cat([_generate_img(H, W, Hc, Wc, _x[0], _x[1], circle, transform) for _x in data_y], dim=0) 36 | 37 | # создайте модели и пропустите через нее выборку data_x 38 | torch.manual_seed(1) 39 | 40 | model = nn.Sequential( 41 | nn.Conv2d(1, 16, 5, padding=2), 42 | nn.ReLU(inplace=True), 43 | nn.MaxPool2d(2), 44 | nn.Conv2d(16, 32, 3, padding=1), 45 | nn.ReLU(inplace=True), 46 | nn.MaxPool2d(2), 47 | nn.Flatten(), 48 | nn.Linear(2048, 2) 49 | ) 50 | 51 | loss_func = nn.MSELoss() 52 | 53 | model.eval() 54 | p = model(data_x) 55 | Q = loss_func(p, data_y.float()) 56 | -------------------------------------------------------------------------------- /solves/3.3.5: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс модели 5 | class MyModel(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.net1 = nn.Sequential( 9 | nn.Conv2d(3, 16, 3, padding=1), 10 | nn.ReLU(inplace=True), 11 | nn.MaxPool2d(2), 12 | nn.Conv2d(16, 32, 3, padding=1), 13 | nn.ReLU(inplace=True), 14 | nn.MaxPool2d(2), 15 | nn.Flatten(), 16 | ) 17 | 18 | self.net2 = nn.Sequential( 19 | nn.Linear(12, 64, bias=False), 20 | nn.Sigmoid(), 21 | nn.BatchNorm1d(64), 22 | ) 23 | 24 | self.output = nn.Linear(576, 10) 25 | 26 | def forward(self, x1, x2): 27 | x1 = self.net1(x1) 28 | x2 = self.net2(x2) 29 | x = torch.cat([x1, x2], dim=1) 30 | x = self.output(x) 31 | return x 32 | 33 | 34 | # тензоры data_img, data_x в программе не менять 35 | batch_size = 32 36 | data_img 
= torch.rand(batch_size, 3, 16, 16) 37 | data_x = torch.rand(batch_size, 12) 38 | 39 | # здесь продолжайте программу 40 | model = MyModel() 41 | model.eval() 42 | predict = model(data_img, data_x) 43 | -------------------------------------------------------------------------------- /solves/3.4.5: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision.transforms as tfs 3 | import torch.nn as nn 4 | 5 | # здесь продолжайте программу 6 | model = nn.Sequential( 7 | nn.Conv2d(3, 64, 3, padding=1), 8 | nn.ReLU(inplace=True), 9 | nn.Conv2d(64, 64, 3, padding=1), 10 | nn.ReLU(inplace=True), 11 | nn.MaxPool2d(2), 12 | nn.Conv2d(64, 128, 3, padding=1), 13 | nn.ReLU(inplace=True), 14 | nn.Conv2d(128, 128, 3, padding=1), 15 | nn.ReLU(inplace=True), 16 | nn.MaxPool2d(2), 17 | ) 18 | 19 | tr = tfs.Compose([tfs.Resize(224), tfs.ToTensor()]) 20 | img = tr(img_pil) 21 | 22 | model.eval() 23 | out = model(img.unsqueeze(0)) 24 | -------------------------------------------------------------------------------- /solves/3.4.7: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | import torch.optim as optim 5 | 6 | # здесь продолжайте программу 7 | model = nn.Sequential( 8 | nn.Conv2d(1, 32, 5, padding=2), 9 | nn.ReLU(inplace=True), 10 | nn.MaxPool2d(2), 11 | nn.Conv2d(32, 16, 3, padding=1), 12 | nn.ReLU(inplace=True), 13 | nn.MaxPool2d(2), 14 | nn.Flatten(), 15 | nn.Linear(1024, 1) 16 | ) 17 | 18 | d_train, d_test = data.random_split(ds, [0.7, 0.3]) 19 | train_data = data.DataLoader(d_train, batch_size=16, shuffle=True) 20 | test_data = data.DataLoader(d_test, batch_size=len(d_test), shuffle=False) 21 | 22 | optimizer = optim.Adam(params=model.parameters(), lr=0.01, weight_decay=0.01) 23 | loss_func = nn.BCEWithLogitsLoss() 24 | epochs = 2 25 | model.train() 26 | 27 | for _e in range(epochs): 28 | for x_train, y_train in train_data: 29 | predict = model(x_train) 30 | loss = loss_func(predict, y_train.unsqueeze(-1)) 31 | 32 | optimizer.zero_grad() 33 | loss.backward() 34 | optimizer.step() 35 | 36 | # тестирование обученной НС 37 | model.eval() 38 | x_test, y_test = next(iter(test_data)) 39 | with torch.no_grad(): 40 | p = model(x_test) 41 | Q = torch.sum(p.sign().flatten() == (2 * y_test.flatten() - 1)).item() 42 | 43 | Q = Q / len(d_test) 44 | -------------------------------------------------------------------------------- /solves/3.7.6: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс модели 5 | class BasicBlock(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.bb = nn.Sequential( 9 | nn.Conv2d(64, 64, 3, padding=1, bias=False), 10 | nn.BatchNorm2d(64), 11 | nn.ReLU(inplace=True), 12 | nn.Conv2d(64, 64, 3, padding=1, bias=False), 13 | nn.BatchNorm2d(64), 14 | nn.ReLU(inplace=True), 15 | ) 16 | 17 | def forward(self, x): 18 | f = self.bb(x) 19 | return f + x 20 | 21 | 22 | batch_size = 8 23 | x = torch.rand(batch_size, 64, 32, 32) # тензор x в программе не менять 24 | 25 | # здесь продолжайте программу 26 | model_bb = BasicBlock() 27 | model_bb.eval() 28 | 29 | y = model_bb(x) 30 | -------------------------------------------------------------------------------- /solves/3.7.7: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь 
объявляйте класс модели 5 | class BottleneckBlock(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.bn = nn.Sequential( 9 | nn.Conv2d(256, 64, 1, padding=0, bias=False), 10 | nn.BatchNorm2d(64), 11 | nn.ReLU(inplace=True), 12 | nn.Conv2d(64, 64, 3, padding=1, bias=False), 13 | nn.BatchNorm2d(64), 14 | nn.ReLU(inplace=True), 15 | nn.Conv2d(64, 256, 1, padding=0, bias=False), 16 | nn.BatchNorm2d(256), 17 | ) 18 | 19 | def forward(self, x): 20 | f = self.bn(x) 21 | return nn.functional.relu(f + x) 22 | 23 | 24 | batch_size = 4 25 | x = torch.rand(batch_size, 256, 16, 16) # тензор x в программе не менять 26 | 27 | # здесь продолжайте программу 28 | model_bn = BottleneckBlock() 29 | model_bn.eval() 30 | 31 | y = model_bn(x) 32 | -------------------------------------------------------------------------------- /solves/3.7.8: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс модели 5 | class BottleneckBlock(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.bn = nn.Sequential( 9 | nn.Conv2d(128, 64, 1, padding=0, bias=False), 10 | nn.BatchNorm2d(64), 11 | nn.ReLU(inplace=True), 12 | nn.Conv2d(64, 64, 3, padding=1, bias=False), 13 | nn.BatchNorm2d(64), 14 | nn.ReLU(inplace=True), 15 | nn.Conv2d(64, 256, 1, padding=0, bias=False), 16 | nn.BatchNorm2d(256), 17 | ) 18 | 19 | self.sc = nn.Sequential( 20 | nn.Conv2d(128, 256, 1, bias=False), 21 | nn.BatchNorm2d(256), 22 | ) 23 | 24 | def forward(self, x): 25 | f = self.bn(x) 26 | x = self.sc(x) 27 | return nn.functional.relu(f + x) 28 | 29 | 30 | batch_size = 4 31 | x = torch.rand(batch_size, 128, 16, 16) # тензор x в программе не менять 32 | 33 | # здесь продолжайте программу 34 | model_bn = BottleneckBlock() 35 | model_bn.eval() 36 | 37 | y = model_bn(x) 38 | -------------------------------------------------------------------------------- /solves/3.8.2: -------------------------------------------------------------------------------- 1 | # в каждом списке 4 значения - количество блоков (по порядку) 2 | # в слоях Layer1, Layer2, Layer3 и Layer4 3 | resnet18 = [2, 2, 2, 2] 4 | resnet34 = [3, 4, 6, 3] 5 | resnet50 = [3, 4, 6, 3] 6 | -------------------------------------------------------------------------------- /solves/3.8.3: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class BasicBlock1(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.bn1 = nn.Sequential( 8 | nn.Conv2d(64, 128, 3, padding=1, stride=2, bias=False), 9 | nn.BatchNorm2d(128), 10 | nn.ReLU(inplace=True), 11 | nn.Conv2d(128, 128, 3, padding=1, bias=False), 12 | nn.BatchNorm2d(128), 13 | ) 14 | 15 | self.sc1 = nn.Sequential( 16 | nn.Conv2d(64, 128, 1, stride=2, bias=False), 17 | nn.BatchNorm2d(128), 18 | ) 19 | 20 | def forward(self, x): 21 | f = self.bn1(x) 22 | x = self.sc1(x) 23 | return nn.functional.relu(f + x) 24 | 25 | 26 | class BasicBlock2(nn.Module): 27 | def __init__(self): 28 | super().__init__() 29 | self.bn2 = nn.Sequential( 30 | nn.Conv2d(128, 128, 3, padding=1, bias=False), 31 | nn.BatchNorm2d(128), 32 | nn.ReLU(inplace=True), 33 | nn.Conv2d(128, 128, 3, padding=1, bias=False), 34 | nn.BatchNorm2d(128), 35 | ) 36 | 37 | def forward(self, x): 38 | f = self.bn2(x) 39 | return nn.functional.relu(f + x) 40 | 41 | 42 | batch_size = 8 43 | x = torch.rand(batch_size, 3, 32, 32) 44 | 45 | model = nn.Sequential( 46 | nn.Conv2d(3, 64, 7, 
padding=3, stride=2, bias=False), 47 | nn.BatchNorm2d(64), 48 | nn.MaxPool2d(3, stride=2, padding=1), 49 | BasicBlock1(), 50 | BasicBlock2(), 51 | nn.AdaptiveAvgPool2d(1), 52 | nn.Flatten(), 53 | nn.Linear(128, 10) 54 | ) 55 | 56 | model.eval() 57 | predict = model(x) 58 | -------------------------------------------------------------------------------- /solves/3.8.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте классы BottleneckBlock1 и BottleneckBlock2 5 | class BottleneckBlock1(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.bn = nn.Sequential( 9 | nn.Conv2d(64, 64, 1, padding=0, bias=False), 10 | nn.BatchNorm2d(64), 11 | nn.ReLU(inplace=True), 12 | nn.Conv2d(64, 64, 3, padding=1, bias=False), 13 | nn.BatchNorm2d(64), 14 | nn.ReLU(inplace=True), 15 | nn.Conv2d(64, 256, 1, padding=0, bias=False), 16 | nn.BatchNorm2d(256), 17 | ) 18 | 19 | self.sc = nn.Sequential( 20 | nn.Conv2d(64, 256, 1, bias=False), 21 | nn.BatchNorm2d(256), 22 | ) 23 | 24 | def forward(self, x): 25 | f = self.bn(x) 26 | x = self.sc(x) 27 | return nn.functional.relu(f + x) 28 | 29 | 30 | class BottleneckBlock2(nn.Module): 31 | def __init__(self): 32 | super().__init__() 33 | self.bn = nn.Sequential( 34 | nn.Conv2d(256, 64, 1, padding=0, bias=False), 35 | nn.BatchNorm2d(64), 36 | nn.ReLU(inplace=True), 37 | nn.Conv2d(64, 64, 3, padding=1, bias=False), 38 | nn.BatchNorm2d(64), 39 | nn.ReLU(inplace=True), 40 | nn.Conv2d(64, 256, 1, padding=0, bias=False), 41 | nn.BatchNorm2d(256), 42 | ) 43 | 44 | def forward(self, x): 45 | f = self.bn(x) 46 | return nn.functional.relu(f + x) 47 | 48 | 49 | batch_size = 8 50 | x = torch.rand(batch_size, 3, 32, 32) # тензор x в программе не менять 51 | 52 | # здесь продолжайте программу 53 | model = nn.Sequential( 54 | nn.Conv2d(3, 64, 7, padding=3, stride=2, bias=False), 55 | nn.BatchNorm2d(64), 56 | nn.MaxPool2d(3, stride=2, padding=1), 57 | BottleneckBlock1(), 58 | BottleneckBlock2(), 59 | nn.AdaptiveAvgPool2d(1), 60 | nn.Flatten(), 61 | nn.Linear(256, 10) 62 | ) 63 | 64 | model.eval() 65 | predict = model(x) 66 | -------------------------------------------------------------------------------- /solves/3.9.1: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torchvision import models 4 | 5 | x = torch.rand(3, 224, 224) # тензор x в программе не менять 6 | 7 | model = models.resnet18() 8 | model.eval() 9 | out = model(x.unsqueeze(0)) 10 | -------------------------------------------------------------------------------- /solves/3.9.2: -------------------------------------------------------------------------------- 1 | import torch 2 | from torchvision import models 3 | 4 | resnet_weights = models.ResNet50_Weights.DEFAULT 5 | cats = resnet_weights.meta['categories'] 6 | 7 | print(cats[7]) 8 | -------------------------------------------------------------------------------- /solves/3.9.3: -------------------------------------------------------------------------------- 1 | import torch 2 | from torchvision import models 3 | import torchvision.transforms.functional as TF 4 | 5 | # тензор x и img_pil в программе не менять 6 | x = torch.randint(0, 255, (3, 128, 128), dtype=torch.float32) 7 | img_pil = TF.to_pil_image(x) 8 | 9 | rw = models.ResNet50_Weights.DEFAULT 10 | transforms = rw.transforms() 11 | 12 | inp_img = transforms(img_pil) 13 | 
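The transforms object used in solves/3.9.3 bundles the whole ResNet preprocessing pipeline. Roughly, and with the caveat that the exact resize size depends on which weight version DEFAULT resolves to in a given torchvision release, it corresponds to the manual sketch below (the dummy image is only there to keep the example self-contained).

import torch
import torchvision.transforms as tfs
import torchvision.transforms.functional as TF

# Dummy RGB image standing in for img_pil from the snippet above.
img_pil = TF.to_pil_image(torch.randint(0, 255, (3, 128, 128), dtype=torch.uint8))

# Approximate manual equivalent of ResNet50_Weights.DEFAULT.transforms():
# resize, center-crop to 224, scale to [0, 1], normalize with ImageNet statistics.
manual_transforms = tfs.Compose([
    tfs.Resize(232),                  # 256 for the older V1 weights
    tfs.CenterCrop(224),
    tfs.ToTensor(),
    tfs.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

inp_manual = manual_transforms(img_pil)
print(inp_manual.shape)               # torch.Size([3, 224, 224])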
-------------------------------------------------------------------------------- /solves/3.9.4: -------------------------------------------------------------------------------- 1 | import torch 2 | from torchvision import models 3 | import torchvision.transforms.functional as TF 4 | 5 | # тензор x и img_pil в программе не менять 6 | x = torch.randint(0, 255, (3, 128, 128), dtype=torch.float32) 7 | img_pil = TF.to_pil_image(x) 8 | 9 | resnet_weights = models.ResNet18_Weights.DEFAULT 10 | cats = resnet_weights.meta['categories'] 11 | transforms = resnet_weights.transforms() 12 | 13 | model = models.resnet18() 14 | model.eval() 15 | 16 | inp_img = transforms(img_pil) 17 | results = model(inp_img.unsqueeze(0)).squeeze() 18 | 19 | res = results.softmax(dim=0).sort(descending=True) 20 | for i in res[1][:4]: 21 | print(cats[i]) 22 | -------------------------------------------------------------------------------- /solves/4.1.7: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | N = 200 # размер генерируемой последовательности 5 | r = 0.95 # коэффициент регрессии 6 | std = 10 # стандартное отклонение sigma 7 | std_e = std * (1 - r * r) ** 0.5 # стандартное отклонение случайных добавок 8 | 9 | x = torch.empty(N, dtype=torch.float32) 10 | x[0] = torch.randn(1) * std 11 | 12 | # здесь продолжайте программу 13 | for i in range(1, N): 14 | x[i] = r * x[i-1] + torch.randn(1) * std_e 15 | -------------------------------------------------------------------------------- /solves/4.1.8: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс модели 5 | class RNN(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.h_size = 10 9 | self.inp = nn.Linear(16, self.h_size) 10 | self.out = nn.Linear(self.h_size, 5) 11 | 12 | def forward(self, x): # (batch_size, seq_length, d_size) 13 | n = x.size(1) 14 | b = x.size(0) 15 | h = torch.zeros(b, self.h_size) 16 | 17 | for i in range(n): 18 | a = self.inp(x[:, i, :]) 19 | h = torch.tanh(a + h) 20 | 21 | y = self.out(h) 22 | y = torch.sigmoid(y) 23 | return y 24 | 25 | 26 | batch_size = 8 # размер батча 27 | seq_length = 6 # длина последовательности 28 | in_features = 16 # размер каждого элемента последовательности 29 | x = torch.rand(batch_size, seq_length, in_features) 30 | 31 | # здесь продолжайте программу 32 | model = RNN() 33 | model.eval() 34 | 35 | out = model(x) 36 | -------------------------------------------------------------------------------- /solves/4.2.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс GetOutput 5 | class GetOutput(nn.Module): 6 | def forward(self, x): 7 | return x[1].squeeze(0) 8 | 9 | 10 | # тензор x в программе не менять 11 | batch_size = 4 12 | seq_length = 8 13 | in_features = 10 14 | x = torch.rand(batch_size, seq_length, in_features) 15 | 16 | # здесь продолжайте программу 17 | model = nn.Sequential( 18 | nn.RNN(in_features, 15, batch_first=True), 19 | GetOutput(), 20 | nn.ReLU(inplace=True), 21 | nn.Linear(15, 5) 22 | ) 23 | 24 | model.eval() 25 | res = model(x) 26 | -------------------------------------------------------------------------------- /solves/4.2.5: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс OutputModule 5 | 
class OutputModule(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.act = nn.ReLU(inplace=True) 9 | self.layer = nn.Linear(25, 10) 10 | 11 | def forward(self, x): 12 | y = self.layer(self.act(x[0])) 13 | return y 14 | 15 | 16 | # тензор x в программе не менять 17 | batch_size = 7 18 | seq_length = 5 19 | in_features = 15 20 | x = torch.rand(batch_size, seq_length, in_features) 21 | 22 | # здесь продолжайте программу 23 | model = nn.Sequential( 24 | nn.RNN(in_features, 25, batch_first=True), 25 | OutputModule(), 26 | ) 27 | 28 | model.eval() 29 | out = model(x) 30 | -------------------------------------------------------------------------------- /solves/4.2.8: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс OutputToLinear 5 | class OutputToLinear(nn.Module): 6 | def forward(self, x): 7 | return x[1][-1] 8 | 9 | 10 | # тензор x в программе не менять 11 | batch_size = 18 12 | seq_length = 21 13 | in_features = 5 14 | x = torch.rand(batch_size, seq_length, in_features) 15 | 16 | # здесь продолжайте программу 17 | model = nn.Sequential( 18 | nn.RNN(in_features, 25, 2, batch_first=True), 19 | OutputToLinear(), 20 | nn.ReLU(inplace=True), 21 | nn.Linear(25, 5) 22 | ) 23 | 24 | model.eval() 25 | predict = model(x) 26 | -------------------------------------------------------------------------------- /solves/4.3.1: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | sigma = 0.1 # стандартное отклонение отсчетов последовательности 5 | r = 0.9 # коэффициент регрессии 6 | sigma_noise = sigma * (1 - r * r) ** 0.5 # стандартное отклонение случайных величин 7 | 8 | total = 100 # длина генерируемой последовательности 9 | noise = torch.randn((total, )) # случайные величины, подаваемые на вход модели 10 | x0 = torch.randn((1, )) * sigma # начальное значение вектора скрытого состояния 11 | 12 | # здесь продолжайте программу 13 | model = nn.RNN(1, 1, bias=False, batch_first=True) 14 | model.weight_hh_l0.data = torch.tensor([[r]], dtype=torch.float32) 15 | model.weight_ih_l0.data = torch.tensor([[sigma_noise]], dtype=torch.float32) 16 | model.eval() 17 | 18 | x, _ = model(noise.view(1, total, 1), x0.view(1, 1, 1)) 19 | -------------------------------------------------------------------------------- /solves/4.3.2: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | sigma_x, sigma_y = 0.1, 0.15 # стандартные отклонения отсчетов последовательности 5 | rx, ry = 0.9, 0.99 # # коэффициенты регрессии 6 | sigma_noise_x = sigma_x * (1 - rx * rx) ** 0.5 # стандартное отклонение случайных величин 7 | sigma_noise_y = sigma_y * (1 - ry * ry) ** 0.5 # стандартное отклонение случайных величин 8 | 9 | total = 100 # длина генерируемой последовательности 10 | noise = torch.randn((total, 2)) # случайные величины, подаваемые на вход модели 11 | h0 = torch.randn((1, 2)) * torch.tensor([sigma_noise_x, sigma_noise_y]) # начальное значение вектора скрытого состояния 12 | 13 | # здесь продолжайте программу 14 | model = nn.RNN(2, 2, bias=False, batch_first=True) 15 | model.weight_hh_l0.data = torch.tensor([[rx, 0], [0, ry]], dtype=torch.float32) 16 | model.weight_ih_l0.data = torch.tensor([[sigma_noise_x, 0], [0, sigma_noise_y]], dtype=torch.float32) 17 | model.eval() 18 | 19 | x, _ = model(noise.view(1, total, 2), h0.view(1, 1, 2)) 20 | 
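Solves/4.3.1 and 4.3.2 turn nn.RNN into a first-order autoregressive generator by writing the regression coefficients straight into weight_hh_l0 and weight_ih_l0. A plain loop in the spirit of solves/4.1.7 is a convenient cross-check; the sketch below reuses the constants from 4.3.1 and only approximates the RNN output, because nn.RNN additionally passes the hidden state through tanh (negligible while the amplitudes stay small).

import torch

torch.manual_seed(0)

r, sigma = 0.9, 0.1                        # same constants as in solves/4.3.1
sigma_noise = sigma * (1 - r * r) ** 0.5   # std of the driving noise
total = 100

noise = torch.randn(total)
x = torch.empty(total)
x[0] = torch.randn(1) * sigma              # initial state

# Explicit AR(1) recurrence: x[i] = r * x[i-1] + sigma_noise * noise[i]
for i in range(1, total):
    x[i] = r * x[i - 1] + sigma_noise * noise[i]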
-------------------------------------------------------------------------------- /solves/4.3.3: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | import torch.optim as optim 5 | 6 | # здесь объявляйте класс модели 7 | class MyModelRNN(nn.Module): 8 | def __init__(self): 9 | super().__init__() 10 | self._h_size = 5 11 | self.rnn = nn.RNN(1, self._h_size, batch_first=True) 12 | self.output = nn.Linear(self._h_size, 1) 13 | 14 | def forward(self, x): 15 | _, h = self.rnn(x) 16 | return self.output(h) 17 | 18 | 19 | x = torch.linspace(-20, 20, 2000) 20 | y = torch.cos(x) + 0.5 * torch.sin(5*x) + 0.1 * torch.randn_like(x) 21 | 22 | total = len(x) # общее количество отсчетов 23 | train_size = 1000 # размер обучающей выборки 24 | seq_length = 10 # число предыдущих отсчетов, по которым строится прогноз следующего значения 25 | 26 | y.unsqueeze_(1) 27 | train_data_y = torch.cat([y[i:i+seq_length] for i in range(train_size-seq_length)], dim=1) 28 | train_targets = torch.tensor([y[i+seq_length].item() for i in range(train_size-seq_length)]) 29 | 30 | test_data_y = torch.cat([y[i:i+seq_length] for i in range(train_size-seq_length, total-seq_length)], dim=1) 31 | test_targets = torch.tensor([y[i+seq_length].item() for i in range(train_size-seq_length, total-seq_length)]) 32 | 33 | d_train = data.TensorDataset(train_data_y.permute(1, 0), train_targets) 34 | d_test = data.TensorDataset(test_data_y.permute(1, 0), test_targets) 35 | 36 | train_data = data.DataLoader(d_train, batch_size=8, shuffle=True) 37 | test_data = data.DataLoader(d_test, batch_size=len(d_test), shuffle=False) 38 | 39 | model = MyModelRNN() # создание объекта модели 40 | 41 | optimizer = optim.RMSprop(params=model.parameters(), lr=0.001) 42 | loss_func = nn.MSELoss() 43 | 44 | epochs = 5 # число эпох 45 | model.train() 46 | 47 | for _e in range(epochs): 48 | for x_train, y_train in train_data: 49 | predict = model(x_train.unsqueeze(-1)).squeeze() 50 | loss = loss_func(predict, y_train) 51 | 52 | optimizer.zero_grad() 53 | loss.backward() 54 | optimizer.step() 55 | 56 | model.eval() 57 | d, t = next(iter(test_data)) 58 | with torch.no_grad(): 59 | predict = model(d.unsqueeze(-1)).squeeze() 60 | 61 | Q = loss_func(predict, t).item() 62 | -------------------------------------------------------------------------------- /solves/4.3.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | 5 | 6 | # здесь объявляйте класс CharsDataset 7 | class CharsDataset(data.Dataset): 8 | def __init__(self, prev_chars=7): 9 | self.prev_chars = prev_chars 10 | 11 | self.lines = _global_var_text 12 | self.alphabet = set(("".join(self.lines)).lower()) 13 | self.int_to_alpha = dict(enumerate(sorted(self.alphabet))) 14 | self.alpha_to_int = {b: a for a, b in self.int_to_alpha.items()} 15 | self.num_characters = len(self.alphabet) 16 | self.onehots = torch.eye(self.num_characters) 17 | 18 | data = [] 19 | targets = [] 20 | 21 | for i, t in enumerate(self.lines): 22 | t = t.lower() 23 | for item in range(len(t)-self.prev_chars): 24 | data.append([self.alpha_to_int[t[x]] for x in range(item, item + self.prev_chars)]) 25 | targets.append(self.alpha_to_int[t[item+self.prev_chars]]) 26 | 27 | self.data = torch.tensor(data) 28 | self.targets = torch.tensor(targets) 29 | 30 | self.length = len(data) 31 | 32 | def __getitem__(self, item): 33 | 
return self.onehots[self.data[item]], self.targets[item] 34 | 35 | def __len__(self): 36 | return self.length 37 | 38 | 39 | # здесь продолжайте программу 40 | d_train = CharsDataset(prev_chars=10) 41 | train_data = data.DataLoader(d_train, batch_size=8, shuffle=True) 42 | -------------------------------------------------------------------------------- /solves/4.3.5: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | import torch.optim as optim 5 | 6 | 7 | class CharsDataset(data.Dataset): 8 | def __init__(self, prev_chars=7): 9 | self.prev_chars = prev_chars 10 | 11 | self.lines = _global_var_text 12 | self.alphabet = set(("".join(self.lines)).lower()) 13 | self.int_to_alpha = dict(enumerate(sorted(self.alphabet))) 14 | self.alpha_to_int = {b: a for a, b in self.int_to_alpha.items()} 15 | self.num_characters = len(self.alphabet) 16 | self.onehots = torch.eye(self.num_characters) 17 | 18 | data = [] 19 | targets = [] 20 | 21 | for i, t in enumerate(self.lines): 22 | t = t.lower() 23 | for item in range(len(t)-self.prev_chars): 24 | data.append([self.alpha_to_int[t[x]] for x in range(item, item + self.prev_chars)]) 25 | targets.append(self.alpha_to_int[t[item+self.prev_chars]]) 26 | 27 | self.data = torch.tensor(data) 28 | self.targets = torch.tensor(targets) 29 | 30 | self.length = len(data) 31 | 32 | def __getitem__(self, item): 33 | return self.onehots[self.data[item]], self.targets[item] 34 | 35 | def __len__(self): 36 | return self.length 37 | 38 | 39 | class TextRNN(nn.Module): 40 | def __init__(self, in_features, out_features): 41 | super().__init__() 42 | self.hidden_size = 32 43 | self.in_features = in_features 44 | self.out_features = out_features 45 | 46 | self.rnn = nn.RNN(in_features, self.hidden_size, batch_first=True) 47 | self.out = nn.Linear(self.hidden_size, out_features) 48 | 49 | def forward(self, x): 50 | x, h = self.rnn(x) 51 | y = self.out(h) 52 | return y 53 | 54 | 55 | # сюда копируйте объекты d_train и train_data 56 | d_train = CharsDataset(prev_chars=10) 57 | train_data = data.DataLoader(d_train, batch_size=8, shuffle=True) 58 | 59 | model = TextRNN(d_train.num_characters, d_train.num_characters) 60 | 61 | optimizer = optim.Adam(params=model.parameters(), lr=0.01) 62 | loss_func = nn.CrossEntropyLoss() 63 | 64 | epochs = 1 # число эпох 65 | model.train() 66 | 67 | for _e in range(epochs): 68 | for x_train, y_train in train_data: 69 | predict = model(x_train).squeeze(0) 70 | loss = loss_func(predict, y_train.long()) 71 | 72 | optimizer.zero_grad() 73 | loss.backward() 74 | optimizer.step() 75 | 76 | model.eval() 77 | predict = "нейронная сеть ".lower() # начальная фраза 78 | total = 20 # число прогнозируемых символов (дополнительно к начальной фразе) 79 | 80 | for _ in range(total): 81 | _data = d_train.onehots[[d_train.alpha_to_int[predict[-x]] for x in range(d_train.prev_chars, 0, -1)]] 82 | with torch.no_grad(): 83 | p = model(_data.unsqueeze(0)).squeeze(0) 84 | indx = torch.argmax(p, dim=1) 85 | predict += d_train.int_to_alpha[indx.item()] 86 | 87 | print(predict) 88 | -------------------------------------------------------------------------------- /solves/4.4.2: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | emb_words = torch.IntTensor([[10, 8], [6, 7], [-5, 4], [-4, 1], [4, -2]]) 4 | -------------------------------------------------------------------------------- /solves/4.4.5: 
-------------------------------------------------------------------------------- 1 | import torch 2 | 3 | emb_words = torch.IntTensor([ 4 | [1, 2, 0, 2, 1, 0, 0, 0, 1, 1, 0], 5 | [0, 1, 2, 0, 1, 1, 0, 0, 1, 1, 1], 6 | [0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0], 7 | [0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0], 8 | [1, 0, 2, 1, 0, 0, 1, 1, 1, 0, 0], 9 | [0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0] 10 | ]) 11 | -------------------------------------------------------------------------------- /solves/4.5.3: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | emb_layer = nn.Embedding(128, 16) 6 | emb_layer.load_state_dict(st_emb) 7 | 8 | num = torch.LongTensor([78]) 9 | emb_vect = emb_layer(num) 10 | -------------------------------------------------------------------------------- /solves/4.5.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | 5 | class WordsDataset(data.Dataset): 6 | def __init__(self, navec_emb, prev_words=4): 7 | self.prev_words = prev_words 8 | self.navec_emb = navec_emb 9 | 10 | self.lines = _global_var_text 11 | self.vocab = set((" ".join(self.lines)).lower().split()) 12 | self.vocab_size = len(self.vocab) 13 | 14 | data = [] 15 | targets = [] 16 | 17 | for t in self.lines: 18 | words = t.lower().split() 19 | for item in range(len(words)-self.prev_words): 20 | data.append([self.navec_emb[words[x]].tolist() for x in range(item, item + self.prev_words)]) 21 | targets.append(self.navec_emb.vocab[words[item+self.prev_words]]) 22 | 23 | self.data = torch.tensor(data) 24 | self.targets = torch.tensor(targets) 25 | 26 | self.length = len(data) 27 | 28 | def __getitem__(self, item): 29 | return self.data[item], self.targets[item] 30 | 31 | def __len__(self): 32 | return self.length 33 | 34 | 35 | d_train = WordsDataset(global_navec) 36 | train_data = data.DataLoader(d_train, batch_size=8, shuffle=True) 37 | -------------------------------------------------------------------------------- /solves/4.5.5: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | import torch.optim as optim 5 | 6 | 7 | class WordsDataset(data.Dataset): 8 | def __init__(self, navec_emb, prev_words=4): 9 | self.prev_words = prev_words 10 | self.navec_emb = navec_emb 11 | 12 | self.lines = _global_var_text 13 | self.vocab = set((" ".join(self.lines)).lower().split()) 14 | self.vocab_size = len(self.vocab) 15 | 16 | data = [] 17 | targets = [] 18 | 19 | for t in self.lines: 20 | words = t.lower().split() 21 | for item in range(len(words)-self.prev_words): 22 | data.append([self.navec_emb[words[x]].tolist() for x in range(item, item + self.prev_words)]) 23 | targets.append(self.navec_emb.vocab[words[item+self.prev_words]]) 24 | 25 | self.data = torch.tensor(data) 26 | self.targets = torch.tensor(targets) 27 | 28 | self.length = len(data) 29 | 30 | def __getitem__(self, item): 31 | return self.data[item], self.targets[item] 32 | 33 | def __len__(self): 34 | return self.length 35 | 36 | 37 | class WordsRNN(nn.Module): 38 | def __init__(self, in_features, out_features): 39 | super().__init__() 40 | self.hidden_size = 16 41 | self.in_features = in_features 42 | self.out_features = out_features 43 | 44 | self.rnn = nn.RNN(in_features, self.hidden_size, batch_first=True) 45 | self.out = nn.Linear(self.hidden_size, out_features) 46 | 47 | def 
forward(self, x): 48 | x, h = self.rnn(x) 49 | y = self.out(h) 50 | return y 51 | 52 | 53 | d_train = WordsDataset(global_navec) 54 | train_data = data.DataLoader(d_train, batch_size=8, shuffle=True) 55 | 56 | model = WordsRNN(100, len(global_navec.vocab)) 57 | 58 | optimizer = optim.Adam(params=model.parameters(), lr=0.01, weight_decay=0.0001) 59 | loss_func = nn.CrossEntropyLoss() 60 | 61 | epochs = 1 62 | model.train() 63 | 64 | for _e in range(epochs): 65 | for x_train, y_train in train_data: 66 | predict = model(x_train).squeeze(0) 67 | loss = loss_func(predict, y_train.long()) 68 | 69 | optimizer.zero_grad() 70 | loss.backward() 71 | optimizer.step() 72 | 73 | model.eval() 74 | predict = "Такими были первые нейронные сети предложенные".lower().split() 75 | total = 10 76 | 77 | int_to_word = dict(enumerate((global_navec.vocab))) 78 | for _ in range(total): 79 | _data = torch.tensor([d_train.navec_emb[predict[-x]].tolist() for x in range(d_train.prev_words, 0, -1)]) 80 | with torch.no_grad(): 81 | p = model(_data.unsqueeze(0)).squeeze(0) 82 | indx = torch.argmax(p, dim=1) 83 | predict.append(int_to_word[indx.item()]) 84 | 85 | predict = " ".join(predict) 86 | print(predict) 87 | -------------------------------------------------------------------------------- /solves/4.5.6: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torchvision import models 4 | 5 | x = torch.rand(3, 224, 224) # тензор x в программе не менять 6 | 7 | model = models.resnet18() 8 | model.eval() 9 | out = model(x.unsqueeze(0)) 10 | -------------------------------------------------------------------------------- /solves/4.6.10: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class MyModel(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.inp_2 = nn.Sequential( 8 | nn.Linear(12, 12), 9 | nn.Sigmoid() 10 | ) 11 | self.out = nn.Sequential( 12 | nn.Linear(12, 32), 13 | nn.ReLU(), 14 | nn.Linear(32, 1) 15 | ) 16 | self.inp_1 = nn.Sequential( 17 | nn.Linear(7, 12), 18 | nn.Tanh() 19 | ) 20 | 21 | def forward(self, a, b): 22 | x1 = self.inp_1(a) 23 | x2 = self.inp_2(b) 24 | return self.out(x1 + x2) 25 | 26 | 27 | batch_size=12 28 | a = torch.rand(batch_size, 7) # тензоры a, b в программе не менять 29 | b = torch.rand(batch_size, 12) 30 | 31 | model = MyModel() 32 | model.eval() 33 | predict = model(a, b) 34 | -------------------------------------------------------------------------------- /solves/4.6.7: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class TwoLayerModel(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.layer1 = nn.Linear(3, 2) 9 | self.layer2 = nn.Linear(2, 1) 10 | 11 | def forward(self, x): 12 | x = self.layer1(x) 13 | x = torch.sigmoid(x) 14 | x = self.layer2(x) 15 | return x 16 | 17 | 18 | x = torch.rand(3) # тензор x в программе не менять 19 | 20 | model = TwoLayerModel() 21 | model.eval() 22 | predict = model(x) 23 | -------------------------------------------------------------------------------- /solves/4.6.8: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | batch_size=8 5 | x = torch.rand(batch_size, 5) # тензор x в программе не менять 6 | 7 | model = nn.Sequential( 8 | nn.Linear(5, 16, bias=False), 9 | nn.ReLU(), 10 | 
nn.BatchNorm1d(16), 11 | nn.Linear(16, 3) 12 | ) 13 | 14 | model.eval() 15 | predict = model(x) 16 | -------------------------------------------------------------------------------- /solves/4.6.9: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | batch_size=16 5 | x = torch.rand(batch_size, 12) # do not modify tensor x in this program 6 | 7 | model = nn.Sequential() 8 | model.add_module('layer1', nn.Linear(12, 24)) 9 | model.add_module('act1', nn.Tanh()) 10 | model.add_module('layer2', nn.Linear(24, 10)) 11 | model.add_module('act2', nn.Tanh()) 12 | model.add_module('out', nn.Linear(10, 1)) 13 | 14 | model.eval() 15 | predict = model(x) 16 | -------------------------------------------------------------------------------- /solves/4.7.2: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class RNNModel(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.rnn = nn.RNN(32, 12, batch_first=True, bidirectional=True) 9 | self.out = nn.Linear(24, 5) 10 | 11 | def forward(self, x): 12 | _, h = self.rnn(x) 13 | y = torch.cat([h[0], h[1]], dim=1) 14 | return self.out(y) 15 | 16 | 17 | batch_size = 8 18 | seq_length = 12 19 | d_size = 32 20 | x = torch.rand(batch_size, seq_length, d_size) 21 | 22 | model = RNNModel() 23 | model.eval() 24 | predict = model(x) 25 | -------------------------------------------------------------------------------- /solves/4.7.3: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # declare the OutputModule class here 5 | class OutputModule(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.layer = nn.Linear(8, 2, bias=False) 9 | 10 | def forward(self, x): 11 | batch_size = x[0].size(0) 12 | n = x[0].size(1) 13 | y = torch.empty(batch_size, n, self.layer.out_features) 14 | 15 | for i in range(n): 16 | y[:, i, :] = self.layer(x[0][:, i, :]) 17 | return y 18 | 19 | 20 | # do not modify tensor x in this program 21 | batch_size = 4 22 | seq_length = 64 23 | in_features = 5 24 | x = torch.rand(batch_size, seq_length, in_features) 25 | 26 | # continue the program here 27 | model = nn.Sequential( 28 | nn.RNN(in_features, 4, batch_first=True, bidirectional=True), 29 | OutputModule(), 30 | ) 31 | 32 | model.eval() 33 | out = model(x) 34 | -------------------------------------------------------------------------------- /solves/4.7.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # declare the model class here 5 | class MyModelRNN(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.rnn_1 = nn.RNN(5, 7, batch_first=True, bidirectional=True) 9 | self.output = nn.Linear(14, 2) 10 | 11 | def forward(self, x): 12 | y, _ = self.rnn_1(x) 13 | n = y.size(1) 14 | out = torch.empty(y.size(0), n, self.output.out_features) 15 | 16 | for i in range(n): 17 | out[:, i, :] = self.output(y[:, i, :]) 18 | return out 19 | 20 | 21 | # do not modify tensor x in this program 22 | batch_size = 4 23 | seq_length = 12 24 | d_size = 5 25 | x = torch.rand(batch_size, d_size) 26 | 27 | # continue the program here 28 | model = MyModelRNN() 29 | model.eval() 30 | 31 | u = torch.zeros(batch_size, seq_length, d_size) 32 | u[:, 0, :] = x 33 | predict = model(u) 34 | -------------------------------------------------------------------------------- /solves/4.7.5: 
-------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # declare the model class here 5 | class MyModelRNN(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.rnn_1 = nn.RNN(5, 9, batch_first=True, bidirectional=True) 9 | self.rnn_2 = nn.RNN(18, 32, batch_first=True) 10 | self.output = nn.Linear(32, 3) 11 | self.out_length = 25 12 | 13 | def forward(self, x): 14 | _, h = self.rnn_1(x) 15 | y = torch.cat([h[0], h[1]], dim=1) 16 | u = torch.zeros(y.size(0), self.out_length, y.size(1)) 17 | u[:, 0, :] = y 18 | y, _ = self.rnn_2(u) 19 | 20 | n = y.size(1) 21 | out = torch.empty(y.size(0), n, self.output.out_features) 22 | 23 | for i in range(n): 24 | out[:, i, :] = self.output(y[:, i, :]) 25 | return out 26 | 27 | 28 | # do not modify tensor x in this program 29 | batch_size = 2 30 | seq_length = 12 31 | in_features = 5 32 | x = torch.rand(batch_size, seq_length, in_features) 33 | 34 | # continue the program here 35 | model = MyModelRNN() 36 | 37 | model.eval() 38 | results = model(x) 39 | -------------------------------------------------------------------------------- /solves/4.7.6: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | 5 | class WordsDataset(data.Dataset): 6 | def __init__(self, batch_size=8): # class initializer 7 | self.batch_size = batch_size 8 | 9 | self.words_lst = [(_x, 0) for _x in _global_words_0] + [(_x, 1) for _x in _global_words_1] 10 | self.words_lst.sort(key=lambda _x: len(_x[0])) 11 | self.dataset_len = len(self.words_lst) 12 | 13 | _text = "".join(_global_words_0 + _global_words_1).lower() 14 | self.alphabet = set(_text) 15 | self.int_to_alpha = dict(enumerate(sorted(self.alphabet))) 16 | self.alpha_to_int = {b: a for a, b in self.int_to_alpha.items()} 17 | self.num_characters = len(self.alphabet) 18 | self.onehots = torch.eye(self.num_characters + 1, self.num_characters) 19 | 20 | def __getitem__(self, item): # build and return the batch of data for index item 21 | item *= self.batch_size 22 | item_last = item + self.batch_size 23 | if item_last > self.dataset_len: 24 | item_last = self.dataset_len 25 | 26 | max_length = len(self.words_lst[item_last - 1][0]) 27 | 28 | d = [[self.alpha_to_int[_x] for _x in _w[0]] + [-1] * (max_length - len(_w[0])) for _w in self.words_lst[item: item_last]] 29 | t = torch.FloatTensor([_w[1] for _w in self.words_lst[item: item_last]]) 30 | 31 | data = torch.zeros(len(d), max_length, self.num_characters) 32 | for i, indx in enumerate(d): 33 | data[i, :, :] = self.onehots[indx] 34 | 35 | return data, t 36 | 37 | def __len__(self): # return the training set size in batches 38 | last = 0 if self.dataset_len % self.batch_size == 0 else 1 39 | return self.dataset_len // self.batch_size + last 40 | 41 | 42 | # continue the program here 43 | d_train = WordsDataset(batch_size=8) 44 | train_data = data.DataLoader(d_train, batch_size=1, shuffle=True) 45 | -------------------------------------------------------------------------------- /solves/4.7.7: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | import torch.optim as optim 5 | 6 | class WordsDataset(data.Dataset): 7 | def __init__(self, batch_size=8): # class initializer 8 | self.batch_size = batch_size 9 | 10 | self.words_lst = [(_x, 0) for _x in _global_words_0] + 
[(_x, 1) for _x in _global_words_1] 11 | self.words_lst.sort(key=lambda _x: len(_x[0])) 12 | self.dataset_len = len(self.words_lst) 13 | 14 | _text = "".join(_global_words_0 + _global_words_1).lower() 15 | self.alphabet = set(_text) 16 | self.int_to_alpha = dict(enumerate(sorted(self.alphabet))) 17 | self.alpha_to_int = {b: a for a, b in self.int_to_alpha.items()} 18 | self.num_characters = len(self.alphabet) 19 | self.onehots = torch.eye(self.num_characters + 1, self.num_characters) 20 | 21 | def __getitem__(self, item): # build and return the batch of data for index item 22 | item *= self.batch_size 23 | item_last = item + self.batch_size 24 | if item_last > self.dataset_len: 25 | item_last = self.dataset_len 26 | 27 | max_length = len(self.words_lst[item_last - 1][0]) 28 | 29 | d = [[self.alpha_to_int[_x] for _x in _w[0]] + [-1] * (max_length - len(_w[0])) for _w in self.words_lst[item: item_last]] 30 | t = torch.FloatTensor([_w[1] for _w in self.words_lst[item: item_last]]) 31 | 32 | data = torch.zeros(len(d), max_length, self.num_characters) 33 | for i, indx in enumerate(d): 34 | data[i, :, :] = self.onehots[indx] 35 | 36 | return data, t 37 | 38 | def __len__(self): # return the training set size in batches 39 | last = 0 if self.dataset_len % self.batch_size == 0 else 1 40 | return self.dataset_len // self.batch_size + last 41 | 42 | 43 | class WordPalindrom(nn.Module): 44 | def __init__(self, in_features): 45 | super().__init__() 46 | self.hidden_size = 16 47 | self.in_features = in_features 48 | 49 | self.rnn = nn.RNN(in_features, self.hidden_size, batch_first=True, bidirectional=True) 50 | self.out = nn.Linear(self.hidden_size * 2, 1) 51 | 52 | def forward(self, x): 53 | _, h = self.rnn(x) 54 | y = torch.cat([h[0], h[1]], dim=1) 55 | y = self.out(y) 56 | return y 57 | 58 | 59 | # continue the program here 60 | d_train = WordsDataset(batch_size=8) 61 | train_data = data.DataLoader(d_train, batch_size=1, shuffle=True) 62 | 63 | model = WordPalindrom(d_train.num_characters) 64 | 65 | optimizer = optim.Adam(params=model.parameters(), lr=0.01, weight_decay=0.001) 66 | loss_func = nn.BCEWithLogitsLoss() 67 | 68 | epochs = 2 69 | model.train() 70 | 71 | for _e in range(epochs): 72 | for x_train, y_train in train_data: 73 | predict = model(x_train.squeeze(0)) 74 | loss = loss_func(predict, y_train.view(-1, 1)) 75 | 76 | optimizer.zero_grad() 77 | loss.backward() 78 | optimizer.step() 79 | 80 | model.eval() 81 | Q = 0 82 | for x_train, y_train in train_data: 83 | with torch.no_grad(): 84 | p = model(x_train.squeeze(0)) 85 | Q += torch.mean((torch.sign(p.flatten()) == 2 * y_train.flatten() - 1).float()) 86 | 87 | Q = Q / len(d_train) 88 | -------------------------------------------------------------------------------- /solves/4.8.6: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # declare the LSTMToLinear class here 5 | class LSTMToLinear(nn.Module): 6 | def forward(self, x): 7 | return x[1][0].squeeze(0) 8 | 9 | 10 | # do not modify tensor x in this program 11 | batch_size = 18 12 | seq_length = 21 13 | in_features = 5 14 | x = torch.rand(batch_size, seq_length, in_features) 15 | 16 | # continue the program here 17 | model = nn.Sequential( 18 | nn.LSTM(in_features, 25, batch_first=True), 19 | LSTMToLinear(), 20 | nn.Linear(25, 5) 21 | ) 22 | 23 | model.eval() 24 | res = model(x) 25 | -------------------------------------------------------------------------------- /solves/4.8.7: 
-------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # declare the OutputModule class here 5 | class OutputModule(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.layer = nn.Linear(24, 5) 9 | 10 | def forward(self, x): 11 | batch_size = x[0].size(0) 12 | n = x[0].size(1) 13 | y = torch.empty(batch_size, n, self.layer.out_features) 14 | 15 | for i in range(n): 16 | y[:, i, :] = self.layer(x[0][:, i, :]) 17 | return y 18 | 19 | 20 | # do not modify tensor x in this program 21 | batch_size = 7 22 | seq_length = 89 23 | in_features = 3 24 | x = torch.rand(batch_size, seq_length, in_features) 25 | 26 | # continue the program here 27 | model = nn.Sequential( 28 | nn.LSTM(in_features, 12, batch_first=True, bidirectional=True), 29 | OutputModule(), 30 | ) 31 | 32 | model.eval() 33 | out = model(x) 34 | -------------------------------------------------------------------------------- /solves/4.8.8: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | import torch.optim as optim 5 | 6 | class LSTMToLinear(nn.Module): 7 | def forward(self, x): 8 | return x[1][0].squeeze(0) 9 | 10 | 11 | model = nn.Sequential( 12 | nn.LSTM(1, 10, batch_first=True), 13 | LSTMToLinear(), 14 | nn.Linear(10, 1) 15 | ) 16 | 17 | x = torch.linspace(-10, 10, 2000) 18 | y = torch.cos(x) + 0.5 * torch.sin(5*x) + 0.1 * torch.randn_like(x) + 0.2 * x 19 | 20 | total = len(x) # total number of samples 21 | train_size = 1000 # training set size 22 | seq_length = 20 # number of previous samples used to predict the next value 23 | 24 | y.unsqueeze_(1) 25 | train_data_y = torch.cat([y[i:i+seq_length] for i in range(train_size-seq_length)], dim=1) 26 | train_targets = torch.tensor([y[i+seq_length].item() for i in range(train_size-seq_length)]) 27 | 28 | test_data_y = torch.cat([y[i:i+seq_length] for i in range(train_size-seq_length, total-seq_length)], dim=1) 29 | test_targets = torch.tensor([y[i+seq_length].item() for i in range(train_size-seq_length, total-seq_length)]) 30 | 31 | d_train = data.TensorDataset(train_data_y.permute(1, 0), train_targets) 32 | d_test = data.TensorDataset(test_data_y.permute(1, 0), test_targets) 33 | 34 | train_data = data.DataLoader(d_train, batch_size=8, shuffle=True) 35 | test_data = data.DataLoader(d_test, batch_size=len(d_test), shuffle=False) 36 | 37 | optimizer = optim.RMSprop(params=model.parameters(), lr=0.01) 38 | loss_func = nn.MSELoss() 39 | 40 | epochs = 5 # number of epochs 41 | model.train() 42 | 43 | for _e in range(epochs): 44 | for x_train, y_train in train_data: 45 | predict = model(x_train.unsqueeze(-1)).squeeze() 46 | loss = loss_func(predict, y_train) 47 | 48 | optimizer.zero_grad() 49 | loss.backward() 50 | optimizer.step() 51 | 52 | model.eval() 53 | d, t = next(iter(test_data)) 54 | with torch.no_grad(): 55 | predict = model(d.unsqueeze(-1)).squeeze() 56 | 57 | Q = loss_func(predict, t).item() 58 | -------------------------------------------------------------------------------- /solves/4.9.7: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # declare the model class here 5 | class MyModelRNN(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.gru = nn.GRU(2, 5, batch_first=True, bidirectional=True, bias=False) 9 | self.mb = nn.BatchNorm1d(10) 10 | self.output = 
nn.Linear(10, 4) 11 | 12 | def forward(self, x): 13 | y, _ = self.gru(x) 14 | n = y.size(1) 15 | out = torch.empty(y.size(0), n, self.output.out_features) 16 | 17 | for i in range(n): 18 | out[:, i, :] = self.output(self.mb(y[:, i, :])) 19 | return out 20 | 21 | 22 | # do not modify tensor x in this program 23 | batch_size = 3 24 | seq_length = 17 25 | d_size = 2 26 | x = torch.rand(batch_size, d_size) 27 | 28 | # continue the program here 29 | model = MyModelRNN() 30 | model.eval() 31 | 32 | u = torch.zeros(batch_size, seq_length, d_size) 33 | u[:, 0, :] = x 34 | predict = model(u) 35 | -------------------------------------------------------------------------------- /solves/5.1.2: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class AutoEncoder(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.hidden = nn.Linear(2, 1) 8 | self.output = nn.Linear(1, 2) 9 | 10 | def forward(self, x): 11 | x = self.hidden(x) 12 | return self.output(x) 13 | 14 | 15 | model_ae = AutoEncoder() 16 | model_ae.hidden.weight.data = torch.tensor([[1.0, 0.0]]) 17 | model_ae.hidden.bias.data = torch.tensor([0.0]) 18 | model_ae.output.weight.data = torch.tensor([[1.0], [3/7]]) 19 | model_ae.output.bias.data = torch.tensor([0.0, 3.0]) 20 | -------------------------------------------------------------------------------- /solves/5.1.5: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class AutoEncoder(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.encoder = nn.Sequential( 8 | nn.Conv2d(3, 8, 3, stride=2, padding=1, bias=False), 9 | nn.ReLU(inplace=True), 10 | nn.BatchNorm2d(8), 11 | nn.Conv2d(8, 4, 3, stride=2, padding=1, bias=False), 12 | nn.ReLU(inplace=True), 13 | nn.BatchNorm2d(4), 14 | nn.Flatten(), 15 | nn.Linear(64, 4), 16 | nn.ReLU(inplace=True), 17 | ) 18 | 19 | self.decoder = nn.Sequential( 20 | nn.Linear(4, 64), 21 | nn.ELU(inplace=True), 22 | nn.Unflatten(1, (4, 4, 4)), 23 | nn.ConvTranspose2d(4, 8, 2, 2), 24 | nn.ELU(inplace=True), 25 | nn.ConvTranspose2d(8, 1, 2, 2), 26 | ) 27 | 28 | def forward(self, x): 29 | h = self.encoder(x) 30 | return self.decoder(h), h 31 | 32 | 33 | h = torch.rand(4) 34 | 35 | model = AutoEncoder() 36 | model.eval() 37 | model.load_state_dict(st_model) 38 | 39 | out = model.decoder(h.unsqueeze(0)) 40 | -------------------------------------------------------------------------------- /solves/5.1.6: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.utils.data as data 4 | import torch.optim as optim 5 | 6 | 7 | class AutoEncoder(nn.Module): 8 | def __init__(self): 9 | super().__init__() 10 | self.encoder = nn.Sequential( 11 | nn.Linear(8, 16), 12 | nn.ReLU(inplace=True), 13 | nn.Linear(16, 4), 14 | nn.ReLU(inplace=True), 15 | nn.Linear(4, 2), 16 | nn.Tanh(), 17 | ) 18 | 19 | self.decoder = nn.Sequential( 20 | nn.Linear(2, 4), 21 | nn.ReLU(inplace=True), 22 | nn.Linear(4, 8), 23 | ) 24 | 25 | def forward(self, x): 26 | h = self.encoder(x) 27 | return self.decoder(h), h 28 | 29 | 30 | total = 1000 # dataset size 31 | data_x = torch.rand(total, 8) # training data 32 | ds = data.TensorDataset(data_x, data_x) 33 | train_data = data.DataLoader(ds, batch_size=16, shuffle=True) 34 | 35 | model = AutoEncoder() 36 | 37 | optimizer = optim.RMSprop(params=model.parameters(), lr=0.01, weight_decay=0.0001) 38 
| loss_func = nn.MSELoss() 39 | 40 | epochs = 5 41 | model.train() 42 | 43 | for _e in range(epochs): 44 | for x_train, y_train in train_data: 45 | predict, _ = model(x_train) 46 | loss = loss_func(predict, y_train) 47 | 48 | optimizer.zero_grad() 49 | loss.backward() 50 | optimizer.step() 51 | 52 | model.eval() 53 | p, _ = model(data_x) 54 | Q = loss_func(p, data_x).item() 55 | -------------------------------------------------------------------------------- /solves/5.3.1: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # declare the VAE class here 5 | class VAE(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.hidden_dim = 4 9 | 10 | self.encoder = nn.Sequential( 11 | nn.Linear(128, 32, bias=False), 12 | nn.BatchNorm1d(32), 13 | nn.ReLU(inplace=True), 14 | nn.Linear(32, 8, bias=False), 15 | nn.BatchNorm1d(8), 16 | nn.ReLU(inplace=True), 17 | ) 18 | 19 | self.h_mean = nn.Sequential(nn.Linear(8, self.hidden_dim)) 20 | self.h_var = nn.Linear(8, self.hidden_dim) 21 | 22 | self.decoder = nn.Sequential( 23 | nn.Linear(self.hidden_dim, 64), 24 | nn.ReLU(inplace=True), 25 | nn.Linear(64, 128), 26 | ) 27 | 28 | def forward(self, x): 29 | enc = self.encoder(x) 30 | 31 | h_mean = self.h_mean(enc) 32 | h_var = torch.relu(self.h_var(enc)) 33 | 34 | noise = torch.normal(mean=torch.zeros_like(h_mean), std=torch.ones_like(h_var)) 35 | h = noise * torch.sqrt(h_var) + h_mean 36 | x = self.decoder(h) 37 | 38 | return x, h_mean, h_var 39 | 40 | 41 | # do not modify tensor data_x in this program 42 | batch_size = 8 43 | data_x = torch.rand(batch_size, 128) 44 | 45 | # continue the program here 46 | model = VAE() 47 | model.eval() 48 | out, hm, hv = model(data_x) 49 | -------------------------------------------------------------------------------- /solves/5.3.2: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import torch 3 | import torch.nn as nn 4 | import torchvision.transforms as tfs 5 | # import torchvision.transforms.v2 as tfs_v2 - not available on Stepik 6 | 7 | # declare the VAE_CNN class here 8 | class VAE_CNN(nn.Module): 9 | def __init__(self): 10 | super().__init__() 11 | self.hidden_dim = 7 12 | 13 | self.encoder = nn.Sequential( 14 | nn.Conv2d(3, 16, 3, padding=1), 15 | nn.ELU(inplace=True), 16 | nn.MaxPool2d(3, 2), 17 | nn.Conv2d(16, 4, 3, padding=1), 18 | nn.ELU(inplace=True), 19 | nn.MaxPool2d(3, 2), 20 | nn.Flatten() 21 | ) 22 | 23 | self.h_mean = nn.Linear(36, self.hidden_dim) 24 | self.h_log_var = nn.Linear(36, self.hidden_dim) 25 | 26 | self.decoder = nn.Sequential( 27 | nn.Linear(self.hidden_dim, 32), 28 | nn.ReLU(inplace=True), 29 | nn.Unflatten(1, (2, 4, 4)), 30 | nn.ConvTranspose2d(2, 8, 2, 2), 31 | nn.ReLU(inplace=True), 32 | nn.ConvTranspose2d(8, 1, 2, 2), 33 | nn.Sigmoid() 34 | ) 35 | 36 | def forward(self, x): 37 | enc = self.encoder(x) 38 | 39 | h_mean = self.h_mean(enc) 40 | h_log_var = self.h_log_var(enc) 41 | 42 | noise = torch.normal(mean=torch.zeros_like(h_mean), std=torch.ones_like(h_log_var)) 43 | h = noise * torch.exp(h_log_var / 2) + h_mean 44 | x = self.decoder(h) 45 | 46 | return x, h_mean, h_log_var 47 | 48 | 49 | img_pil = Image.new(mode="RGB", size=(64, 78), color=(0, 128, 255)) 50 | tr = tfs.Compose([tfs.CenterCrop(64), tfs.Resize(16), tfs.ToTensor()]) 51 | img = tr(img_pil) 52 | 53 | model = VAE_CNN() 54 | model.eval() 55 | 56 | out, hm, hlv = model(img.unsqueeze(0)) 57 | 
-------------------------------------------------------------------------------- /solves/5.3.3: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс KLLoss 5 | class KLLoss(nn.Module): 6 | def forward(self, h_mean, h_log_var): 7 | kl_loss = -0.5 * torch.sum(1 + h_log_var - h_mean ** 2 - torch.exp(h_log_var), dim=-1) 8 | return torch.mean(kl_loss) 9 | 10 | 11 | batch_size = 5 12 | h_mean = torch.rand(batch_size, 10) 13 | h_log_var = torch.rand(batch_size, 10) 14 | 15 | # здесь продолжайте программу 16 | loss_func = KLLoss() 17 | loss = loss_func(h_mean, h_log_var).item() 18 | -------------------------------------------------------------------------------- /solves/5.3.4: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | # здесь объявляйте класс KLLoss 5 | class KLLoss(nn.Module): 6 | def forward(self, h_mean, h_var): 7 | kl_loss = 0.5 * torch.sum(h_var + h_mean ** 2 - 1 - torch.log(h_var), dim=-1) 8 | return torch.mean(kl_loss) 9 | 10 | 11 | batch_size = 10 12 | h_mean = torch.rand(batch_size, 10) 13 | h_var = torch.rand(batch_size, 10) + 2.0 14 | 15 | # здесь продолжайте программу 16 | loss_func = KLLoss() 17 | loss = loss_func(h_mean, h_var) 18 | -------------------------------------------------------------------------------- /tests/1.11.3: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 9.0 -4.5 13.5 4.5 15.0 -15.0 -10.5 -10.5 -3.0 7.5 -4.5 15.0 -3.0 9.0 -1.5 -4.5 -15.0 3.0 -13.5 -7.5 -10.5 -1.5 -10.5 6.0 10.5 9.0 -10.5 -15.0 -15.0 -7.5 -7.5 13.5 3 | output: 4 | 5 | test #2 6 | input: 1.5 12.0 9.0 -10.5 0.0 0.0 13.5 -1.5 15.0 6.0 -6.0 0.0 -3.0 0.0 1.5 -3.0 4.5 -13.5 0.0 -6.0 -6.0 -12.0 4.5 1.5 6.0 1.5 6.0 -13.5 10.5 3.0 -10.5 -6.0 7 | output: 8 | -------------------------------------------------------------------------------- /tests/1.11.4: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 7.5 25.0 -25.0 17.5 -20.0 -22.5 -2.5 10.0 -15.0 -17.5 -15.0 -12.5 2.5 15.0 -7.5 2.5 10.0 10.0 10.0 17.5 -25.0 7.5 -12.5 -5.0 -10.0 12.5 2.5 22.5 12.5 -25.0 17.5 20.0 3 | output: 4 | 5 | test #2 6 | input: -1.0 -3.0 3.5 2.5 -4.0 2.0 4.0 -2.0 3.0 1.5 5.0 -1.0 4.0 1.0 2.0 -2.5 -0.5 3.0 -0.5 -0.5 -4.0 2.0 -5.0 5.0 -1.5 1.5 0.0 -1.5 4.5 -4.5 -4.5 -1.5 7 | output: 8 | -------------------------------------------------------------------------------- /tests/1.11.5: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 1 5 3 7 4 7 3 6 -4 -3 -2 -3 0 0 1.5 1.5 4.5 -4.5 -12.0 3.0 -13.5 13.5 -3.0 3.0 1.5 -3.0 0.0 -13.5 10.5 -10.5 1.5 0.0 9.0 -7.5 12.0 -13.5 7.5 -4.5 -3.0 12.0 13.5 -13.5 9.0 -6.0 -6.0 -1.5 15.0 -12.0 -10.5 10.5 13.5 -13.5 13.5 -1.5 -13.5 12.0 -3.0 -9.0 -7.5 3.0 10.5 -12.0 1.5 12.0 3 | output: 4 | 5 | test #2 6 | input: 10.5 0.0 1.5 -7.5 1.5 7.5 0.0 1.5 3.0 15.0 0.0 10.5 -9.0 -12.0 4.5 -1.5 -9.0 15.0 9.0 12.0 -12.0 15.0 15.0 -3.0 4.5 1.5 4.5 -3.0 -13.5 3.0 6.0 1.5 1.5 -13.5 3.0 7.5 13.5 -15.0 4.5 6.0 -9.0 6.0 4.5 1.5 -12.0 -12.0 3.0 1.5 -7.5 10.5 -3.0 -12.0 -3.0 13.5 3.0 1.5 -10.5 10.5 -9.0 -1.5 -10.5 -6.0 3.0 7.5 7 | output: 8 | -------------------------------------------------------------------------------- /tests/1.11.6: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 1 5 3 7 4 7 3 6 -4 -3 -2 -3 0 0 1.5 
1.5 4.5 -4.5 -12.0 3.0 -13.5 13.5 -3.0 3.0 1.5 -3.0 0.0 -13.5 10.5 -10.5 1.5 0.0 9.0 -7.5 12.0 -13.5 7.5 -4.5 -3.0 12.0 13.5 -13.5 9.0 -6.0 -6.0 -1.5 15.0 -12.0 -10.5 10.5 13.5 -13.5 13.5 -1.5 -13.5 12.0 -3.0 -9.0 -7.5 3.0 10.5 -12.0 1.5 12.0 3 | output: 4 | 5 | test #2 6 | input: 10.5 0.0 1.5 -7.5 1.5 7.5 0.0 1.5 3.0 15.0 0.0 10.5 -9.0 -12.0 4.5 -1.5 -9.0 15.0 9.0 12.0 -12.0 15.0 15.0 -3.0 4.5 1.5 4.5 -3.0 -13.5 3.0 6.0 1.5 1.5 -13.5 3.0 7.5 13.5 -15.0 4.5 6.0 -9.0 6.0 4.5 1.5 -12.0 -12.0 3.0 1.5 -7.5 10.5 -3.0 -12.0 -3.0 13.5 3.0 1.5 -10.5 10.5 -9.0 -1.5 -10.5 -6.0 3.0 7.5 7 | output: 8 | -------------------------------------------------------------------------------- /tests/1.2.6: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 8 11 3 | output: -76.0 4 | 5 | test #2 6 | input: 0 0 7 | output: 2.5 8 | 9 | test #3 10 | input: 1 -1 11 | output: 3.5 12 | 13 | test #4 14 | input: -1 1 15 | output: 3.5 16 | 17 | test #5 18 | input: 1 2 19 | output: 2.0 20 | 21 | test #6 22 | input: 2 10 23 | output: -11.5 24 | -------------------------------------------------------------------------------- /tests/1.2.7: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 1 2 3 3 | output: 11.50 4 | 5 | test #2 6 | input: -3 2 5 7 | output: 13.50 8 | 9 | test #3 10 | input: 3 -12 5 11 | output: 1.50 12 | 13 | test #4 14 | input: 3 1 -11 15 | output: -43.00 16 | 17 | test #5 18 | input: 0 0 0 19 | output: -5.00 20 | -------------------------------------------------------------------------------- /tests/1.2.8: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 0 0 0 3 | output: 1.0 4 | 5 | test #2 6 | input: 1 2 3 7 | output: -2.0 8 | 9 | test #3 10 | input: 9 -10 -5 11 | output: 1.0 12 | 13 | test #4 14 | input: -3 10 1 15 | output: -3.0 16 | -------------------------------------------------------------------------------- /tests/1.2.9: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 3 -5 3 | output: 29.0 4 | 5 | test #2 6 | input: 0 0 7 | output: -5.0 8 | 9 | test #3 10 | input: -1 -1 11 | output: -3.0 12 | 13 | test #4 14 | input: -1 2 15 | output: 0.0 16 | 17 | test #5 18 | input: 2 1 19 | output: 0.0 20 | 21 | test #6 22 | input: 22 13 23 | output: 648.0 24 | -------------------------------------------------------------------------------- /tests/1.4.10: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 8 11 3 2 1 6 3 | output: 4 | 5 | test #2 6 | input: 1 2 3 4 5 6 7 | output: 8 | 9 | test #3 10 | input: 0 0 0 1 1 1 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.4.9: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 1 2 3 3 | output: 4 | 5 | test #2 6 | input: -5 10 0 1 1 3 4 -1 7 | output: 8 | 9 | test #3 10 | input: 1 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.6.10: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 11 18 5 7 5 11 1 3 | output: 4 | 5 | test #2 6 | input: 12 19 14 5 6 7 | output: 8 | 9 | test #3 10 | input: 13 17 2 6 15 4 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.6.11: 
-------------------------------------------------------------------------------- 1 | test #1 2 | input: 3 6 3 -3 -8 -6 -5 -10 7 -6 9 3 8 -6 -6 3 -7 -7 -10 6 9 0 6 -7 -5 -5 -6 2 -3 1 -4 2 -9 8 -1 -2 3 | output: 4 | 5 | test #2 6 | input: -1 2 -4 5 1 5 -1 2 3 3 5 -2 -3 -5 -3 -1 7 | output: 8 | 9 | test #3 10 | input: -1 0 1 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.6.2: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 0.1 4.3 -1.2 0.7 0.5 3 | output: 0.1 -1.2 0.5 4 | 5 | test #2 6 | input: -5.3 0.6 10.4 1.2 0.1 0.2 0.3 7 | output: -5.3 10.4 0.1 0.3 8 | 9 | test #3 10 | input: 2.3 11 | output: 2.3 12 | -------------------------------------------------------------------------------- /tests/1.6.4: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 100 3 | output: 0 100 0 100 0 100 0 100 0 100 4 | 5 | test #2 6 | input: 7 7 | output: 0 7 0 7 0 7 0 7 0 7 8 | -------------------------------------------------------------------------------- /tests/1.6.5: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 3 | 1 2 3 4 5 6 7 8 9 4 | 7 6 5 4 5 | output: 1 7 6 5 4 6 7 8 9 6 | 7 | test #2 8 | input: 9 | 10 -23 80 0 1 2 -1 -2 -3 10 | 1 2 3 4 5 6 7 8 11 | output: 10 1 2 3 4 5 6 7 8 12 | 13 | test #3 14 | input: 15 | 1 2 16 | 0 17 | output: 1 0 18 | -------------------------------------------------------------------------------- /tests/1.6.7: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 | output: 4 | 5 | test #2 6 | input: -2 -2 -1 10 2 -1 9 10 10 4 6 4 6 7 -2 7 -1 6 3 8 8 0 10 3 2 7 9 0 10 0 6 3 3 -2 10 -1 7 | output: 8 | 9 | test #3 10 | input: 71 25 33 70 90 60 42 24 43 23 91 90 77 74 19 90 96 40 30 63 20 21 27 25 62 20 70 20 17 52 54 16 49 33 94 88 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.6.8: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 2 8 -5 13 11 10 4 4 0 12 16 18 12 -3 5 -3 -7 17 20 9 -4 16 4 10 4 -10 -6 11 0 -4 -2 -4 5 3 14 17 3 | output: 4 | 5 | test #2 6 | input: 156 106 95 161 200 186 195 24 62 155 81 67 164 115 156 151 145 70 59 85 146 12 166 83 120 170 112 97 52 75 139 17 189 63 182 26 7 | output: 8 | 9 | test #3 10 | input: 3 14 2 19 9 9 11 18 5 7 5 11 1 4 18 13 6 12 12 19 14 5 6 6 1 8 17 6 20 13 17 2 6 15 4 18 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.6.9: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 8 11 -4 2 -1 -2 0 -1 3 | output: 4 | 5 | test #2 6 | input: -1 0 -2 7 | output: 8 | 9 | test #3 10 | input: 1 2 3 4 5 6 11 | output: 12 | 13 | test #4 14 | input: -1 -2 -3 -4 15 | output: 16 | -------------------------------------------------------------------------------- /tests/1.7.10: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 0 5 4 2 3 3 | output: 4 | 5 | test #2 6 | input: 4 3 2 1 7 | output: 8 | 9 | test #3 10 | input: 1 0 11 | output: 12 | 13 | test #4 14 | input: 1 15 | output: 16 | -------------------------------------------------------------------------------- /tests/1.7.2: 
-------------------------------------------------------------------------------- 1 | test #1 2 | input: 2 6 2 10 9 6 6 9 10 5 9 2 3 | output: 4 | 5 | test #2 6 | input: 7 1 3 1 10 3 9 1 5 6 7 | output: 8 | 9 | test #3 10 | input: 10 11 | output: 12 | 13 | test #4 14 | input: 1 15 | output: 16 | -------------------------------------------------------------------------------- /tests/1.7.3: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 3 | 6 -1 -4 -5 1 8 -4 14 -5 10 9 18 12 18 11 4 | 2 2 2 3 4 5 2 1 3 1 5 | output: 6 | 7 | test #2 8 | input: 9 | -2 5 5 18 10 5 -3 18 16 7 -5 -5 2 4 -3 3 7 16 13 2 -3 5 10 | 1 1 1 3 2 4 3 2 3 2 1 0 1 2 11 | output: 12 | 13 | test #3 14 | input: 15 | 1 2 3 16 | 0 17 | output: 18 | 19 | test #4 20 | input: 21 | 1 22 | 1 2 3 23 | output: 24 | -------------------------------------------------------------------------------- /tests/1.7.4: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 0.8 -5.0 -1.3 -0.7 1.5 -1.3 4.0 -3.6 -0.2 -4.6 -3.4 3.3 4.9 -2.6 3.9 -0.5 3 | output: 0.8 5.0 -1.3 0.7 1.5 1.3 4.0 3.6 -0.2 4.6 -3.4 -3.3 4.9 2.6 3.9 0.5 4 | 5 | test #2 6 | input: 10.0 9.3 2.1 2.7 6.3 6.9 5.4 1.1 8.6 4.5 9.0 3.7 1.3 1.8 4.7 9.2 5.3 8.2 9.2 4.9 7 | output: 10.0 -9.3 2.1 -2.7 6.3 -6.9 5.4 -1.1 8.6 -4.5 9.0 -3.7 1.3 -1.8 4.7 -9.2 5.3 8.2 9.2 4.9 8 | 9 | test #3 10 | input: -0.9 -7.9 -8.2 -0.5 -9.5 -5.5 -1.9 -9.3 -1.1 -2.1 -4.4 -1.6 -1.2 -8.2 -4.7 -5.8 -2.4 -2.8 -6.7 -6.8 -1.5 -3.7 -5.6 -1.9 11 | output: -0.9 7.9 -8.2 0.5 -9.5 5.5 -1.9 9.3 -1.1 2.1 -4.4 1.6 -1.2 8.2 -4.7 5.8 -2.4 -2.8 -6.7 -6.8 -1.5 -3.7 -5.6 -1.9 12 | -------------------------------------------------------------------------------- /tests/1.7.8: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 76 72 1 18 79 78 43 93 44 69 7 88 23 16 64 60 3 74 41 44 77 76 9 22 3 | output: 4 | 5 | test #2 6 | input: 19 7 17 11 13 18 14 12 14 6 7 14 15 6 4 5 17 15 7 | output: 8 | 9 | test #3 10 | input: 1 2 11 | output: 12 | 13 | test #4 14 | input: 2 1 15 | output: 16 | -------------------------------------------------------------------------------- /tests/1.7.9: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 26685 3 | output: 4 | 5 | test #2 6 | input: 12732 7 | output: 8 | 9 | test #3 10 | input: 3725 11 | output: 12 | 13 | test #4 14 | input: 0 15 | output: 16 | -------------------------------------------------------------------------------- /tests/1.8.10: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 49 66 74 17 38 49 2 46 35 94 100 8 29 41 26 47 24 12 14 25 27 25 91 25 3 | output: 4 | 5 | test #2 6 | input: 84 92 50 62 42 46 1 36 67 98 9 85 78 58 36 84 81 36 97 4 21 95 68 62 90 62 19 66 90 83 71 33 62 7 | output: 8 | 9 | test #3 10 | input: 45 87 4 22 55 39 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.8.2: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 5 4 7 3 2 3 | output: 4 | 5 | test #2 6 | input: 3 2 1 2 2 2 2 2 2 7 | output: 8 | 9 | test #3 10 | input: 0 0 1 1 2 2 2 11 | output: 12 | 13 | test #4 14 | input: 0 15 | output: 16 | -------------------------------------------------------------------------------- /tests/1.8.4: -------------------------------------------------------------------------------- 1 | test #1 2 | 
input: -2 3 10 3 | output: 4 | 5 | test #2 6 | input: 0 4 7 7 | output: 8 | 9 | test #3 10 | input: -3 -0.1 10 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.8.7: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 39390 82672 68902 35165 32969 97967 28057 66890 85532 43443 21427 68419 68605 29956 76932 79372 3 | output: 4 | 5 | test #2 6 | input: 60508 18499 30215 98327 59506 88001 37663 7 | output: 8 | 9 | test #3 10 | input: 30215 98327 59506 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.8.8: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 46294 43525 55769 64791 13620 91130 22601 57158 66245 34398 23007 3 | output: 4 | 5 | test #2 6 | input: 11549 51744 66268 88228 82070 7 | output: 8 | 9 | test #3 10 | input: 93237 61776 11 | output: 12 | 13 | test #4 14 | input: 123 15 | output: 16 | -------------------------------------------------------------------------------- /tests/1.8.9: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 11 3 23 97 88 67 22 11 85 50 47 42 22 18 40 87 38 75 28 99 62 14 2 60 98 56 3 | output: 4 | 5 | test #2 6 | input: 25 71 93 48 44 75 7 | output: 8 | 9 | test #3 10 | input: 65 46 54 78 100 28 41 96 80 61 7 81 86 81 36 44 11 | output: 12 | -------------------------------------------------------------------------------- /tests/1.9.5: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 3 | -2.3 0.5 0.5 1.0 4 | 10 20 30 5 | output: 42.7 6 | 7 | test #2 8 | input: 9 | 10 0.1 0.8 -2.5 10 | 4.3 -2.1 8.7 11 | output: -13.0 12 | 13 | test #3 14 | input: 15 | 0 11.7 5.1 23.2 16 | 3.2 0.1 5.5 17 | output: 165.6 18 | -------------------------------------------------------------------------------- /tests/1.9.7: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 3 | -2.3597 -1.6090 -1.4605 -1.0693 1.7774 -0.8170 -1.8682 0.9619 4 | 2 0.5 -0.5 5 | output: 6 | -------------------------------------------------------------------------------- /tests/2.4.4: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 4.5 3 | output: 4 | 5 | test #2 6 | input: -2.3 7 | output: 8 | 9 | test #3 10 | input: 0.1 11 | output: 12 | 13 | test #4 14 | input: 40.0 15 | output: 16 | -------------------------------------------------------------------------------- /tests/2.4.5: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 3 | 8.5 4 | 12.7 5 | output: 6 | 7 | test #2 8 | input: 9 | 10.1 10 | 5.7 11 | output: 12 | 13 | test #3 14 | input: 15 | 12.5 16 | 34.2 17 | output: 18 | -------------------------------------------------------------------------------- /tests/2.4.6: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 3 | 2.5 4 | 0.12 0.5 -2.3 0.1 5 | output: 6 | 7 | test #2 8 | input: 9 | -0.5 10 | 5.12 -1.5 1.3 -0.5 11 | output: 12 | 13 | test #3 14 | input: 15 | 3.0 16 | -4.3 0.1 0.2 -0.01 17 | output: 18 | -------------------------------------------------------------------------------- /tests/2.4.7: -------------------------------------------------------------------------------- 1 | test #1 2 | input: 0.12 0.5 -2.3 0.1 3 | output: 4 | 5 | test #2 6 | input: 
5.12 -1.5 1.3 -0.5 7 | output: 8 | 9 | test #3 10 | input: -4.3 0.1 0.2 -0.01 11 | output: 12 | -------------------------------------------------------------------------------- /tests/2.6.3: -------------------------------------------------------------------------------- 1 | test #1 2 | input: -1.6 7.8 3.6 0.1 3.7 5.9 -1.8 10.0 -0.6 -2.8 0.8 0.4 9.5 2.4 4.9 9.6 3 | output: 51.9 4 | 5 | test #2 6 | input: 6.3 -1.7 -2.8 7.0 3.8 3.1 8.2 0.5 9.4 8.4 1.3 2.7 3.8 -3.4 -1.7 -0.1 7 | output: 44.8 8 | -------------------------------------------------------------------------------- /tests/3.5.7: -------------------------------------------------------------------------------- 1 | test #1 2 | input: -2 3 10 3 | output: 4 | 5 | test #2 6 | input: 0 4 7 7 | output: 8 | 9 | test #3 10 | input: -3 -0.1 10 11 | output: 12 | -------------------------------------------------------------------------------- /train_data_false: -------------------------------------------------------------------------------- 1 | Я притягиваю только плохое 2 | Кому я нужен с такой внешностью 3 | Не доверяй никому, тебя обязательно обманут 4 | Если рискнешь высунуться, то добром это не кончиться 5 | Говорила я тебе, не верь никому 6 | Все новое и неизвестное – это опасно 7 | Чтобы я не сделал, мне все равно не повезет 8 | Я не могу с этим ничего поделать 9 | Как бы я не старался, от судьбы не уйдешь 10 | Мне не повезло родиться в бедной семье, значит, и умру я нищим 11 | У меня точно не получиться, никогда 12 | Я всегда все путаю, поэтому такой бестолковый 13 | Мне все равно не выиграть, можно даже не пытаться 14 | Кому я такая страшная нужна 15 | Мне всегда не везет 16 | Меня никто не любит 17 | У меня плохая память 18 | Я неудачник 19 | Мне публично выступить? Что вы?! Это точно не мое! 20 | У меня такая плохая карма 21 | Чтобы я не делал, ни в чем нет успеха 22 | Мой возраст не позволяет сделать этого. 23 | Я никогда не сяду за руль, это же смертельно опасно! 24 | Я никому не доверяю, это опасно. Только понадеешься, тут же подведут 25 | Этих жизненных препятствий никогда не преодолеть, уж лучше и не начинать. 26 | Мне никогда не выздороветь 27 | Никакие лекарства не помогут мне 28 | Эту болячку я сама накликала на себя 29 | Лишний вес никогда не уйдет, даже не стоит и пробовать 30 | Что толку от этих медитаций, только потерянное время 31 | С возрастом болячки только усиливаются и увеличиваются 32 | К врачам только попади, сразу найдут кучу болезней 33 | Ни один врач не сможет вылечить мою болезнь 34 | От хронических болезней еще никто не избавлялся – это приговор на всю жизнь 35 | Внутреннюю энергию ни чем не вернешь 36 | Меня любят только тогда, когда я болею 37 | Мои дети звонят только тогда, когда я плохо себя чувствую 38 | Только когда я болею, я могу себе позволить есть фрукты 39 | Здоровое питание сильно дорого для меня 40 | Много пить воды вредно, вчера по телевизору показывали. 
41 | Все мужчины рано или поздно изменяют 42 | Муж должен держать жену в строгости 43 | Всем парням только одно надо, и чем раньше, тем лучше 44 | Порядочных девушек сейчас не осталось, все доступные – только свистни, в койку так и прыгают 45 | Любовь бывает только в фильмах и в книжках, а в жизни все более прагматично 46 | Стоит только пожениться, тут же начинаются ссора и обиды 47 | Сама выросла без отца и дедушки, и ребенка выращу без мужика 48 | Я первым никогда не подойду, не мужской поступок, пусть сама на коленях приползет 49 | Если женщина в браке следит за собой, то значит кто-то есть 50 | Все женщины — стервы 51 | Женщинам только и подавай: деньги и шопинг 52 | Блондинки все пустоголовые 53 | Мужики все бесчувственные 54 | Меня никогда не полюбят 55 | По карьерной лестнице не пробиться, двигают только своих 56 | Хоть сто пядей во лбу, начальник никогда тебя не похвалит 57 | На работе инициатива наказуема, лучше не бери на себя лишнего, не подводи коллег 58 | Нельзя быть в коллективе самым умным, этого никто не любит 59 | Работа не волк, в лес не убежит, поэтому делай все в последний момент 60 | Хороших начальников не бывает 61 | Никогда не признавайся в своих ошибках, а то накажут 62 | Сиди и помалкивай, пока тебя лично, поименно и прямо не спросили 63 | Никому никогда не помогай, это к добру не приведет, все равно, виноватым будешь 64 | Любое сказанное слово против тебя обернется 65 | Мой опыт никому не нужен 66 | Вокруг полно более молодых и успешных 67 | С такой внешностью не работать в этой профессии 68 | Во столько лет на хорошую работу не устроиться 69 | Хоть 3 копейки» но зато свои, и два раза в месяц 70 | Огромные деньги наживают только воровством 71 | Честные деньги не бывают большими 72 | Если денег много, значит, добыты обманным путем 73 | Чтобы заработать свою получку, надо попотеть 74 | То, что получено быстро, мигом и теряется 75 | Не зря говорят, что бесплатный сыр в мышеловке 76 | Особняки и вертолеты только у воров 77 | За помощь надо платить монетой 78 | Бескорыстно никто никому ничего не делает 79 | Благотворительность только для богатых, у них денег куры не клюют, пусть и помогают 80 | Почему такой умный, и такой бедный? Как только появляются деньги, так начинаешь их транжирить 81 | Богатство – это зло, а бедность — не порок 82 | Чем больше денег, тем больше зависти. Чем больше зависти, тем больше болезней 83 | Богатство до добра не доведет 84 | Большие деньги честными не бывают 85 | Много денег портят человека 86 | У того, кто имеет очень много денег, черствеет душа 87 | Для того чтобы заработать миллион – жизни не хватит 88 | За все хорошее надо платить 89 | --------------------------------------------------------------------------------