├── TargetLocalization
│   ├── fig
│   │   └── network_attackerNum20.png
│   ├── results
│   ├── plot_results
│   │   └── paper_result.jpg
│   ├── agent.py
│   ├── README.md
│   ├── utils.py
│   ├── single_task.py
│   └── multi_task.py
├── DigitClassification
│   ├── results
│   ├── plot_results
│   │   ├── legend.jpg
│   │   ├── mnist_synthetic.jpg
│   │   ├── paper_result_group1.jpg
│   │   ├── paper_result_group2.jpg
│   │   └── plot.py
│   ├── agent.py
│   ├── README.md
│   ├── synthetic_images_visualization.py
│   └── main.py
├── HumanActivityRecog
│   ├── results
│   ├── plot_results
│   │   ├── legend.jpg
│   │   ├── paper_result.jpg
│   │   ├── plot_range.py
│   │   ├── plot_individual.py
│   │   └── plot.py
│   ├── README.md
│   ├── agent.py
│   └── main.py
└── README.md
/DigitClassification/plot_results/legend.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JianiLi/resilientDistributedMTL/HEAD/DigitClassification/plot_results/legend.jpg
--------------------------------------------------------------------------------
/HumanActivityRecog/plot_results/legend.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JianiLi/resilientDistributedMTL/HEAD/HumanActivityRecog/plot_results/legend.jpg
--------------------------------------------------------------------------------
/HumanActivityRecog/plot_results/paper_result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JianiLi/resilientDistributedMTL/HEAD/HumanActivityRecog/plot_results/paper_result.jpg
--------------------------------------------------------------------------------
/TargetLocalization/fig/network_attackerNum20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JianiLi/resilientDistributedMTL/HEAD/TargetLocalization/fig/network_attackerNum20.png
--------------------------------------------------------------------------------
/TargetLocalization/plot_results/paper_result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JianiLi/resilientDistributedMTL/HEAD/TargetLocalization/plot_results/paper_result.jpg
--------------------------------------------------------------------------------
/DigitClassification/plot_results/mnist_synthetic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JianiLi/resilientDistributedMTL/HEAD/DigitClassification/plot_results/mnist_synthetic.jpg
--------------------------------------------------------------------------------
/DigitClassification/plot_results/paper_result_group1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JianiLi/resilientDistributedMTL/HEAD/DigitClassification/plot_results/paper_result_group1.jpg
--------------------------------------------------------------------------------
/DigitClassification/plot_results/paper_result_group2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JianiLi/resilientDistributedMTL/HEAD/DigitClassification/plot_results/paper_result_group2.jpg
--------------------------------------------------------------------------------
/DigitClassification/agent.py:
--------------------------------------------------------------------------------
import torch  # torch.autograd.Variable is deprecated and has been removed; tensors are used directly


class agent:
    def __init__(self, net):
        self.net = net
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=1e-3)
        self.loss_func = torch.nn.CrossEntropyLoss()
        self.train_loss = 0
        self.train_acc = 0

    def optimize(self, batch_x, batch_y):
        # One local training step: forward pass, bookkeeping, backprop, update.
        out = self.net(batch_x)
        loss = self.loss_func(out, batch_y)
        self.train_loss += loss.item()
        pred = torch.max(out, 1)[1]
        train_correct = (pred == batch_y).sum()
        self.train_acc += train_correct.item()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return loss.item(), train_correct.item()

    def getLoss(self, batch_x, batch_y, neighbor_net):
        # Evaluate a neighbor's model on this agent's own data; the accumulated
        # loss is what drives the loss-based cooperation weights.
        neighbor_net.eval()
        with torch.no_grad():
            out = neighbor_net(batch_x)
            loss = self.loss_func(out, batch_y)

        return loss.item()
--------------------------------------------------------------------------------
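
Usage sketch for the `agent` class above (illustrative only; the tiny linear model and random batch are stand-ins for the CNN and data pipeline in main.py):

```python
import torch
import torch.nn as nn

make_net = lambda: nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
a, b = agent(make_net()), agent(make_net())

batch_x = torch.randn(32, 1, 28, 28)   # stand-in image batch
batch_y = torch.randint(0, 10, (32,))  # stand-in labels

loss, correct = a.optimize(batch_x, batch_y)        # one local training step
neighbor_loss = a.getLoss(batch_x, batch_y, b.net)  # score b's model on a's data
print(loss, correct, neighbor_loss)
```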
/TargetLocalization/agent.py:
--------------------------------------------------------------------------------
import torch  # torch.autograd.Variable is deprecated and has been removed; tensors are used directly


class agent:
    def __init__(self, net):
        self.net = net
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=1e-3)
        self.loss_func = torch.nn.CrossEntropyLoss()
        self.train_loss = 0
        self.train_acc = 0

    def optimize(self, batch_x, batch_y):
        # One local training step: forward pass, bookkeeping, backprop, update.
        out = self.net(batch_x)
        loss = self.loss_func(out, batch_y)
        self.train_loss += loss.item()
        pred = torch.max(out, 1)[1]
        train_correct = (pred == batch_y).sum()
        self.train_acc += train_correct.item()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return loss.item(), train_correct.item()

    def getLoss(self, batch_x, batch_y, neighbor_net):
        # Evaluate a neighbor's model on this agent's own data; the accumulated
        # loss is what drives the loss-based cooperation weights.
        neighbor_net.eval()
        with torch.no_grad():
            out = neighbor_net(batch_x)
            loss = self.loss_func(out, batch_y)

        return loss.item()
--------------------------------------------------------------------------------
/TargetLocalization/README.md:
--------------------------------------------------------------------------------
### Case Study - Target Localization
Target localization is a widely studied linear regression problem.
The task is to estimate the location of a target by minimizing the squared-error loss over noisy streaming sensor data.
We consider a network of 100 agents with four targets.
Agents in the same color share the same target; however, they do not know this group information beforehand.
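
As a toy illustration of this regression task (a minimal sketch only; the actual model and data generation live in multi_task.py and utils.py):

```python
import numpy as np

rng = np.random.default_rng(0)
target = np.array([3.0, -2.0])  # unknown target location
w = np.zeros(2)                 # one agent's running estimate
mu = 0.05                       # step size

for t in range(500):
    measurement = target + rng.normal(scale=0.5, size=2)  # noisy streaming sensor reading
    w -= mu * 2 * (w - measurement)                       # gradient step on the squared error
print(w)  # close to `target`
```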

### Dataset
- Data is generated in the code.

### Instructions
Tested on Python 3.7.

- Run multi_task.py to reproduce the results shown in the paper (we also provide single_task.py, where all agents share the same target).
- In multi_task.py, we simulate four cases: "no-cooperation", "loss", "distance", "average", as explained in the paper.
- "numAgents" is the total number of agents in the network, including the Byzantine agents; "attackerNum" defines the number of attackers.

### Results
Results show that the loss-based weight assignment rule outperforms all the other rules, as well as the non-cooperative case,
with respect to the mean and range of the average loss and accuracy, both with and without Byzantine agents.
Hence, our simulations imply that the loss-based weights accurately learn the relationships among agents.
Moreover, normal agents with a large estimation regret indeed benefit from cooperating with agents with a small regret.



### Cite the paper
```
@inproceedings{neurips_2020_byzantineMTL,
    title={Byzantine Resilient Distributed Multi-Task Learning},
    author={Jiani Li and Waseem Abbas and Xenofon Koutsoukos},
    booktitle = {Thirty-fourth Conference on Neural Information Processing Systems (NeurIPS)},
    year = {2020}
}
```
--------------------------------------------------------------------------------
/TargetLocalization/utils.py:
--------------------------------------------------------------------------------
import random

import numpy as np
from matplotlib import pyplot as plt
from shapely.geometry import Point


def random_point_set(n, lower=-10, upper=10):
    # Sample n points uniformly at random from the square [lower, upper]^2.
    assert lower <= upper
    points = []
    for i in range(n):
        x = random.uniform(lower, upper)
        y = random.uniform(lower, upper)
        points.append(Point(x, y))
    return points


def plot_point_set(point_set, color='b', ax=None, alpha=1):
    for p in point_set:
        plot_point(p, color=color, ax=ax, alpha=alpha)


def plot_point(P, marker='o', color='b', size=4, ax=None, alpha=1):
    if ax is None:
        plt.plot(P.x, P.y, marker=marker, color=color, markersize=size, markeredgecolor='k', markeredgewidth=0.1, alpha=alpha)
        plt.draw()
    else:
        ax.plot(P.x, P.y, marker=marker, color=color, markersize=size, markeredgecolor='k', markeredgewidth=0.1, alpha=alpha)


def findNeighbors(x, k, numAgents, rmax, maxNeighborSize=10):
    # Neighbors of agent k are all agents within radius rmax; agent k itself is
    # always first in the list. If the neighborhood is too large, subsample it.
    N = [k]
    for i in range(numAgents):
        if i == k:
            continue
        n = x[i]
        if np.sqrt((n.y - x[k].y) ** 2 + (n.x - x[k].x) ** 2) <= rmax:
            N.append(i)

    if len(N) > maxNeighborSize:
        selection = random.sample(N[1:], maxNeighborSize - 1)
        return [N[0]] + selection
    else:
        return N


def h(w, x, s=1):
    # Saturated displacement from x toward w: the full difference within
    # distance s, otherwise a vector of length s in the same direction.
    dist = w.distance(x)
    if dist <= s:
        return np.array([w.x - x.x, w.y - x.y])
    else:
        return np.array([w.x - x.x, w.y - x.y]) / dist * s


def Delta(x, k, numAgents, r=2, sensingRange=10):
    # Spacing term that pushes agent k toward distance r from every agent it
    # can sense (within sensingRange): repulsive when closer, attractive when farther.
    N = []
    for l in range(numAgents):
        if x[l].distance(x[k]) <= sensingRange:
            N.append(l)
    delta = 0
    for l in N:
        if l == k:
            continue
        dist = x[l].distance(x[k])
        if dist != 0:
            delta += np.array([x[l].x - x[k].x, x[l].y - x[k].y]) / dist * (dist - r)

    # return delta * (1 / len(N))
    return delta
--------------------------------------------------------------------------------
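
A quick usage sketch for these helpers (illustrative values only):

```python
from utils import Delta, findNeighbors, h, random_point_set

agents = random_point_set(20)                         # 20 random agent positions
N = findNeighbors(agents, 0, 20, rmax=8)              # agent 0's neighborhood (itself first)
step = h(agents[1], agents[0], s=1)                   # saturated step from agent 0 toward agent 1
spacing = Delta(agents, 0, 20, r=2, sensingRange=10)  # spacing adjustment for agent 0
print(N, step, spacing)
```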
/HumanActivityRecog/README.md:
--------------------------------------------------------------------------------
### Case Study - Human Activity Recognition
Mobile phone sensor data (accelerometer and gyroscope) is collected from 30 individuals performing one of six activities:
{walking, walking-upstairs, walking-downstairs, sitting, standing, lying-down}.
The goal is to predict the activity performed from the 561-length feature vector generated for each instance from the processed sensor signals.
We model each individual as a separate task and use a complete graph to model the network topology.
We use a linear model as the prediction function with cross-entropy loss.

### Download Dataset
- The Human Activity Recognition (HAR) dataset can be downloaded here: https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones
- Download the dataset, unzip it, and put the folder under HumanActivityRecog/ (a loading sketch follows below).
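
A minimal loading sketch (assuming the unzipped folder keeps the UCI name "UCI HAR Dataset"; adjust the path if yours differs):

```python
import numpy as np

root = "UCI HAR Dataset"
X_train = np.loadtxt("%s/train/X_train.txt" % root)         # 561-length feature vectors
y_train = np.loadtxt("%s/train/y_train.txt" % root)         # activity labels 1..6
subjects = np.loadtxt("%s/train/subject_train.txt" % root)  # subject id (1..30)

# One task per individual: agent i trains only on subject i's samples.
task_data = {int(s): X_train[subjects == s] for s in np.unique(subjects)}
print(len(task_data), "training subjects")
```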

### Instructions
Tested on Python 3.7.

- Run main.py to reproduce the results shown in the paper.
- In main.py, "rule" can be "no-cooperation", "loss", "distance", or "average", as explained in the paper.
- "N" is the total number of agents in the network, including the Byzantine agents; "attacker_num" defines the number of attackers.

### Results
Results show that the loss-based weight assignment rule outperforms all the other rules, as well as the non-cooperative case,
with respect to the mean and range of the average loss and accuracy, both with and without Byzantine agents.
Hence, our simulations imply that the loss-based weights accurately learn the relationships among agents.
Moreover, normal agents with a large estimation regret indeed benefit from cooperating with agents with a small regret.



### Cite the paper
```
@inproceedings{neurips_2020_byzantineMTL,
    title={Byzantine Resilient Distributed Multi-Task Learning},
    author={Jiani Li and Waseem Abbas and Xenofon Koutsoukos},
    booktitle = {Thirty-fourth Conference on Neural Information Processing Systems (NeurIPS)},
    year = {2020}
}
```
--------------------------------------------------------------------------------
/HumanActivityRecog/agent.py:
--------------------------------------------------------------------------------
import math

import torch  # torch.autograd.Variable is deprecated and has been removed; tensors are used directly


class agent:
    def __init__(self, net):
        self.net = net
        self.optimizer = torch.optim.SGD(self.net.parameters(), lr=0.01)
        self.loss_func = torch.nn.CrossEntropyLoss()
        self.train_loss = 0
        self.train_acc = 0

    def optimize(self, batch_x, batch_y):
        # One local training step; skip the bookkeeping and the parameter
        # update if the loss has already diverged to NaN.
        out = self.net(batch_x)
        loss = self.loss_func(out, batch_y)
        pred = torch.max(out, 1)[1]
        train_correct = (pred == batch_y).sum()

        if math.isnan(loss.item()) or math.isnan(train_correct.item()):
            return loss.item(), train_correct.item()

        self.train_loss += loss.item()
        self.train_acc += train_correct.item()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return loss.item(), train_correct.item()

    def getLoss(self, batch_x, batch_y, neighbor_net):
        # Evaluate a neighbor's model on this agent's own data; the accumulated
        # loss is what drives the loss-based cooperation weights.
        neighbor_net.eval()
        with torch.no_grad():
            out = neighbor_net(batch_x)
            loss = self.loss_func(out, batch_y)

        return loss.item()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# ResilientDistributedMTL
Code for the NeurIPS 2020 paper [Byzantine Resilient Distributed Multi-Task Learning](https://arxiv.org/pdf/2010.13032.pdf)

### Background

In a multi-agent, multi-task distributed system, agents aim to learn distinct but correlated models simultaneously.
They share their models with neighbors and actively learn from peers performing a similar task.
Such cooperation has been demonstrated to benefit the overall learning performance over the network.
However, distributed algorithms for learning similarities among tasks are not resilient in the presence of Byzantine agents.
In this paper, we present an approach for Byzantine resilient distributed multi-task learning. We propose an efficient online weight assignment rule that measures the accumulated loss of each neighbor's model on the agent's own data.
A small accumulated loss indicates a large similarity between the two tasks.
To ensure Byzantine resilience of the aggregation at a normal agent, we introduce a step that filters out the larger losses.
We analyze the approach for convex models and show that normal agents converge resiliently towards their true targets. Further, the learning performance of an agent using the proposed weight assignment rule is guaranteed to be at least as good as in the non-cooperative case, as measured by the expected regret.
Finally, we demonstrate the approach on three case studies that include both regression and classification, and show that our method has good empirical performance for non-convex models such as convolutional neural networks.
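
A minimal sketch of loss-based weighting with the filtering step (an illustration of the idea only; the inverse-loss normalization used here is one of several reasonable choices, not necessarily the exact rule from the paper):

```python
def loss_based_weights(acc_loss, own_idx, num_byzantine):
    """acc_loss[j] = accumulated loss of neighbor j's model on this agent's own data."""
    kept = dict(acc_loss)
    # Resilience step: discard the neighbors with the largest accumulated losses.
    for j in sorted(kept, key=kept.get, reverse=True)[:num_byzantine]:
        if j != own_idx:
            del kept[j]
    # Smaller accumulated loss -> more similar task -> larger weight.
    inv = {j: 1.0 / max(l, 1e-12) for j, l in kept.items()}
    total = sum(inv.values())
    return {j: v / total for j, v in inv.items()}

print(loss_based_weights({0: 0.20, 1: 0.25, 2: 9.0}, own_idx=0, num_byzantine=1))
# neighbor 2's implausibly large loss is filtered out
```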

### An example of a multi-task distributed system with Byzantine agents
- nodes in the same color perform the same task
- nodes connected by a link are neighbors and can exchange messages
- nodes in red are Byzantine agents
- Byzantine agents send arbitrary messages to normal agents (a toy construction follows below)
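
A toy construction of such a network (hypothetical, for intuition only):

```python
import random

num_agents, num_tasks, num_byzantine = 12, 3, 2
task = [i % num_tasks for i in range(num_agents)]                # node "color"
byzantine = set(random.sample(range(num_agents), num_byzantine)) # red nodes

def send(sender, model):
    # A Byzantine agent may report an arbitrary value instead of its model.
    return random.uniform(-1e3, 1e3) if sender in byzantine else model
```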

### Case Studies
- [Target Localization](https://github.com/JianiLi/resilientDistributedMTL/tree/main/TargetLocalization)
- [Human Activity Recognition](https://github.com/JianiLi/resilientDistributedMTL/tree/main/HumanActivityRecog)
- [Digit Classification](https://github.com/JianiLi/resilientDistributedMTL/tree/main/DigitClassification)

### Cite the paper
```
@inproceedings{neurips_2020_byzantineMTL,
    title={Byzantine Resilient Distributed Multi-Task Learning},
    author={Jiani Li and Waseem Abbas and Xenofon Koutsoukos},
    booktitle = {Thirty-fourth Conference on Neural Information Processing Systems (NeurIPS)},
    year = {2020}
}
```
--------------------------------------------------------------------------------
/HumanActivityRecog/plot_results/plot_range.py:
--------------------------------------------------------------------------------
import numpy as np

# rule = "no-cooperation"
# rule = "average"
# rule = "loss"
# rule = "distance"

# train_test = "train"
train_test = "test"
loss_or_acc = "acc"
attack = True
attackerNum = 10

if attackerNum == 10:
    attacker = [27, 12, 24, 13, 1, 8, 16, 15, 28, 9]
elif attackerNum == 29:
    normal = [4]
    attacker = np.delete(range(0, 30), normal)

print("attack: %s, train or test: %s, loss or acc: %s" % (attack, train_test, loss_or_acc))


def print_num(l, string, num=5):
    # Print an array as a pasteable list, `num` values per row.
    print(string + "=[", end=' ')
    for i in range(len(l)):
        print("%.7f" % l[i], end=' ')
        if (i + 1) % num == 0:
            print()
    print("]")


for rule in ["no-cooperation", "average", "distance", "loss"]:
    if attack:
        loss = np.load("results/attacked/%d/individual_average_%s_loss_%s.npy" % (attackerNum, train_test, rule))
        acc = np.load("results/attacked/%d/individual_average_%s_acc_%s.npy" % (attackerNum, train_test, rule))
        # Exclude the Byzantine agents before computing statistics.
        loss = np.delete(loss, attacker, axis=1)
        acc = np.delete(acc, attacker, axis=1)
    else:
        loss = np.load("results/individual_average_%s_loss_%s.npy" % (train_test, rule))
        acc = np.load("results/individual_average_%s_acc_%s.npy" % (train_test, rule))

    # Per-epoch statistics across the (normal) agents.
    mean_loss = np.nanmean(loss, 1)
    max_loss = np.nanmax(loss, 1)
    min_loss = np.nanmin(loss, 1)
    variance_loss = np.nanvar(loss, 1)

    mean_acc = np.nanmean(acc, 1)
    max_acc = np.nanmax(acc, 1)
    min_acc = np.nanmin(acc, 1)
    variance_acc = np.nanvar(acc, 1)

    if rule == "no-cooperation":
        rule = "no_cooperation"  # valid identifier for the printed variable names

    if loss_or_acc == "loss":
        print_num(mean_loss, "mean_loss_%s" % rule, num=5)
        print_num(min_loss, "min_loss_%s" % rule, num=5)
        print_num(max_loss, "max_loss_%s" % rule, num=5)
    else:
        print_num(mean_acc, "mean_acc_%s" % rule, num=5)
        print_num(min_acc, "min_acc_%s" % rule, num=5)
        print_num(max_acc, "max_acc_%s" % rule, num=5)
--------------------------------------------------------------------------------
/DigitClassification/README.md:
--------------------------------------------------------------------------------
### Case Study - Digit Classification
We consider a network of ten agents performing digit classification.
Five of the ten agents have access to the MNIST dataset (group 1), and the other five have access to a synthetic dataset (group 2) composed of generated images of digits embedded on random backgrounds.
All the images are preprocessed to be 28×28 grayscale images.
We model each agent as a separate task and use a complete graph to model the network topology.
An agent does not know which of its neighbors are performing the same task as itself. We use a CNN of the same architecture for each agent, trained with cross-entropy loss.
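
A sketch of the preprocessing this implies (assumed torchvision pipeline; the repo's actual transforms are defined in main.py and synthetic_images_visualization.py):

```python
from torchvision import transforms

# Bring both MNIST and the synthetic digits to normalized 28x28 grayscale tensors.
to_gray_28 = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])
```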

### Download Dataset
- The synthetic digits dataset can be downloaded here: https://www.kaggle.com/prasunroy/synthetic-digits
- Download the dataset, unzip it, rename the folder to "synthetic_digits", and put the folder under DigitClassification/

### MNIST and Synthetic Digits


### Instructions
Tested on Python 3.7.

- Run main.py to reproduce the results shown in the paper.
- Run synthetic_images_visualization.py to generate the synthetic digit images.

- In main.py, "rule" can be "no-cooperation", "loss", "distance", or "average", as explained in the paper.
- Set "attacker" to [] to simulate the attack-free case, or to e.g. [2, 7] to simulate the case where agent 2 and agent 7 are attackers; a configuration sketch follows below.
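
For example (a hedged sketch of the settings described above; their exact position inside main.py may differ):

```python
rule = "loss"      # one of "no-cooperation", "loss", "distance", "average"
attacker = [2, 7]  # Byzantine agent indices; use [] for the attack-free case
```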

### Results
Results show that the loss-based weight assignment rule outperforms all the other rules, as well as the non-cooperative case,
with respect to the mean and range of the average loss and accuracy, both with and without Byzantine agents.
Hence, our simulations imply that the loss-based weights accurately learn the relationships among agents.
Moreover, normal agents with a large estimation regret indeed benefit from cooperating with agents with a small regret.

- Average testing loss and accuracy for normal agents in group 1 (MNIST digit classification):

- Average testing loss and accuracy for normal agents in group 2 (synthetic digits classification):


### Cite the paper
```
@inproceedings{neurips_2020_byzantineMTL,
    title={Byzantine Resilient Distributed Multi-Task Learning},
    author={Jiani Li and Waseem Abbas and Xenofon Koutsoukos},
    booktitle = {Thirty-fourth Conference on Neural Information Processing Systems (NeurIPS)},
    year = {2020}
}
```
--------------------------------------------------------------------------------
/DigitClassification/synthetic_images_visualization.py:
--------------------------------------------------------------------------------
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

transformtrain = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.RandomHorizontalFlip(),
    # transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

transformtest = transforms.Compose([
    transforms.Resize((28, 28)),
    # transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

train_data = datasets.ImageFolder('synthetic_digits/synthetic_digits/imgs_train', transform=transformtrain)
test_data = datasets.ImageFolder('synthetic_digits/synthetic_digits/imgs_valid', transform=transformtest)

test_loader = DataLoader(test_data, batch_size=64, shuffle=False)

# Grab one batch of synthetic digits and show the first 30 images.
batch_idx, (example_data, example_targets) = next(enumerate(test_loader))
print(example_data.shape)

plt.figure(figsize=(4, 3))
for i in range(30):
    plt.subplot(3, 10, i + 1)
    img = example_data[i]
    # (C, H, W) -> (H, W, C) for imshow, undoing the normalization back to [0, 1].
    img_new = img.permute(1, 2, 0) * 0.5 + 0.5
    plt.imshow(img_new)
    plt.axis('off')
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()
--------------------------------------------------------------------------------
/DigitClassification/plot_results/plot.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt

# Curves saved by main.py, keyed by (train/test, loss/acc) and weighting rule.
KEYS = ["train_loss", "train_acc", "test_loss", "test_acc"]
LABELS = {
    "no-cooperation": "No cooperation",
    "average": "Average weights",
    "loss": "Loss based weights",
    "distance": "Distance based weights",
    "distanceLoss": "Distance based weights",
}
PANELS = [
    ("train_loss", "Average training loss"),
    ("train_acc", "Average training accuracy"),
    ("test_loss", "Average testing loss"),
    ("test_acc", "Average testing accuracy"),
]


def load_curves(result_dir, rules):
    curves = {}
    for rule in rules:
        curves[rule] = {key: np.load("%s/average_%s_%s.npy" % (result_dir, key, rule)) for key in KEYS}
        for key in KEYS:
            print("%s_%s" % (key, rule), curves[rule][key])
    return curves


def plot_curves(curves, out_file):
    fig = plt.figure(figsize=(10, 2.5))
    for i, (key, ylabel) in enumerate(PANELS):
        ax = fig.add_subplot(1, 4, i + 1)
        for rule, data in curves.items():
            ax.plot(data[key], label=LABELS[rule])
        plt.xlabel("Epoch", fontsize=15)
        plt.ylabel(ylabel, fontsize=15)
        if i == 0:
            plt.legend(fontsize=15)
    # Save before plt.show(); saving after show() writes out an empty figure.
    plt.savefig(out_file)
    plt.show()


# Attack-free results.
plot_curves(load_curves("results", ["no-cooperation", "average", "loss", "distance"]),
            "prediction accuracy_comparison.eps")

# --------------------- Under attack below -----------------------
# The attacked distance-based results were saved under the name "distanceLoss".
plot_curves(load_curves("results/attacked", ["no-cooperation", "average", "loss", "distanceLoss"]),
            "attacked_prediction accuracy_comparison.eps")
--------------------------------------------------------------------------------
/HumanActivityRecog/plot_results/plot_individual.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt

# Curves saved by main.py, keyed by (train/test, loss/acc) and weighting rule.
KEYS = ["train_loss", "train_acc", "test_loss", "test_acc"]
LABELS = {
    "no-cooperation": "No cooperation",
    "average": "Average weights",
    "loss": "Loss based weights",
    "distance": "Distance based weights",
}
PANELS = [
    ("train_loss", "Average training loss"),
    ("train_acc", "Average training accuracy"),
    ("test_loss", "Average testing loss"),
    ("test_acc", "Average testing accuracy"),
]


def print_num(l, string, num=5):
    # Print an array as a pasteable list, `num` values per row.
    print(string + "=[", end=' ')
    for i in range(len(l)):
        print(l[i], end=' ')
        if (i + 1) % num == 0:
            print()
    print("]")


def load_curves(result_dir, rules):
    curves = {}
    for rule in rules:
        curves[rule] = {key: np.load("%s/average_%s_%s.npy" % (result_dir, key, rule)) for key in KEYS}
        for key in KEYS:
            print_num(curves[rule][key], "%s_%s" % (key, rule.replace("-", "_")))
    return curves


def plot_curves(curves, out_file):
    fig = plt.figure(figsize=(10, 2.5))
    for i, (key, ylabel) in enumerate(PANELS):
        ax = fig.add_subplot(1, 4, i + 1)
        for rule, data in curves.items():
            ax.plot(data[key], label=LABELS[rule])
        plt.xlabel("Epoch", fontsize=15)
        plt.ylabel(ylabel, fontsize=15)
        if i == 0:
            plt.legend(fontsize=15)
    # Save before plt.show(); saving after show() writes out an empty figure.
    plt.savefig(out_file)
    plt.show()


rules = ["no-cooperation", "average", "loss", "distance"]

# Attack-free results.
plot_curves(load_curves("results", rules), "prediction accuracy_comparison.eps")

# --------------------- Under attack below -----------------------
plot_curves(load_curves("results/attacked", rules), "attacked_prediction accuracy_comparison.eps")
--------------------------------------------------------------------------------
/HumanActivityRecog/plot_results/plot.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt


def print_num(l, string, num=5):
    # Print an array as a pasteable list, `num` values per row.
    print(string + "=[", end=' ')
    for i in range(len(l)):
        print(l[i], end=' ')
        if (i + 1) % num == 0:
            print()
    print("]")


no_coop = np.load("results/average_train_loss_no-cooperation.npy")
no_coop_acc = np.load("results/average_train_acc_no-cooperation.npy")
print_num(no_coop, "no_coop", num=5)

ave = np.load("results/average_train_loss_average.npy")
ave_acc = np.load("results/average_train_acc_average.npy")
print(ave_acc)

loss = np.load("results/average_train_loss_loss.npy")
loss_acc = np.load("results/average_train_acc_loss.npy")
print(loss_acc)

distance = np.load("results/average_train_loss_distance.npy")
distance_acc = np.load("results/average_train_acc_distance.npy")
print(distance_acc)

no_coop_test = np.load("results/average_test_loss_no-cooperation.npy")
no_coop_acc_test = np.load("results/average_test_acc_no-cooperation.npy")
print(no_coop_acc_test)

ave_test = np.load("results/average_test_loss_average.npy")
ave_acc_test = np.load("results/average_test_acc_average.npy")
print(ave_acc_test)

loss_test = np.load("results/average_test_loss_loss.npy")
loss_acc_test = np.load("results/average_test_acc_loss.npy")
print(loss_acc_test)

distance_test = np.load("results/average_test_loss_distance.npy")
distance_acc_test = np.load("results/average_test_acc_distance.npy")
print(distance_acc_test)

fig = plt.figure(figsize=(10, 2.5))
ax = fig.add_subplot(1, 4, 1)
ax.plot(no_coop, label="No cooperation")
ax.plot(ave, label="Average weights")
ax.plot(loss, label="Loss based weights")
ax.plot(distance, label="Distance based weights")
plt.xlabel("Epoch", fontsize=15)
plt.ylabel("Average training loss", fontsize=15)
plt.legend(fontsize=15)

ax = fig.add_subplot(1, 4, 2)
ax.plot(no_coop_acc, label="No cooperation")
ax.plot(ave_acc, label="Average weights")
ax.plot(loss_acc, label="Loss based weights")
ax.plot(distance_acc, label="Distance based weights")
plt.xlabel("Epoch", fontsize=15)
plt.ylabel("Average training accuracy", fontsize=15)

ax = fig.add_subplot(1, 4, 3)
ax.plot(no_coop_test, label="No cooperation")
ax.plot(ave_test, label="Average weights")
ax.plot(loss_test, label="Loss based weights")
ax.plot(distance_test, label="Distance based weights")
plt.xlabel("Epoch", fontsize=15)
plt.ylabel("Average testing loss", fontsize=15)

ax = fig.add_subplot(1, 4, 4)
ax.plot(no_coop_acc_test, label="No cooperation")
ax.plot(ave_acc_test, label="Average weights")
ax.plot(loss_acc_test, label="Loss based weights")
ax.plot(distance_acc_test, label="Distance based weights")
plt.xlabel("Epoch", fontsize=15)
plt.ylabel("Average testing accuracy", fontsize=15)

# Save before plt.show(); saving after show() writes out an empty figure.
plt.savefig("prediction accuracy_comparison.eps")
plt.show()
156 |
157 |
158 |
159 | # --------------------- Under attack below -----------------------
160 |
161 | attacker_num = 10
162 |
163 | no_coop = np.load("results/attacked/%d/average_train_loss_no-cooperation.npy" % attacker_num)
164 | no_coop_acc = np.load("results/attacked/%d/average_train_acc_no-cooperation.npy" % attacker_num)
165 |
166 | #no_coop = 20*np.log(np.load("results/attacked/average_train_loss_no-cooperation.npy"))
167 | no_coop.tolist()
168 | print("loss-no_coop", no_coop)
169 | print("acc-no_coop", no_coop_acc)
170 |
171 |
172 | ave = np.load("results/attacked/%d/average_train_loss_average.npy" % attacker_num)
173 | ave_acc = np.load("results/attacked/%d/average_train_acc_average.npy" % attacker_num)
174 |
175 | ave = ave.tolist()
176 | print("loss-ave", ave)
177 | print("acc-ave", ave_acc)
178 |
179 | loss = np.load("results/attacked/%d/average_train_loss_loss.npy" % attacker_num)
180 | loss_acc = np.load("results/attacked/%d/average_train_acc_loss.npy" % attacker_num)
181 | #loss = 20*np.log(np.load("results/attacked/average_train_loss_loss.npy"))
182 | loss = loss.tolist()
183 | print("loss-loss", loss)
184 | print("acc-loss", loss_acc)
185 |
186 | distance = np.load("results/attacked/%d/average_train_loss_distance.npy" % attacker_num)
187 | distance_acc = np.load("results/attacked/%d/average_train_acc_distance.npy" % attacker_num)
188 |
189 | #loss = 20*np.log(np.load("results/attacked/average_train_loss_loss.npy"))
190 | distance = distance.tolist()
191 | print("loss-distance", distance)
192 | print("acc-distance", distance_acc)
193 |
194 |
195 |
196 | # reverse_loss = np.load("results/attacked/average_train_loss_reversedLoss.npy")
197 | # reverse_loss_acc = np.load("results/attacked/average_train_acc_reversedLoss.npy")
198 | # #loss = 20*np.log(np.load("results/attacked/average_train_loss_loss.npy"))
199 | # reverse_loss.tolist()
200 | # print(reverse_loss)
201 |
202 | no_coop_test = np.load("results/attacked/%d/average_test_loss_no-cooperation.npy" % attacker_num)
203 | no_coop_acc_test = np.load("results/attacked/%d/average_test_acc_no-cooperation.npy" % attacker_num)
204 |
205 | #no_coop_test = 20*np.log(np.load("results/attacked/average_test_loss_no-cooperation.npy"))
206 | no_coop_test = no_coop_test.tolist()
207 | print("loss-no_coop_test", no_coop_test)
208 | print("acc-no_coop_test", no_coop_acc_test)
209 |
210 |
211 | ave_test = np.load("results/attacked/%d/average_test_loss_average.npy" % attacker_num)
212 | ave_acc_test = np.load("results/attacked/%d/average_test_acc_average.npy" % attacker_num)
213 |
214 | ave_test = ave_test.tolist()
215 | print("loss-ave_test", ave_test)
216 | print("acc-ave_test", ave_acc_test)
217 |
218 | loss_test = np.load("results/attacked/%d/average_test_loss_loss.npy" % attacker_num)
219 | loss_acc_test = np.load("results/attacked/%d/average_test_acc_loss.npy" % attacker_num)
220 | #loss_test = 20*np.log(np.load("results/attacked/average_test_loss_loss.npy"))
221 | loss_test = loss_test.tolist()
222 | print("loss-loss_test", loss_test)
223 | print("acc-loss_test", loss_acc_test)
224 |
225 |
226 | distance_test = np.load("results/attacked/%d/average_test_loss_distance.npy" % attacker_num)
227 | distance_acc_test = np.load("results/attacked/%d/average_test_acc_distance.npy" % attacker_num)
228 | #loss_test = 20*np.log(np.load("results/attacked/average_test_loss_loss.npy"))
229 | distance_test = distance_test.tolist()
230 | print("loss-distance_test", distance_test)
231 | print("acc-distance_test", distance_acc_test)
232 |
233 |
234 | fig = plt.figure(figsize=(10,2.5))
235 | ax = fig.add_subplot(1, 4, 1)
236 | ax.plot(no_coop, label="No cooperation")
237 | ax.plot(ave, label="Average weights")
238 | ax.plot(loss, label="Loss based weights")
239 | ax.plot(distance, label="Distance based weights")
240 | #ax.plot(reverse_loss, label="Reversed Loss based weights")
241 | plt.xlabel("Epoch", fontsize=15)
242 | plt.ylabel("Average training loss", fontsize=15)
243 | plt.legend(fontsize=15)
244 |
245 | ax = fig.add_subplot(1, 4, 2)
246 | ax.plot(no_coop_acc, label="No cooperation")
247 | ax.plot(ave_acc, label="Average weights")
248 | ax.plot(loss_acc, label="Loss based weights")
249 | ax.plot(distance_acc, label="Distance based weights")
250 | #ax.plot(reverse_loss_acc, label="Reversed Loss based weights")
251 | plt.xlabel("Epoch", fontsize=15)
252 | plt.ylabel("Average training accuracy", fontsize=15)
253 | # plt.legend(fontsize=15)
254 |
255 | ax = fig.add_subplot(1, 4, 3)
256 | ax.plot(no_coop_test, label="No cooperation")
257 | ax.plot(ave_test, label="Average weights")
258 | ax.plot(loss_test, label="Loss based weights")
259 | ax.plot(distance_test, label="Distance based weights")
260 | #ax.plot(reverse_loss_test, label="Reversed Loss based weights")
261 | plt.xlabel("Epoch", fontsize=15)
262 | plt.ylabel("Average testing loss", fontsize=15)
263 | # plt.legend(fontsize=15)
264 |
265 | ax = fig.add_subplot(1, 4, 4)
266 | ax.plot(no_coop_acc_test, label="No cooperation")
267 | ax.plot(ave_acc_test, label="Average weights")
268 | ax.plot(loss_acc_test, label="Loss based weights")
269 | ax.plot(distance_acc_test, label="Distance based weights")
270 | #ax.plot(reverse_loss_acc_test, label="Reversed Loss based weights")
271 | plt.xlabel("Epoch", fontsize=15)
272 | plt.ylabel("Average testing accuracy", fontsize=15)
273 | # plt.legend(fontsize=15)
274 |
275 | plt.savefig("attacked_prediction accuracy_comparison.eps")  # save before show()
276 | plt.show()
277 |
--------------------------------------------------------------------------------
/TargetLocalization/single_task.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import random
3 | import numpy as np
4 | from matplotlib import rc
5 | from scipy.spatial.distance import cdist, euclidean
6 | from utils import *
7 | from collections import defaultdict
8 |
9 |
10 | def noncooperative_learn(i, numAgents, w0, x, vu, u, vd, d, psi, w, mu_k, q, attackers, psi_a):
11 | for k in range(numAgents):
12 | dist = w0.distance(x[k])
13 | unit = [(w0.x - x[k].x) / dist, (w0.y - x[k].y) / dist]
14 | u[:, k] = unit + vu[i, k]
15 | d[k] = np.dot([w0.x - x[k].x, w0.y - x[k].y], u[:, k].T) + vd[i, k]
16 | q[:, k] = [x[k].x, x[k].y] + d[k] * u[:, k]
17 | a = 0
18 | for k in range(numAgents):
19 | if k not in attackers:
20 | # target estimation
21 | psi[:, k] = w[:, k] + mu_k * (q[:, k] - w[:, k])
22 | w[:, k] = psi[:, k]
23 | else:
24 | w[:, k] = psi_a[:, a]
25 | a += 1
26 |
27 |
28 | return w
29 |
30 |
31 | def average_learn(i, numAgents, w0, x, vu, u, vd, d, psi, w, mu_k, q, attackers, psi_a, Neigh):
32 | for k in range(numAgents):
33 | dist = w0.distance(x[k])
34 | unit = [(w0.x - x[k].x) / dist, (w0.y - x[k].y) / dist]
35 | u[:, k] = unit + vu[i, k]
36 | d[k] = np.dot([w0.x - x[k].x, w0.y - x[k].y], u[:, k].T) + vd[i, k]
37 | q[:, k] = [x[k].x, x[k].y] + d[k] * u[:, k]
38 |
39 | a = 0
40 | for k in range(numAgents):
41 | if k not in attackers:
42 | # target estimation
43 | psi[:, k] = w[:, k] + mu_k * (q[:, k] - w[:, k])
44 |
45 | else:
46 | psi[:, k] = psi_a[:, a]
47 | a += 1
48 |
49 | for k in range(numAgents):
50 | if k not in attackers:
51 | w[:, k] = np.mean(np.array([psi[:, j] for j in Neigh[k]]), axis=0)
52 | else:
53 | w[:, k] = psi[:, k]
54 |
55 | return w
56 |
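# --- Editor's sketch (not part of the original file) ------------------------
# noncooperative_learn / average_learn follow an adapt-then-combine pattern:
# a local LMS step psi = w + mu * (q - w), then (for average_learn) a
# neighborhood average of the intermediate estimates. A minimal standalone
# illustration with hypothetical toy values:
#
#   import numpy as np
#   w = np.zeros((2, 2))                        # 2-D estimates of 2 agents
#   q = np.ones((2, 2))                         # noisy location measurements
#   mu = 0.5
#   psi = w + mu * (q - w)                      # adapt step
#   neigh = {0: [0, 1], 1: [0, 1]}              # fully connected toy graph
#   w = np.stack([psi[:, neigh[k]].mean(axis=1) for k in range(2)], axis=1)
# -----------------------------------------------------------------------------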
57 | def loss_learn(i, numAgents, w0, x, vu, u, vd, d, psi, w, mu_k, q, attackers, psi_a, Accumulated_Loss, Neigh):
58 | for k in range(numAgents):
59 | dist = w0.distance(x[k])
60 | unit = [(w0.x - x[k].x) / dist, (w0.y - x[k].y) / dist]
61 | u[:, k] = unit + vu[i, k]
62 | d[k] = np.dot([w0.x - x[k].x, w0.y - x[k].y], u[:, k].T) + vd[i, k]
63 | q[:, k] = [x[k].x, x[k].y] + d[k] * u[:, k]
64 |
65 | a = 0
66 | gamma = 0.01
67 |
68 | for k in range(numAgents):
69 | if k not in attackers:
70 | psi[:, k] = w[:, k] + mu_k * (q[:, k] - w[:, k])
71 |
72 | else:
73 | psi[:, k] = psi_a[:, a]
74 | a += 1
75 |
76 |
77 | for k in range(numAgents):
78 | if k not in attackers:
79 | Weight = np.zeros((numAgents,))
80 | reversed_loss = np.zeros((numAgents,))
81 | loss = (d[k] - (np.dot([psi[:, k]], u[:, k].T)).item()) ** 2
82 | Accumulated_Loss[k, k] = (1 - gamma) * Accumulated_Loss[k, k] + gamma * loss
83 | for l in Neigh[k]:
84 | loss = (d[k] - (np.dot([psi[:, l]], u[:, k].T)).item())**2
85 | Accumulated_Loss[k, l] = (1 - gamma) * Accumulated_Loss[k, l] + gamma * loss
86 | if Accumulated_Loss[k, l] <= Accumulated_Loss[k, k]:
87 | reversed_loss[l] = (1./Accumulated_Loss[k, l])
88 | sum_reversedLoss = sum(reversed_loss)
89 | for l in Neigh[k]:
90 | if Accumulated_Loss[k, l] <= Accumulated_Loss[k, k]:
91 | weight = reversed_loss[l] / sum_reversedLoss
92 | Weight[l] = weight
93 | #print(Weight)
94 | w[:, k] = np.dot(psi, Weight)
95 | else:
96 | w[:, k] = psi[:, k]
97 |
98 | return w, Accumulated_Loss
99 |
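# --- Editor's sketch (not part of the original file) ------------------------
# loss_learn weights each neighbor by the inverse of its discounted
# accumulated loss, keeping only neighbors whose accumulated loss does not
# exceed the agent's own. The weighting rule in isolation, with hypothetical
# numbers (index 0 playing the role of agent k itself):
#
#   import numpy as np
#   acc = np.array([2.0, 1.0, 4.0, 0.5])   # discounted accumulated losses
#   keep = acc <= acc[0]                    # at least as good as agent k
#   inv = np.where(keep, 1.0 / acc, 0.0)
#   weights = inv / inv.sum()               # ~ [0.14, 0.29, 0.0, 0.57]
# -----------------------------------------------------------------------------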
100 |
101 | if __name__ == '__main__':
102 | random.seed(0)
103 | np.random.seed(0)
104 | # parameters
105 | iteration = 1000
106 | sensingRange = 1
107 | r = 1
108 | box = 12
109 | numAgents = 100
110 | mu_k = 0.01
111 | w0 = Point(10, 10)
112 | lower = 0
113 | upper = 3
114 |
115 | attackerNum = 5
116 | attackers = random.sample(list(range(numAgents)), attackerNum)
117 | normalAgents = [k for k in range(numAgents) if k not in attackers]
118 |
119 | x_no = random_point_set(numAgents, lower=lower, upper=upper)
120 | # for k in attackers:
121 | # w_no[k] = Point(np.random.random(), np.random.random())
122 |
123 | x_init = copy.deepcopy(x_no)
124 | x_avg = copy.deepcopy(x_no)
125 | x_loss = copy.deepcopy(x_no)
126 |
127 | Neigh = []
128 | for k in range(numAgents):
129 | neighbor = findNeighbors(x_init, k, numAgents, sensingRange, maxNeighborSize=10)
130 | Neigh.append(neighbor)
131 |
132 |
133 | plt.clf()
134 | # plt.grid(True, which='major')
135 | ax = plt.gca()
136 | ax.set_xlim(lower-0.1, upper+0.1)
137 | ax.set_ylim(lower-0.1, upper+0.1)
138 | # plt.xticks([0.3*i for i in range(-5,5, 1)])
139 | # plt.yticks([0.3*i for i in range(-5,5, 1)])
140 | plt.gca().set_aspect('equal', adjustable='box')
141 | # ax.grid(True)
142 |     for tic in ax.xaxis.get_major_ticks():
143 |         tic.tick1line.set_visible(False); tic.tick2line.set_visible(False)
144 |         tic.label1.set_visible(False); tic.label2.set_visible(False)
145 |     for tic in ax.yaxis.get_major_ticks():
146 |         tic.tick1line.set_visible(False); tic.tick2line.set_visible(False)
147 |         tic.label1.set_visible(False); tic.label2.set_visible(False)
148 | start, end = ax.get_xlim()
149 | ax.xaxis.set_ticks(np.arange(start, end + 0.01, 0.2))
150 | ax.yaxis.set_ticks(np.arange(start, end + 0.01, 0.2))
151 |
152 | # ax.set_xticks([0, 0.3, 0.4, 1.0, 1.5])
153 | ax.set_xticklabels([-1, "", "", "", "", 0, "", "", "", "", 1])
154 | ax.set_yticklabels([-1, "", "", "", "", 0, "", "", "", "", 1])
155 | for i in range(0, numAgents):
156 | for neighbor in Neigh[i]:
157 | plt.plot([x_init[i].x, x_init[neighbor].x], [x_init[i].y, x_init[neighbor].y], linewidth=0.2,
158 | color='gray')
159 |
160 | plot_point_set(x_init, color='b') # fault-free robots are plotted in blue
161 | plot_point_set([x_init[p] for p in attackers], color='r') # faulty robots are plotted in red
162 |
163 | plt.pause(0.1)
164 | # plt.show()
165 | # plt.savefig('./result/largeNetwork/%s%d.eps' % (method, t))
166 | # end = input('Press enter to end the program.')
167 |
168 |
169 |
170 | psi_a = 0 * np.ones((2, len(attackers)))
171 | phi_a = 0 * np.ones((2, len(attackers)))
172 |
173 | mu_vd = 0
174 | mu_vu = 0
175 | sigma_vd2 = 0.5 + 0.5 * np.random.random((numAgents, 1))
176 | #sigma_vd2[random.sample(range(numAgents), 20)] = 5
177 | sigma_vu2 = 0.01 + 0.04 * np.random.random((numAgents, 1))
178 | sigma_vu2[random.sample(range(numAgents), 5)] = 0.3
179 |
180 | # The following parameters work
181 | # sigma_vd2 = 1 + 0.4 * np.random.random((numAgents, 1))
182 | # #sigma_vd2[random.sample(range(numAgents), 3)] = 3
183 | # sigma_vu2 = 0.5 + 0.05 * np.random.random((numAgents, 1))
184 | # #sigma_vu2[random.sample(range(numAgents), 3)] = 3
185 | vd = np.zeros((iteration, numAgents))
186 | vu = np.zeros((iteration, numAgents))
187 | for k in range(numAgents):
188 | vd[:, k] = np.random.normal(mu_vd, sigma_vd2[k], iteration)
189 | vu[:, k] = np.random.normal(mu_vu, sigma_vu2[k], iteration)
190 |
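# Editor's note (not part of the original file): np.random.normal takes the
# *standard deviation* as its scale argument, so sigma_vd2 / sigma_vu2 are
# consumed as standard deviations here despite the squared-variance "2"
# suffix in their names.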
191 | d = np.zeros((numAgents,))
192 | u = np.zeros((2, numAgents))
193 | q = np.zeros((2, numAgents))
194 | psi = np.zeros((2, numAgents))
195 |
196 | w_no = np.zeros((2, numAgents))
197 | for k in range(numAgents):
198 | w_no[0, k], w_no[1, k] = x_no[k].x, x_no[k].y
199 | w_avg = np.zeros((2, numAgents))
200 | for k in range(numAgents):
201 | w_avg[0, k], w_avg[1, k] = x_avg[k].x, x_avg[k].y
202 | w_loss = np.zeros((2, numAgents))
203 | for k in range(numAgents):
204 | w_loss[0, k], w_loss[1, k] = x_loss[k].x, x_loss[k].y
205 |
206 | vg_no = np.zeros((2, numAgents))
207 | vg_avg = np.zeros((2, numAgents))
208 | vg_loss = np.zeros((2, numAgents))
209 |
210 |
211 | phi = np.zeros((2, numAgents))
212 | v_no = np.zeros((2, numAgents))
213 | v_avg = np.zeros((2, numAgents))
214 | v_loss = np.zeros((2, numAgents))
215 |
216 | MSE_x_no = np.zeros((iteration,))
217 | MSE_x_avg = np.zeros((iteration,))
218 | MSE_x_loss = np.zeros((iteration,))
219 |
220 | W1_no = np.zeros((iteration, numAgents))
221 | W1_avg = np.zeros((iteration, numAgents))
222 | W1_loss = np.zeros((iteration, numAgents))
223 |
224 | Accumulated_Loss = np.zeros((numAgents, numAgents))
225 |
226 | fig = plt.figure(figsize=(15, 4))
227 | ax1 = plt.subplot(151)
228 | ax2 = plt.subplot(152)
229 | ax3 = plt.subplot(153)
230 |
231 |
232 | for i in range(iteration):
233 | error_no = 0
234 | for k in normalAgents:
235 | agent = Point(w_no[0, k], w_no[1, k])
236 | error_no += agent.distance(w0) ** 2
237 | W1_no[i, k] = w_no[0, k]
238 |         error_no /= len(normalAgents)  # w_no has shape (2, numAgents); len(w_no) is 2
239 | MSE_x_no[i] = error_no
240 |
241 | error_avg = 0
242 | for k in normalAgents:
243 | agent = Point(w_avg[0, k], w_avg[1, k])
244 | error_avg += agent.distance(w0) ** 2
245 | W1_avg[i, k] = w_avg[0, k]
246 |         error_avg /= len(normalAgents)
247 | MSE_x_avg[i] = error_avg
248 |
249 | error_loss = 0
250 | for k in normalAgents:
251 | agent = Point(w_loss[0, k], w_loss[1, k])
252 | error_loss += (agent.distance(w0)) ** 2
253 | W1_loss[i, k] = w_loss[0, k]
254 |         error_loss /= len(normalAgents)
255 | MSE_x_loss[i] = error_loss
256 |
257 | print('iteration %d' % i)
258 |
259 | # plt.clf()
260 |
261 | # ax1 = plt.subplot(151)
262 | # ax1.set_xlim(-1, box)
263 | # ax1.set_ylim(-1, box)
264 | # ax1.set_aspect('equal', adjustable='box')
265 | # for tic in ax1.xaxis.get_major_ticks():
266 | # tic.tick1On = tic.tick2On = False
267 | # tic.label1On = tic.label2On = False
268 | # for tic in ax1.yaxis.get_major_ticks():
269 | # tic.tick1On = tic.tick2On = False
270 | # tic.label1On = tic.label2On = False
271 | # plot_point(w0, marker='*', color='chartreuse', size=12, ax=ax1)
272 | # plot_point_set(x_no, color='b', ax=ax1, alpha=0.5)
273 | # if attackers:
274 | # for i in attackers:
275 | # plot_point(x_no[i], color='r', ax=ax1)
276 | # ax1.set_title('Noncooperative LMS')
277 | #
278 | # ax2 = plt.subplot(152)
279 | # ax2.set_xlim(-1, box)
280 | # ax2.set_ylim(-1, box)
281 | # ax2.set_aspect('equal', adjustable='box')
282 | # for tic in ax2.xaxis.get_major_ticks():
283 | # tic.tick1On = tic.tick2On = False
284 | # tic.label1On = tic.label2On = False
285 | # for tic in ax2.yaxis.get_major_ticks():
286 | # tic.tick1On = tic.tick2On = False
287 | # tic.label1On = tic.label2On = False
288 | # plot_point(w0, marker='*', color='chartreuse', size=12, ax=ax2)
289 | # plot_point_set(x_avg, color='b', ax=ax2, alpha=0.5)
290 | # if attackers:
291 | # for i in attackers:
292 | # plot_point(x_avg[i], color='r', ax=ax2)
293 | # ax2.set_title('Average')
294 | #
295 | # ax3 = plt.subplot(153)
296 | # ax3.set_xlim(-1, box)
297 | # ax3.set_ylim(-1, box)
298 | # ax3.set_aspect('equal', adjustable='box')
299 | # for tic in ax3.xaxis.get_major_ticks():
300 | # tic.tick1On = tic.tick2On = False
301 | # tic.label1On = tic.label2On = False
302 | # for tic in ax3.yaxis.get_major_ticks():
303 | # tic.tick1On = tic.tick2On = False
304 | # tic.label1On = tic.label2On = False
305 | # plot_point(w0, marker='*', color='chartreuse', size=12, ax=ax3)
306 | # plot_point_set(x_loss, color='b', ax=ax3, alpha=0.5)
307 | # if attackers:
308 | # for i in attackers:
309 | # plot_point(x_loss[i], color='r', ax=ax3)
310 | # ax3.set_title('Coordinate-wise median')
311 | #
312 | #
313 | #
314 | # plt.pause(0.001)
315 |
316 | # noncooperative
317 | w_no = noncooperative_learn(i, numAgents, w0, x_no, vu, u, vd, d, psi, w_no, mu_k, q, attackers, psi_a)
318 |
319 | # cooperative
320 | w_avg = average_learn(i, numAgents, w0, x_avg, vu, u, vd, d, psi, w_avg, mu_k, q, attackers, psi_a, Neigh)
321 |
322 | w_loss, Accumulated_Loss = loss_learn(i, numAgents, w0, x_loss, vu, u, vd, d, psi, w_loss, mu_k, q, attackers, psi_a, Accumulated_Loss, Neigh)
323 |
324 |
325 |
326 |
327 | fig1 = plt.figure(figsize=(3.9, 2.5))
328 | #fig1 = plt.figure(figsize=(3.9, 2))
329 | plt.plot(10 * np.log10(MSE_x_no[1:]), label=r'Non-coop')
330 | plt.plot(10 * np.log10(MSE_x_avg[1:]), label=r'Average')
331 | plt.plot(10 * np.log10(MSE_x_loss[1:]), label=r'loss-based')
332 | # plt.plot(MSE_x_no[1:], label=r'Non-coop')
333 | # plt.plot((MSE_x_avg[1:]), label=r'Average')
334 | # plt.plot((MSE_x_loss[1:]), label=r'loss-based')
335 |
336 |
337 | # plt.title('cooperative under attack using median')
338 | plt.xlabel(r'iteration $i$', fontsize=10)
339 | plt.ylabel(r'MSD (dB)', fontsize=10)
340 | #plt.xticks([0, 100, 200, 300, 400, 500])
341 | # plt.legend(fontsize=7, loc='lower left', bbox_to_anchor=(0.34, 0.43))
342 | # plt.yticks([-30,-15,0,15,30])
343 | # plt.legend(fontsize=7, loc='best')
344 | plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
345 | # plt.yticks([-75, -50, -25, 0, 25])
346 | # if attackerNum == 6:
347 | # plt.yticks([-40, -20, 0, 20, 40])
348 | # plt.ylim([-45, 45])
349 | # elif attackerNum == 0:
350 | # plt.yticks([-60, -40, -20, 0, 20])
351 | # plt.ylim([-70, 40])
352 | plt.tight_layout()
353 | plt.show()
354 | #fig1.savefig('fig/MSD_mobile_attack%d.eps' % attackerNum)
355 |
356 | plt.subplot(151)
357 | plt.plot(10 * np.log10(MSE_x_no))
358 | plt.title('Noncooperative LMS')
359 | plt.xlabel('iteration')
360 | plt.ylabel('MSE (dB)')
361 |
362 | plt.subplot(152)
363 | plt.plot(10 * np.log10(MSE_x_avg))
364 | plt.title('Average')
365 | plt.xlabel('iteration')
366 | plt.ylabel('MSE (dB)')
367 |
368 | plt.subplot(153)
369 | plt.plot(10 * np.log10(MSE_x_loss))
370 | plt.title('Loss-based')  # this panel plots MSE_x_loss, not a median rule
371 | plt.xlabel('iteration')
372 | plt.ylabel('MSE (dB)')
373 |
374 | plt.show()
375 |
376 |
377 | fig2 = plt.figure(figsize=(11, 2.5))
378 | plt.subplot(151)
379 | for k in normalAgents:
380 | plt.plot(W1_no[1:, k])
381 | plt.xlabel('iteration $i$', fontsize=20)
382 | plt.ylabel(r'$w_{k,i}(1)$', fontsize=25)
383 | plt.xticks([0, 100, 200, 300, 400, 500])
384 |
385 | plt.subplot(152)
386 | for k in normalAgents:
387 | plt.plot(W1_avg[1:, k])
388 | plt.xlabel('iteration $i$', fontsize=20)
389 | plt.xticks([0, 100, 200, 300, 400, 500])
390 |
391 | plt.subplot(153)
392 | for k in normalAgents:
393 | plt.plot(W1_loss[1:, k])
394 | plt.xlabel('iteration $i$', fontsize=20)
395 | plt.xticks([0, 100, 200, 300, 400, 500])
396 |
397 | plt.show()
--------------------------------------------------------------------------------
/DigitClassification/main.py:
--------------------------------------------------------------------------------
1 | import math
2 | import os
3 | import random
4 | import time
5 | from copy import deepcopy
6 |
7 | import numpy as np
8 | import torch
9 | import torchvision
10 | from torch.autograd import Variable
11 | from torch.utils.data import DataLoader
12 |
13 | from agent import agent
14 |
15 | # from torchsummary import summary
16 |
17 | torch.manual_seed(1)
18 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
19 | print(device)
20 |
21 |
22 | def readData_mnist():
23 | train_data = torchvision.datasets.MNIST(
24 | './mnist', train=True, transform=torchvision.transforms.ToTensor(), download=True
25 | )
26 | test_data = torchvision.datasets.MNIST(
27 | './mnist', train=False, transform=torchvision.transforms.ToTensor()
28 | )
29 | print("train_data:", train_data.train_data.size())
30 | print("train_labels:", train_data.train_labels.size())
31 | print("test_data:", test_data.test_data.size())
32 | return train_data, test_data
33 |
34 |
35 | def generateData_mnist(train_data, test_data, tr_split_len, te_split_len, number):
36 | # if number == 1:
37 | # train_x_no = train_data.train_data[number*tr_split_len : number*tr_split_len + tr_split_len//10].float()/255
38 | # train_x_no = train_x_no.repeat(10, 1, 1)
39 | # train_y_no = train_data.train_labels[number*tr_split_len : number*tr_split_len + tr_split_len//10]
40 | # train_y_no = train_y_no.repeat(10)
41 | # else:
42 |     train_x_no = train_data.data[number * tr_split_len: (number + 1) * tr_split_len].float() / 255
43 |     train_y_no = train_data.targets[number * tr_split_len: (number + 1) * tr_split_len]
44 |     test_x_no = test_data.data[number * te_split_len: (number + 1) * te_split_len].float() / 255
45 |     test_y_no = test_data.targets[number * te_split_len: (number + 1) * te_split_len]
46 |
47 | train_data_no = []
48 |     # the `number == 1` branch here duplicated the generic one (a placeholder
49 |     # for wrong-label / uneven-data experiments), so a single loop keeps the behavior
50 |     for i in range(len(train_x_no)):
51 |         train_data_no.append([train_x_no[i].unsqueeze(0), train_y_no[i]])
55 |
56 | # print(train_x_1[i].float()/255)
57 | test_data_no = []
58 | for i in range(len(test_x_no)):
59 | test_data_no.append([test_x_no[i].unsqueeze(0), test_y_no[i]])
60 |
61 | train_loader_no = DataLoader(dataset=train_data_no, batch_size=64, shuffle=True)
62 | test_loader_no = DataLoader(dataset=test_data_no, batch_size=64)
63 |
64 | return train_loader_no, test_loader_no
65 |
66 |
67 | def readData_synthetic_digits():
68 | transformtrain = torchvision.transforms.Compose([
69 | torchvision.transforms.Resize((28, 28)),
70 | torchvision.transforms.RandomHorizontalFlip(),
71 | torchvision.transforms.Grayscale(num_output_channels=1),
72 | torchvision.transforms.ToTensor(),
73 | torchvision.transforms.Normalize((0.5,), (0.5,))
74 | ])
75 |
76 | transformtest = torchvision.transforms.Compose([
77 | torchvision.transforms.Resize((28, 28)),
78 | torchvision.transforms.Grayscale(num_output_channels=1),
79 | torchvision.transforms.ToTensor(),
80 | torchvision.transforms.Normalize((0.5,), (0.5,))
81 | ])
82 |
83 | train_data = torchvision.datasets.ImageFolder('synthetic_digits/synthetic_digits/imgs_train',
84 | transform=transformtrain)
85 | test_data = torchvision.datasets.ImageFolder('synthetic_digits/synthetic_digits/imgs_valid',
86 | transform=transformtest)
87 |
88 | # np.random.shuffle(train_data)
89 | # shuffled in ImageFolder
90 |
91 | return train_data, test_data
92 |
93 |
94 | def generateData_synthetic_digits(remaining_tr, remaining_te, tr_split_len, te_split_len):
95 | part_tr, part_tr2 = torch.utils.data.random_split(remaining_tr, [tr_split_len, len(remaining_tr) - tr_split_len])
96 | part_te, part_te2 = torch.utils.data.random_split(remaining_te, [te_split_len, len(remaining_te) - te_split_len])
97 |
98 | train_loader_no = DataLoader(part_tr, batch_size=128, shuffle=True)
99 | test_loader_no = DataLoader(part_te, batch_size=128, shuffle=False)
100 |
101 | return train_loader_no, test_loader_no, part_tr2, part_te2
102 |
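# --- Editor's note (not part of the original file) --------------------------
# generateData_synthetic_digits carves a fixed-size random chunk off the
# remaining pool and returns the leftovers, so successive calls hand disjoint
# subsets to successive agents. A hypothetical usage pattern:
#
#   pool_tr, pool_te = train_data2, test_data2
#   for k in range(5):
#       tr_loader, te_loader, pool_tr, pool_te = generateData_synthetic_digits(
#           pool_tr, pool_te, 2000, 400)
# -----------------------------------------------------------------------------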
103 |
104 | class Net(torch.nn.Module):
105 | def __init__(self):
106 | super(Net, self).__init__()
107 | self.conv1 = torch.nn.Sequential(
108 | torch.nn.Conv2d(1, 32, 3, 1, 1),
109 | torch.nn.ReLU(),
110 | torch.nn.MaxPool2d(2))
111 | self.conv2 = torch.nn.Sequential(
112 | torch.nn.Conv2d(32, 64, 3, 1, 1),
113 | torch.nn.ReLU(),
114 | torch.nn.MaxPool2d(2)
115 | )
116 | self.conv3 = torch.nn.Sequential(
117 | torch.nn.Conv2d(64, 64, 3, 1, 1),
118 | torch.nn.ReLU(),
119 | torch.nn.MaxPool2d(2)
120 | )
121 | self.dense = torch.nn.Sequential(
122 | torch.nn.Linear(64 * 3 * 3, 128),
123 | torch.nn.ReLU(),
124 | torch.nn.Linear(128, 10)
125 | )
126 |
127 | def forward(self, x):
128 | conv1_out = self.conv1(x)
129 | conv2_out = self.conv2(conv1_out)
130 | conv3_out = self.conv3(conv2_out)
131 | res = conv3_out.view(conv3_out.size(0), -1)
132 | out = self.dense(res)
133 | return out
134 |
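# --- Editor's note (not part of the original file) --------------------------
# Shape check for the 64 * 3 * 3 flatten in Net.dense: each MaxPool2d(2)
# floor-halves the spatial size, 28 -> 14 -> 7 -> 3, so conv3_out has shape
# (batch, 64, 3, 3). A quick hypothetical sanity check:
#
#   net = Net()
#   x = torch.zeros(1, 1, 28, 28)
#   assert net.conv3(net.conv2(net.conv1(x))).shape == (1, 64, 3, 3)
# -----------------------------------------------------------------------------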
135 |
136 | def getWeight(ego_k, A, Para_ex, Para_last, batch_x, batch_y, Accumulated_Loss, rule):
137 | Weight = []
138 | gamma = 0.05
139 | N = len(A)
140 |
141 | if rule == "loss":
142 | # # loss based filtering 1/0
143 | # for l in range(0, N):
144 | # loss = A[k].getLoss(batch_x, batch_y, A[l].net)
145 | # Accumulated_Loss[ego_k, l] = (1 - gamma) * Accumulated_Loss[ego_k, l] + gamma * loss
146 | # for l in range(0, N):
147 | # weight = 1 if l == np.argmin(Accumulated_Loss[ego_k, :]) else 0
148 | # Weight.append(weight)
149 | # elif rule == "reversedLoss":
150 | # Reversed loss
151 | Weight = np.zeros((N,))
152 | reversed_Loss = np.zeros((N,))
153 | loss = A[ego_k].getLoss(batch_x, batch_y, A[ego_k].net)
154 | Accumulated_Loss[ego_k, ego_k] = (1 - gamma) * Accumulated_Loss[ego_k, ego_k] + gamma * loss
155 | for l in range(0, N):
156 | if not l == ego_k:
157 | loss = A[ego_k].getLoss(batch_x, batch_y, A[l].net)
158 | Accumulated_Loss[ego_k, l] = (1 - gamma) * Accumulated_Loss[ego_k, l] + gamma * loss
159 | if Accumulated_Loss[ego_k, l] <= Accumulated_Loss[ego_k, ego_k]:
160 | reversed_Loss[l] = 1. / Accumulated_Loss[ego_k, l]
161 | sum_reversedLoss = sum(reversed_Loss)
162 | for l in range(0, N):
163 | if Accumulated_Loss[ego_k, l] <= Accumulated_Loss[ego_k, ego_k]:
164 | weight = reversed_Loss[l] / sum_reversedLoss
165 | Weight[l] = weight
166 | elif rule == "distance":
167 | Weight = np.zeros((N,))
168 | reversed_Loss = np.zeros((N,))
169 | para_ex_k = Para_ex[ego_k]
170 | para_last_k = Para_last[ego_k]
171 | dist = np.linalg.norm(para_ex_k - para_last_k)
172 | Accumulated_Loss[ego_k, ego_k] = (1 - gamma) * Accumulated_Loss[ego_k, ego_k] + gamma * dist ** 2
173 | for l in range(0, N):
174 | para_ex_l = Para_ex[l]
175 | para_last_k = Para_last[ego_k]
176 | # print(np.linalg.norm(para_ex_l - para_last_k))
177 | dist = np.linalg.norm(para_ex_l - para_last_k)
178 | Accumulated_Loss[ego_k, l] = (1 - gamma) * Accumulated_Loss[ego_k, l] + gamma * dist ** 2
179 | # if Accumulated_Loss[ego_k,l] <= Accumulated_Loss[ego_k, ego_k]:
180 | reversed_Loss[l] = 1. / Accumulated_Loss[ego_k, l]
181 | sum_reversedLoss = sum(reversed_Loss)
182 | for l in range(0, N):
183 | # if Accumulated_Loss[ego_k,l] <= Accumulated_Loss[ego_k, ego_k]:
184 | weight = reversed_Loss[l] / sum_reversedLoss
185 | Weight[l] = weight
186 | elif rule == "average":
187 | # average based weight
188 | for l in range(0, N):
189 | if not l == ego_k:
190 | weight = 1 / N
191 | else:
192 | weight = 1 - (N - 1) / N
193 | Weight.append(weight)
194 | elif rule == "no-cooperation":
195 | for l in range(0, N):
196 | if l == ego_k:
197 | weight = 1
198 | else:
199 | weight = 0
200 | Weight.append(weight)
201 | else:
202 | return Weight, Accumulated_Loss
203 |
204 | return Weight, Accumulated_Loss
205 |
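# --- Editor's note (not part of the original file) --------------------------
# Accumulated_Loss is an exponential moving average: with smoothing factor
# gamma, acc_i = (1 - gamma) * acc_{i-1} + gamma * loss_i, so older losses
# are geometrically discounted. A hypothetical illustration:
#
#   gamma, acc = 0.05, 1.0
#   for loss in [0.9, 0.8, 0.7]:
#       acc = (1 - gamma) * acc + gamma * loss
#   # after three steps acc is still close to its initial value of 1.0,
#   # which is why Accumulated_Loss is initialized to ones rather than zeros
# -----------------------------------------------------------------------------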
206 |
207 | def cooperation(A, A_last, Batch_X, Batch_Y, Accumulated_Loss, rule, attacker):
208 | Parameters_last = []
209 | Parameters_exchange = []
210 | N = len(A)
211 |
212 | for k in range(0, N):
213 | Parameters_last.append({})
214 | Parameters_exchange.append({})
215 | a_last = A_last[k]
216 | a = A[k]
217 | for name, param in a.net.named_parameters():
218 | if param.requires_grad:
219 | if k in attacker:
220 | # a.net.named_parameters()[name] = param.data * random.random() * 0.1
221 | Parameters_exchange[k][name] = param.data * random.random() * 0.1
222 | else:
223 | Parameters_exchange[k][name] = param.data
224 | for name, param in a_last.net.named_parameters():
225 | if param.requires_grad:
226 | if k in attacker:
227 | # a_last.net.named_parameters()[name] = param.data * random.random() * 0.1
228 | Parameters_last[k][name] = param.data * random.random() * 0.1
229 | else:
230 | Parameters_last[k][name] = param.data
231 |
232 | Para_ex = []
233 | Para_last = []
234 | for k in range(0, N):
235 | para_ex_k = np.hstack([v.flatten().tolist() for v in Parameters_exchange[k].values()])
236 | para_last_k = np.hstack([v.flatten().tolist() for v in Parameters_last[k].values()])
237 | Para_ex.append(para_ex_k)
238 | Para_last.append(para_last_k)
239 |
240 | Parameters = deepcopy(Parameters_exchange)
241 | for k in range(0, N):
242 | a = A[k]
243 | if k not in attacker:
244 | batch_x, batch_y = Batch_X[k], Batch_Y[k]
245 | Weight, Accumulated_Loss = getWeight(k, A, Para_ex, Para_last, batch_x, batch_y, Accumulated_Loss, rule)
246 | # print(Accumulated_Loss)
247 | # print(Weight)
248 |
249 | for name, param in a.net.named_parameters():
250 | Parameters[k][name] = 0. * Parameters[k][name]
251 | for l in range(0, N):
252 | if param.requires_grad:
253 | Parameters[k][name] += Parameters_exchange[l][name] * Weight[l]
254 |
255 | for k in range(0, N):
256 | a = A[k]
257 | for name, param in a.net.named_parameters():
258 | param.data = Parameters[k][name]
259 |
260 | return A, Accumulated_Loss
261 |
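# --- Editor's sketch (not part of the original file) ------------------------
# cooperation() replaces each normal agent's parameters with a convex
# combination of all exchanged parameters. The core mixing step in isolation,
# with hypothetical tensors:
#
#   import torch
#   params = [{"w": torch.ones(2, 2)}, {"w": torch.zeros(2, 2)}]
#   weights = [0.75, 0.25]
#   mixed = {name: sum(wt * p[name] for wt, p in zip(weights, params))
#            for name in params[0]}
#   # mixed["w"] is a 2x2 tensor filled with 0.75
# -----------------------------------------------------------------------------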
262 |
263 | def run(rule, attacker, epochs):
264 | torch.manual_seed(1)
265 |
266 | start_time = time.time()
267 |
268 | N = 10
269 | N1 = 5
270 | tr_split_len1 = 2000
271 | te_split_len1 = 400
272 | tr_split_len2 = 2000
273 | te_split_len2 = 400
274 | A = []
275 | train_data1, test_data1 = readData_mnist()
276 | train_data2, test_data2 = readData_synthetic_digits()
277 | remaining_tr, remaining_te = train_data2, test_data2
278 |
279 | Parameters = []
280 |
281 | # attacker_num = 2
282 | # attacker = [2, 7]
283 |
284 | attacker_num = len(attacker)
285 | # Accumulated_Loss = np.zeros((N, N))
286 | Accumulated_Loss = np.ones((N, N))
287 |
288 | average_train_loss, average_train_acc = [], []
289 | average_test_loss, average_test_acc = [], []
290 |
291 | individual_average_train_loss, individual_average_train_acc = np.zeros((epochs, N)), np.zeros((epochs, N))
292 | individual_average_test_loss, individual_average_test_acc = np.zeros((epochs, N)), np.zeros((epochs, N))
293 |
294 | for k in range(0, N):
295 | net = Net().to(device)
296 | # print(net)
297 | # summary(net, (1,28,28), batch_size=-1)
298 | a = agent(net)
299 | A.append(a)
300 | Parameters.append({})
301 |
302 | for name, param in a.net.named_parameters():
303 | if param.requires_grad:
304 | Parameters[k][name] = param.data
305 |
306 | for epoch in range(epochs):
307 | print('epoch {}'.format(epoch + 1))
308 | Train_loader_iter = []
309 | Test_loader = []
310 | total_train_loss = 0.
311 | total_train_acc = 0.
312 | total_eval_loss = 0.
313 | total_eval_acc = 0.
314 | remaining_tr, remaining_te = train_data2, test_data2
315 |
316 | Count = np.zeros((N,))
317 |
318 | ave_train_loss = 0.
319 | ave_train_acc = 0.
320 | ave_eval_loss = 0.
321 | ave_eval_acc = 0.
322 | nanCount = 0
323 |
324 | for k in range(0, N):
325 | a = A[k]
326 | a.train_loss = 0.
327 | a.train_acc = 0.
328 |
329 | if k < N1:
330 | train_loader_no, test_loader_no = generateData_mnist(train_data1, test_data1, tr_split_len1,
331 | te_split_len1, k)
332 | else:
333 | train_loader_no, test_loader_no, remaining_tr, remaining_te = generateData_synthetic_digits(
334 | remaining_tr,
335 | remaining_te,
336 | tr_split_len2,
337 | te_split_len2)
338 |
339 | Train_loader_iter.append(iter(train_loader_no))
340 | Test_loader.append(test_loader_no)
341 |
342 | # for iteration in range(0, tr_split_len//64):
343 | # for k in range(0, N):
344 | # training-----------------------------
345 | try:
346 | while True:
347 | A_last = deepcopy(A)
348 | Batch_X, Batch_Y = {}, {}
349 | for k in range(0, N):
350 | batch_x, batch_y = next(Train_loader_iter[k])
351 | Batch_X[k] = batch_x.to(device)
352 | Batch_Y[k] = batch_y.to(device)
353 | if k in attacker:
354 | continue
355 |                 # depending on k % 5, agents train on roughly 1/2, 1/3, 1/4, or all
356 |                 # of their batches, so their models reach different accuracies
356 | if k % 5 == 0:
357 | if random.randint(0, 1) in [0]:
358 | continue
359 | if k % 5 == 1:
360 | if random.randint(0, 2) in [0, 1]:
361 | continue
362 | if k % 5 in [2, 3]:
363 | if random.randint(0, 3) in [0, 1, 2]:
364 | continue
365 | # if k % 5 == 3:
366 | # if random.randint(0, 9) in [0,1,2,3,4,5,6,7,8]:
367 | # continue
368 | a = A[k]
369 | loss, acc = a.optimize(batch_x.to(device), batch_y.to(device))
370 | total_train_loss += loss
371 | total_train_acc += acc
372 | Count[k] += len(batch_x)
373 |
374 | A, Accumulated_Loss = cooperation(A, A_last, Batch_X, Batch_Y, Accumulated_Loss, rule, attacker)
375 | # print(Accumulated_Loss)
376 |
377 | except StopIteration:
378 | # print(iteration)
379 | Eval_count = np.zeros((N,))
380 | for k in range(0, N):
381 | if k in attacker:
382 | continue
383 | print('Agent: {:d}, Train Loss: {:.6f}, Acc: {:.6f}'.format(k, A[k].train_loss / Count[k],
384 | A[k].train_acc / Count[k]))
385 | individual_average_train_loss[epoch, k] = A[k].train_loss / Count[k]
386 | individual_average_train_acc[epoch, k] = A[k].train_acc / Count[k]
387 |
388 | if not (math.isnan(A[k].train_loss / Count[k]) or math.isnan(A[k].train_acc / Count[k])):
389 | ave_train_loss += A[k].train_loss / Count[k]
390 | ave_train_acc += A[k].train_acc / Count[k]
391 | else:
392 | nanCount += 1
393 |
394 | # evaluation--------------------------------
395 | A[k].net.eval()
396 | eval_loss = 0.
397 | eval_acc = 0.
398 |                 loss_func = torch.nn.CrossEntropyLoss()
399 |                 with torch.no_grad():  # `volatile=True` Variables are deprecated since PyTorch 0.4
400 |                     for batch_x, batch_y in Test_loader[k]:
401 |                         batch_x, batch_y = batch_x.to(device), batch_y.to(device)
402 |                         out = A[k].net(batch_x)
403 |                         loss = loss_func(out, batch_y)
404 |                         eval_loss += loss.item()
405 |                         total_eval_loss += loss.item()
406 |                         pred = torch.max(out, 1)[1]
407 |                         num_correct = (pred == batch_y).sum()
408 |                         eval_acc += num_correct.item()
409 |                         total_eval_acc += num_correct.item()
410 |                         Eval_count[k] += len(batch_x)
411 |
412 | if not (math.isnan(eval_loss / Eval_count[k]) or math.isnan(eval_acc / Eval_count[k])):
413 | ave_eval_loss += eval_loss / Eval_count[k]
414 | ave_eval_acc += eval_acc / Eval_count[k]
415 | print('Agent: {:d}, Test Loss: {:.6f}, Acc: {:.6f}'.format(k, eval_loss / Eval_count[k],
416 | eval_acc / Eval_count[k]))
417 | individual_average_test_loss[epoch, k] = eval_loss / Eval_count[k]
418 | individual_average_test_acc[epoch, k] = eval_acc / Eval_count[k]
419 |
420 | # print('Total Average Train Loss: {:.6f}, Train Acc: {:.6f}'.format(total_train_loss / sum(Count),
421 | # total_train_acc / sum(Count)))
422 | # average_train_loss.append(total_train_loss / sum(Count))
423 | # average_train_acc.append(total_train_acc / sum(Count))
424 | # print('Total Average Test Loss: {:.6f}, Test Acc: {:.6f}'.format(total_eval_loss / sum(Eval_count),
425 | # total_eval_acc / sum(Eval_count)))
426 | #
427 | # print('Training time by far: {:.2f}s'.format(time.time() - start_time))
428 | # average_test_loss.append(total_eval_loss / sum(Eval_count))
429 | # average_test_acc.append(total_eval_acc / sum(Eval_count))
430 |
431 | print(
432 | 'Total Average Train Loss: {:.6f}, Train Acc: {:.6f}'.format(ave_train_loss / (N - nanCount - attacker_num),
433 | ave_train_acc / (N - nanCount - attacker_num)))
434 | average_train_loss.append(ave_train_loss / (N - nanCount - attacker_num))
435 | average_train_acc.append(ave_train_acc / (N - nanCount - attacker_num))
436 | print('Total Average Test Loss: {:.6f}, Test Acc: {:.6f}'.format(ave_eval_loss / (N - attacker_num),
437 | ave_eval_acc / (N - attacker_num)))
438 |
439 | print('Training time by far: {:.2f}s'.format(time.time() - start_time))
440 | average_test_loss.append(ave_eval_loss / (N - attacker_num))
441 | average_test_acc.append(ave_eval_acc / (N - attacker_num))
442 |
443 | if epoch % 10 == 0 or epoch == epochs - 1:
444 | if attacker_num == 0:
445 | try:
446 | os.makedirs("results")
447 | except OSError:
448 | print("Creation of the directory %s failed")
449 | np.save('results/average_train_loss_%s.npy' % rule, average_train_loss)
450 | np.save('results/average_train_acc_%s.npy' % rule, average_train_acc)
451 | np.save('results/average_test_loss_%s.npy' % rule, average_test_loss)
452 | np.save('results/average_test_acc_%s.npy' % rule, average_test_acc)
453 | np.save('results/individual_average_train_loss_%s.npy' % rule, individual_average_train_loss)
454 | np.save('results/individual_average_train_acc_%s.npy' % rule, individual_average_train_acc)
455 | np.save('results/individual_average_test_loss_%s.npy' % rule, individual_average_test_loss)
456 | np.save('results/individual_average_test_acc_%s.npy' % rule, individual_average_test_acc)
457 | else:
458 | try:
459 | os.makedirs("results/attacked/%d" % attacker_num)
460 | except OSError:
461 | print("Creation of the directory %s failed")
462 | np.save('results/attacked/%d/average_train_loss_%s.npy' % (attacker_num, rule), average_train_loss)
463 | np.save('results/attacked/%d/average_train_acc_%s.npy' % (attacker_num, rule), average_train_acc)
464 | np.save('results/attacked/%d/average_test_loss_%s.npy' % (attacker_num, rule), average_test_loss)
465 | np.save('results/attacked/%d/average_test_acc_%s.npy' % (attacker_num, rule), average_test_acc)
466 | np.save('results/attacked/%d/individual_average_train_loss_%s.npy' % (attacker_num, rule),
467 | individual_average_train_loss)
468 | np.save('results/attacked/%d/individual_average_train_acc_%s.npy' % (attacker_num, rule),
469 | individual_average_train_acc)
470 | np.save('results/attacked/%d/individual_average_test_loss_%s.npy' % (attacker_num, rule),
471 | individual_average_test_loss)
472 | np.save('results/attacked/%d/individual_average_test_acc_%s.npy' % (attacker_num, rule),
473 | individual_average_test_acc)
474 |
475 |
476 | if __name__ == '__main__':
477 | epochs = 100
478 |     # for rule in ["no-cooperation", "loss", "distance", "average"]:
479 | for rule in ["loss", "distance"]:
480 | # for attacker in [[0,1,2,3,5,6,7,8],[], [2, 7]]:
481 | for attacker in [[], [2, 7]]:
482 | run(rule, attacker, epochs)
483 |
--------------------------------------------------------------------------------
/HumanActivityRecog/main.py:
--------------------------------------------------------------------------------
1 | import math
2 | import os
3 | import random
4 | import time
5 | from copy import deepcopy
6 |
7 | import numpy as np
8 | import pandas as pd
9 | import torch
10 | import torch.nn.functional as F
11 | from torch.autograd import Variable
12 | from torch.utils.data import DataLoader
13 |
14 | from agent import agent
15 |
16 | torch.manual_seed(1)
17 |
18 |
19 | def get_device():
20 | if torch.cuda.is_available():
21 | device = torch.device('cuda:0')
22 | else:
23 | device = torch.device('cpu') # don't have GPU
24 | return device
25 |
26 |
27 | def value_to_tensor(values):
28 | device = get_device()
29 | return torch.from_numpy(values).float().to(device)
30 |
31 |
32 | def readData():
33 | features = pd.read_csv('./UCI HAR Dataset/features.txt', sep='\s+', index_col=0, header=None)
34 | train_data = pd.read_csv('./UCI HAR Dataset/train/X_train.txt', sep='\s+',
35 | names=list(features.values.ravel()))
36 | test_data = pd.read_csv('./UCI HAR Dataset/test/X_test.txt', sep='\s+',
37 | names=list(features.values.ravel()))
38 |
39 | train_label = pd.read_csv('./UCI HAR Dataset/train/y_train.txt', sep='\s+', header=None)
40 | test_label = pd.read_csv('./UCI HAR Dataset/test/y_test.txt', sep='\s+', header=None)
41 |
42 | train_subject = pd.read_csv('./UCI HAR Dataset/train/subject_train.txt', sep='\s+', header=None)
43 | test_subject = pd.read_csv('./UCI HAR Dataset/test/subject_test.txt', sep='\s+', header=None)
44 |
45 | label_name = pd.read_csv('UCI HAR Dataset/activity_labels.txt', sep='\s+', header=None, index_col=0)
46 |
47 | train_data['label'] = train_label
48 | test_data['label'] = test_label
49 |
50 | train_data['subject'] = train_subject
51 | test_data['subject'] = test_subject
52 |
53 | def get_label_name(num):
54 | return label_name.iloc[num - 1, 0]
55 |
56 | train_data['label_name'] = train_data['label'].map(get_label_name)
57 | test_data['label_name'] = test_data['label'].map(get_label_name)
58 |
59 |     # labels are originally 1-6, but the algorithm expects 0-5
60 | train_data['label'] = train_data['label'] - 1
61 | test_data['label'] = test_data['label'] - 1
62 |
63 |     train_data = train_data.sample(frac=1).reset_index(drop=True)  # np.random.shuffle(df.values) only shuffled a copy
64 |     test_data = test_data.sample(frac=1).reset_index(drop=True)
65 |
66 | return train_data, test_data
67 |
68 |
69 | def generateData(train_data, test_data, subject, batch_size):
70 | x_train = [d[:-3] for d in train_data.values if d[-2] == subject]
71 | y_train = [d[-3] for d in train_data.values if d[-2] == subject]
72 | x_test = [d[:-3] for d in test_data.values if d[-2] == subject]
73 | y_test = [d[-3] for d in test_data.values if d[-2] == subject]
74 |
75 | all_x_data = x_train + x_test
76 | all_y_data = y_train + y_test
77 |
78 | x_tensor = torch.FloatTensor(all_x_data)
79 | y_tensor = torch.LongTensor(all_y_data)
80 |
81 | all_data = []
82 | for i in range(len(x_tensor)):
83 | all_data.append([x_tensor[i], y_tensor[i]])
84 |
85 | np.random.shuffle(all_data)
86 |
87 |     n = len(all_data)
88 |     # 3/4 train, next 1/8 validation, last 1/8 test; the original test slice
89 |     # started at n // 4 * 3 and therefore overlapped the validation split
90 |     train_data_subject = all_data[: n // 4 * 3]
91 |     val_data_subject, test_data_subject = all_data[n // 4 * 3: n // 8 * 7], all_data[n // 8 * 7:]
92 |
93 | # x_train_tensor, y_train_tensor, x_test_tensor, y_test_tensor = x_tensor[:len(x_tensor) // 4 * 3], y_tensor[:len(x_tensor) // 4 * 3], \
94 | # x_tensor[len(x_tensor) // 4 * 3:], y_tensor[len(x_tensor) // 4 * 3:]
95 | # x_val_tensor, y_val_tensor, x_test_tensor, y_test_tensor = x_test_tensor[:len(x_test_tensor) // 2], y_test_tensor[:len(x_test_tensor) // 2], \
96 | # x_test_tensor[len(x_test_tensor) // 2:], y_test_tensor[len(x_test_tensor) // 2:]
97 |
98 | # train_data_subject = []
99 | # for i in range(len(x_train_tensor)):
100 | # train_data_subject.append([x_train_tensor[i], y_train_tensor[i]])
101 | #
102 | # val_data_subject = []
103 | # for i in range(len(x_val_tensor)):
104 | # val_data_subject.append([x_val_tensor[i], y_val_tensor[i]])
105 | #
106 | # test_data_subject = []
107 | # for i in range(len(x_test_tensor)):
108 | # test_data_subject.append([x_test_tensor[i], y_test_tensor[i]])
109 |
110 | # un-even data
111 | # if subject % 5 == 0:
112 | # train_data_subject, val_data_subject, test_data_subject = train_data_subject[:len(train_data_subject)//10], \
113 | # val_data_subject[:len(val_data_subject)//10], test_data_subject[:len(test_data_subject)//10]
114 |
115 | train_loader_subject = DataLoader(dataset=train_data_subject, batch_size=batch_size, shuffle=True)
116 | test_loader_subject = DataLoader(dataset=test_data_subject, batch_size=batch_size, shuffle=True)
117 | val_loader_subject = DataLoader(dataset=val_data_subject, batch_size=batch_size, shuffle=True)
118 |
119 | return train_loader_subject, val_loader_subject, test_loader_subject
120 |
121 |
122 | class Net(torch.nn.Module):
123 | def __init__(self, n_feature, n_hidden1, n_output):
124 | super(Net, self).__init__()
125 | self.hidden1 = torch.nn.Linear(n_feature, n_hidden1) # hidden layer
126 | self.out = torch.nn.Linear(n_hidden1, n_output) # output layer
127 |
128 | def forward(self, x):
129 | x = F.relu(self.hidden1(x)) # activation function for hidden layer
130 | x = self.out(x)
131 | return x
132 |
133 |
134 | class linearRegression(torch.nn.Module):
135 | def __init__(self, inputSize, outputSize):
136 | super(linearRegression, self).__init__()
137 | self.linear = torch.nn.Linear(inputSize, outputSize)
138 |
139 | def forward(self, x):
140 | out = self.linear(x)
141 | return out
142 |
143 |
144 | def getWeight(ego_k, A, Para_ex, Para_last, batch_x, batch_y, Accumulated_Loss, rule):
145 |     Weight = []
146 |     N = len(A)  # this function previously relied on the module-level N
147 |     # gamma = 1 would use the instantaneous loss only
148 |     gamma = 0.001
148 |
149 | if rule == "loss":
150 | # # loss based filtering 1/0
151 | # for l in range(0, N):
152 | # loss = A[k].getLoss(batch_x, batch_y, A[l].net)
153 | # Accumulated_Loss[ego_k, l] = (1 - gamma) * Accumulated_Loss[ego_k, l] + gamma * loss
154 | # for l in range(0, N):
155 | # weight = 1 if l == np.argmin(Accumulated_Loss[ego_k, :]) else 0
156 | # Weight.append(weight)
157 | # elif rule == "reversedLoss":
158 | # Reversed loss
159 | Weight = np.zeros((N,))
160 | reversed_Loss = np.zeros((N,))
161 | loss = A[ego_k].getLoss(batch_x, batch_y, A[ego_k].net)
162 | Accumulated_Loss[ego_k, ego_k] = (1 - gamma) * Accumulated_Loss[ego_k, ego_k] + gamma * loss
163 | for l in range(0, N):
164 | if not l == ego_k:
165 | loss = A[ego_k].getLoss(batch_x, batch_y, A[l].net)
166 | Accumulated_Loss[ego_k, l] = (1 - gamma) * Accumulated_Loss[ego_k, l] + gamma * loss
167 | if Accumulated_Loss[ego_k, l] <= Accumulated_Loss[ego_k, ego_k]:
168 | reversed_Loss[l] = 1. / Accumulated_Loss[ego_k, l]
169 | sum_reversedLoss = sum(reversed_Loss)
170 | for l in range(0, N):
171 | if Accumulated_Loss[ego_k, l] <= Accumulated_Loss[ego_k, ego_k]:
172 | weight = reversed_Loss[l] / sum_reversedLoss
173 | Weight[l] = weight
174 | elif rule == "distance":
175 | Weight = np.zeros((N,))
176 | reversed_Loss = np.zeros((N,))
177 | para_ex_k = Para_ex[ego_k]
178 | para_last_k = Para_last[ego_k]
179 | dist = np.linalg.norm(para_ex_k - para_last_k)
180 | Accumulated_Loss[ego_k, ego_k] = (1 - gamma) * Accumulated_Loss[ego_k, ego_k] + gamma * dist ** 2
181 | for l in range(0, N):
182 | if not l == ego_k:
183 | para_ex_l = Para_ex[l]
184 | para_last_k = Para_last[ego_k]
185 | # print(np.linalg.norm(para_ex_l - para_last_k))
186 | dist = np.linalg.norm(para_ex_l - para_last_k)
187 | Accumulated_Loss[ego_k, l] = (1 - gamma) * Accumulated_Loss[ego_k, l] + gamma * dist ** 2
188 | # if Accumulated_Loss[ego_k,l] <= Accumulated_Loss[ego_k, ego_k]:
189 | reversed_Loss[l] = 1. / Accumulated_Loss[ego_k, l]
190 | sum_reversedLoss = sum(reversed_Loss)
191 | for l in range(0, N):
192 | # if Accumulated_Loss[ego_k,l] <= Accumulated_Loss[ego_k, ego_k]:
193 | weight = reversed_Loss[l] / sum_reversedLoss
194 | Weight[l] = weight
195 | elif rule == "average":
196 | # average based weight
197 | for l in range(0, N):
198 | if not l == ego_k:
199 | weight = 1 / N
200 | else:
201 | weight = 1 - (N - 1) / N
202 | Weight.append(weight)
203 | elif rule == "no-cooperation":
204 | for l in range(0, N):
205 | if l == ego_k:
206 | weight = 1
207 | else:
208 | weight = 0
209 | Weight.append(weight)
210 | else:
211 | return Weight, Accumulated_Loss
212 |
213 | return Weight, Accumulated_Loss
214 |
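# --- Editor's sketch (not part of the original file) ------------------------
# The "distance" rule above scores neighbor l by how far its exchanged
# parameter vector lies from agent k's previous one and weights by the
# inverse of the discounted squared distance. In isolation, hypothetically:
#
#   import numpy as np
#   para_last_k = np.zeros(4)
#   Para_ex = [np.zeros(4), 10.0 * np.ones(4)]   # neighbor 1 is far away
#   d2 = np.array([np.linalg.norm(p - para_last_k) ** 2 for p in Para_ex])
#   inv = 1.0 / np.maximum(d2, 1e-12)            # guard the zero distance
#   weights = inv / inv.sum()                    # nearly all weight on index 0
# -----------------------------------------------------------------------------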
215 |
216 | def cooperation(A, A_last, Batch_X, Batch_Y, Accumulated_Loss, rule, attacker):
217 |     Parameters_last = []
218 |     Parameters_exchange = []
219 |     N = len(A)  # avoid relying on the module-level N
219 |
220 | for k in range(0, N):
221 | Parameters_last.append({})
222 | Parameters_exchange.append({})
223 | a_last = A_last[k]
224 | a = A[k]
225 | for name, param in a.net.named_parameters():
226 | if param.requires_grad:
227 | if k in attacker:
228 | # a.net.named_parameters()[name] = param.data * random.random() * 0.1
229 | # original attack
230 | # Parameters_exchange[k][name] = param.data * random.random() * 0.1
231 | # attack with small perturbation (by reviewer 4)
232 | Parameters_exchange[k][name] = param.data + random.random() * 0 # 1e-6
233 |
234 | else:
235 | Parameters_exchange[k][name] = param.data
236 | for name, param in a_last.net.named_parameters():
237 | if param.requires_grad:
238 | if k in attacker:
239 | # a_last.net.named_parameters()[name] = param.data * random.random() * 0.1
240 | # original attack
241 | # Parameters_last[k][name] = param.data * random.random() * 0.1
242 | # attack with small perturbation (by reviewer 4)
243 | Parameters_last[k][name] = param.data + random.random() * 0 # 1e-6
244 | else:
245 | Parameters_last[k][name] = param.data
246 |
247 | Para_ex = []
248 | Para_last = []
249 | for k in range(0, N):
250 |         para_ex_k = np.hstack([v.flatten().tolist() for v in Parameters_exchange[k].values()])  # as in DigitClassification;
251 |         para_last_k = np.hstack([v.flatten().tolist() for v in Parameters_last[k].values()])     # wrapping ragged lists in np.array fails on recent NumPy
252 | Para_ex.append(para_ex_k)
253 | Para_last.append(para_last_k)
254 |
255 | Parameters = deepcopy(Parameters_exchange)
256 | for k in range(0, N):
257 | a = A[k]
258 | if k not in attacker:
259 | batch_x, batch_y = Batch_X[k], Batch_Y[k]
260 | Weight, Accumulated_Loss = getWeight(k, A, Para_ex, Para_last, batch_x, batch_y, Accumulated_Loss, rule)
261 | # print(Accumulated_Loss)
262 | # print(Weight)
263 |
264 | for name, param in a.net.named_parameters():
265 | Parameters[k][name] = 0. * Parameters[k][name]
266 | for l in range(0, N):
267 | if param.requires_grad:
268 | Parameters[k][name] += Parameters_exchange[l][name] * Weight[l]
269 |
270 | for k in range(0, N):
271 | a = A[k]
272 | for name, param in a.net.named_parameters():
273 | param.data = Parameters[k][name]
274 |
275 | return A, Accumulated_Loss
276 |
277 |
278 | def gaussian(x, mean, stddev):
279 | noise = Variable(x.new(x.size()).normal_(mean, stddev))
280 | return x + noise
281 |
282 |
283 | def run(rule, attacker, epochs):
284 | torch.manual_seed(0)
285 |
286 | start_time = time.time()
287 |
288 | N = 30
289 | A = []
290 | batch_size = 10
291 | train_data, test_data = readData()
292 |
293 | Parameters = []
294 |
295 | attacker_num = len(attacker)
296 |
297 | # Accumulated_Loss = np.zeros((N, N))
298 | Accumulated_Loss = np.ones((N, N))
299 | middle1_neurons = 50
300 |
301 | Train_loader, Test_loader = [], []
302 | Val_loader_iter = []
303 | Val_loader = []
304 |
305 | average_train_loss, average_train_acc = [], []
306 | average_test_loss, average_test_acc = [], []
307 |
308 | individual_average_train_loss, individual_average_train_acc = np.zeros((epochs, N)), np.zeros((epochs, N))
309 | individual_average_test_loss, individual_average_test_acc = np.zeros((epochs, N)), np.zeros((epochs, N))
310 |
311 | for k in range(0, N):
312 | # net = Net(n_feature=561, n_hidden1=middle1_neurons, n_output=6)
313 | net = linearRegression(561, 6)
314 | a = agent(net)
315 | A.append(a)
316 |
317 | train_loader_no, val_loader_no, test_loader_no = generateData(train_data, test_data, k + 1, batch_size)
318 | Train_loader.append(train_loader_no)
319 | Test_loader.append(test_loader_no)
320 | Val_loader.append(val_loader_no)
321 | Val_loader_iter.append(iter(val_loader_no))
322 |
323 | for epoch in range(epochs):
324 | print('epoch {}'.format(epoch + 1))
325 | Train_loader_iter = []
326 | total_train_loss = 0.
327 | total_train_acc = 0.
328 | total_eval_loss = 0.
329 | total_eval_acc = 0.
330 |
331 | Count = np.zeros((N,))
332 |
333 | ave_train_loss = 0.
334 | ave_train_acc = 0.
335 | ave_eval_loss = 0.
336 | ave_eval_acc = 0.
337 | nanCount = 0
338 |
339 | for k in range(0, N):
340 | a = A[k]
341 | a.train_loss = 0.
342 | a.train_acc = 0.
343 | Train_loader_iter.append(iter(Train_loader[k]))
344 |
345 | try:
346 | while True:
347 | A_last = deepcopy(A)
348 | Batch_X, Batch_Y = {}, {}
349 | for k in range(0, N):
350 | # if k in attacker:
351 | # continue
352 |                     batch_x, batch_y = next(Train_loader_iter[k])  # .next() is Python 2 style
353 | Batch_X[k] = batch_x
354 | Batch_Y[k] = batch_y
355 |                 # keep only ~1/11 of the batches for 1/3 of the agents (k % 3 == 0)
356 | if k % 3 == 0:
357 | if random.randint(0, 10) in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
358 | continue
359 |
360 | # if k % 3 == 0:
361 | # train_loader = Train_loader_iter[k].next()
362 | # batch_x, batch_y = (train_loader[0]).narrow(0,0,1), (train_loader[1]).narrow(0,0,1)
363 | # else:
364 | # batch_x, batch_y = Train_loader_iter[k].next()
365 | #
366 | # Batch_X.append(batch_x)
367 | # Batch_Y.append(batch_y)
368 |
369 | # if k % 3 == 0:
370 | # if random.randint(0, 5) == 1:
371 | # pass
372 | # batch_x = gaussian(batch_x, 5, 5)
373 | # batch_y = torch.LongTensor(np.random.randint(6, size=batch_size))
374 | # if random.randint(0, 2) == 1:
375 | # batch_y = torch.LongTensor(np.random.randint(6, size=batch_size))
376 | # if (k+1) % 5 == 0:
377 | # try:
378 | # batch_x, batch_y = Train_loader_iter[k].next()
379 | # except:
380 | # Train_loader_iter[k] = iter(Train_loader[k])
381 | # batch_x, batch_y = Train_loader_iter[k].next()
382 | # else:
383 | # batch_x, batch_y = Train_loader_iter[k].next()
384 | a = A[k]
385 | loss, acc = a.optimize(batch_x, batch_y)
386 | if math.isnan(loss) or math.isnan(acc):
387 | continue
388 | total_train_acc += acc
389 | # try:
390 | # val_x, val_y = Val_loader_iter[k].next()
391 | # except:
392 | # Val_loader_iter[k] = iter(Val_loader[k])
393 | # val_x, val_y = Val_loader_iter[k].next()
394 | # Batch_X.append(val_x)
395 | # Batch_Y.append(val_y)
396 | Count[k] += len(batch_x)
397 | A, Accumulated_Loss = cooperation(A, A_last, Batch_X, Batch_Y, Accumulated_Loss, rule, attacker)
398 | # print(Accumulated_Loss)
399 |
400 |
401 | except StopIteration:
402 | # print(iteration)
403 | Eval_count = np.zeros((N,))
404 | for k in range(0, N):
405 | if k in attacker:
406 | continue
407 | print('Agent: {:d}, Train Loss: {:.6f}, Acc: {:.6f}'.format(k, A[k].train_loss / Count[k],
408 | A[k].train_acc / Count[k]))
409 | individual_average_train_loss[epoch, k] = A[k].train_loss / Count[k]
410 | individual_average_train_acc[epoch, k] = A[k].train_acc / Count[k]
411 |
412 | if not (math.isnan(A[k].train_loss / Count[k]) or math.isnan(A[k].train_acc / Count[k])):
413 | ave_train_loss += A[k].train_loss / Count[k]
414 | ave_train_acc += A[k].train_acc / Count[k]
415 | else:
416 | nanCount += 1
417 |
418 | # evaluation--------------------------------
419 | A[k].net.eval()
420 | eval_loss = 0.
421 | eval_acc = 0.
422 |                 loss_func = torch.nn.CrossEntropyLoss()
423 |                 with torch.no_grad():  # `volatile=True` Variables are deprecated since PyTorch 0.4
424 |                     for batch_x, batch_y in Test_loader[k]:
425 |                         out = A[k].net(batch_x)
426 |                         loss = loss_func(out, batch_y)
427 |                         pred = torch.max(out, 1)[1]
428 |                         num_correct = (pred == batch_y).sum()
429 |                         if math.isnan(loss) or math.isnan(num_correct):
430 |                             continue
431 |                         eval_loss += loss.item()
432 |                         eval_acc += num_correct.item()
433 |                         total_eval_loss += loss.item()
434 |                         total_eval_acc += num_correct.item()
435 |                         Eval_count[k] += len(batch_x)
436 |
437 | if not (math.isnan(eval_loss / Eval_count[k]) or math.isnan(eval_acc / Eval_count[k])):
438 | ave_eval_loss += eval_loss / Eval_count[k]
439 | ave_eval_acc += eval_acc / Eval_count[k]
440 | print('Agent: {:d}, Test Loss: {:.6f}, Acc: {:.6f}'.format(k, eval_loss / Eval_count[k],
441 | eval_acc / Eval_count[k]))
442 | individual_average_test_loss[epoch, k] = eval_loss / Eval_count[k]
443 | individual_average_test_acc[epoch, k] = eval_acc / Eval_count[k]
444 |
445 | try:
446 | print('Total Average Train Loss: {:.6f}, Train Acc: {:.6f}'.format(
447 | ave_train_loss / (N - nanCount - attacker_num), ave_train_acc / (N - nanCount - attacker_num)))
448 | average_train_loss.append(ave_train_loss / (N - nanCount - attacker_num))
449 | average_train_acc.append(ave_train_acc / (N - nanCount - attacker_num))
450 | print('Total Average Test Loss: {:.6f}, Test Acc: {:.6f}'.format(ave_eval_loss / (N - attacker_num),
451 | ave_eval_acc / (N - attacker_num)))
452 | except:
453 | pass
454 |
455 | print('Training time by far: {:.2f}s'.format(time.time() - start_time))
456 | average_test_loss.append(ave_eval_loss / (N - attacker_num))
457 | average_test_acc.append(ave_eval_acc / (N - attacker_num))
458 |
459 | if epoch % 10 == 0 or epoch == epochs - 1:
460 | if attacker_num == 0:
461 | try:
462 | os.makedirs("results")
463 | except OSError:
464 | print("Creation of the directory %s failed")
465 | np.save('results/average_train_loss_%s.npy' % rule, average_train_loss)
466 | np.save('results/average_train_acc_%s.npy' % rule, average_train_acc)
467 | np.save('results/average_test_loss_%s.npy' % rule, average_test_loss)
468 | np.save('results/average_test_acc_%s.npy' % rule, average_test_acc)
469 | np.save('results/individual_average_train_loss_%s.npy' % rule, individual_average_train_loss)
470 | np.save('results/individual_average_train_acc_%s.npy' % rule, individual_average_train_acc)
471 | np.save('results/individual_average_test_loss_%s.npy' % rule, individual_average_test_loss)
472 | np.save('results/individual_average_test_acc_%s.npy' % rule, individual_average_test_acc)
473 | else:
474 | try:
475 | os.makedirs("results/attacked/%d" % attacker_num)
476 | except OSError:
477 | print("Creation of the directory %s failed")
478 | np.save('results/attacked/%d/average_train_loss_%s.npy' % (attacker_num, rule), average_train_loss)
479 | np.save('results/attacked/%d/average_train_acc_%s.npy' % (attacker_num, rule), average_train_acc)
480 | np.save('results/attacked/%d/average_test_loss_%s.npy' % (attacker_num, rule), average_test_loss)
481 | np.save('results/attacked/%d/average_test_acc_%s.npy' % (attacker_num, rule), average_test_acc)
482 | np.save('results/attacked/%d/individual_average_train_loss_%s.npy' % (attacker_num, rule),
483 | individual_average_train_loss)
484 | np.save('results/attacked/%d/individual_average_train_acc_%s.npy' % (attacker_num, rule),
485 | individual_average_train_acc)
486 | np.save('results/attacked/%d/individual_average_test_loss_%s.npy' % (attacker_num, rule),
487 | individual_average_test_loss)
488 | np.save('results/attacked/%d/individual_average_test_acc_%s.npy' % (attacker_num, rule),
489 | individual_average_test_acc)
490 |
491 |
492 | if __name__ == '__main__':
493 |
494 | epochs = 50
495 | N = 30
496 | # for attacker_num in [0, 10, 29]:
497 | attacker_num = 10
498 | random.seed(0)
499 | if attacker_num == 29:
500 | normal = [4]
501 | attacker = np.delete(range(0, 30), normal)
502 | else:
503 | attacker = random.sample(range(N), attacker_num)
504 | for rule in ["loss", "distance", "no-cooperation", "average"]:
505 | # for rule in ["loss", "distance"]:
506 | print("Total agent number:", N, "Attacker num:", attacker_num)
507 | print("attacker list:", attacker)
508 | print("rule: ", rule)
509 | run(rule, attacker, epochs)
510 |
511 |
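# A rough sketch of how the curves saved above can be read back for plotting
# (hypothetical path and rule name -- adjust to the run actually performed):
#
#   import numpy as np
#   test_acc = np.load('results/attacked/10/average_test_acc_loss.npy')
#   print(test_acc[-1])  # final average test accuracy under the loss-based rule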
--------------------------------------------------------------------------------
/TargetLocalization/multi_task.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import os
3 | from matplotlib import rc
4 | from utils import *
5 |
6 | rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
7 | ## for Palatino and other serif fonts use:
8 | # rc('font',**{'family':'serif','serif':['Palatino']})
9 | rc('text', usetex=True)
10 |
11 |
12 | def noncooperative_learn(i, numAgents, psi, w, mu_k, q, attackers, psi_a):
13 | a = 0
14 | for k in range(numAgents):
15 | if k not in attackers:
16 | # target estimation
17 | psi[:, k] = w[:, k] + mu_k * (q[:, k] - w[:, k])
18 | w[:, k] = psi[:, k]
19 | else:
20 | w[:, k] = psi_a[:, a]
21 | a += 1
22 |
23 | return w
24 |
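# Each normal agent k above runs a plain LMS step toward its own one-shot
# location estimate q_k,
#
#   psi_k = w_k + mu_k * (q_k - w_k),
#
# and adopts psi_k directly, so no information is exchanged; attackers simply
# output the injected state psi_a and cannot influence normal agents here.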
25 |
26 | def average_learn(i, numAgents, psi, w, mu_k, q, attackers, psi_a, Neigh):
27 | a = 0
28 | for k in range(numAgents):
29 | if k not in attackers:
30 | # target estimation
31 | psi[:, k] = w[:, k] + mu_k * (q[:, k] - w[:, k])
32 |
33 | else:
34 | psi[:, k] = psi_a[:, a]
35 | a += 1
36 |
37 | for k in range(numAgents):
38 | if k not in attackers:
39 | w[:, k] = np.mean(np.array([psi[:, j] for j in Neigh[k]]), axis=0)
40 | else:
41 | w[:, k] = psi[:, k]
42 |
43 | return w
44 |
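# average_learn adds a combine step after the same LMS adaptation: each normal
# agent replaces its estimate with the unweighted mean over its neighborhood,
#
#   w_k = (1 / |N_k|) * sum_{j in N_k} psi_j,
#
# so a single attacker in N_k pulls w_k toward psi_a no matter how implausible
# the injected value is -- this is the baseline the resilient rules improve on.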
45 |
46 | def loss_learn(i, numAgents, x, u, d, psi, w, mu_k, q, attackers, psi_a, Accumulated_Loss, Neigh):
47 | a = 0
48 | gamma = 0.1
49 |
50 | for k in range(numAgents):
51 | if k not in attackers:
52 | psi[:, k] = w[:, k] + mu_k * (q[:, k] - w[:, k])
53 |
54 | else:
55 | psi[:, k] = psi_a[:, a]
56 | a += 1
57 |
58 | for k in range(numAgents):
59 | if k not in attackers:
60 | Weight = np.zeros((numAgents,))
61 | reversed_loss = np.zeros((numAgents,))
62 | loss = (d[k] + np.dot([x[k].x, x[k].y], u[:, k].T).item() - (np.dot([psi[:, k]], u[:, k].T)).item()) ** 2
63 | Accumulated_Loss[k, k] = (1 - gamma) * Accumulated_Loss[k, k] + gamma * loss
64 | for l in Neigh[k]:
65 | if l != k:
66 | loss = (d[k] + np.dot([x[l].x, x[l].y], u[:, k].T).item() - (
67 | np.dot([psi[:, l]], u[:, k].T)).item()) ** 2
68 | Accumulated_Loss[k, l] = (1 - gamma) * Accumulated_Loss[k, l] + gamma * loss
69 | if Accumulated_Loss[k, l] <= Accumulated_Loss[k, k]:
70 | reversed_loss[l] = (1. / Accumulated_Loss[k, l])
71 | sum_reversedLoss = sum(reversed_loss)
72 | for l in Neigh[k]:
73 | if Accumulated_Loss[k, l] <= Accumulated_Loss[k, k]:
74 | weight = reversed_loss[l] / sum_reversedLoss
75 | Weight[l] = weight
76 | # print(k, Weight)
77 | w[:, k] = np.dot(psi, Weight)
78 | else:
79 | w[:, k] = psi[:, k]
80 |
102 | return w, Accumulated_Loss
103 |
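# Weighting logic of the loss-based rule: agent k scores itself and each
# neighbor l by an exponentially averaged prediction loss on k's own
# measurement (gamma = 0.1),
#
#   J_kl <- (1 - gamma) * J_kl + gamma * loss_l,
#
# then combines only neighbors performing at least as well as itself, with
# weights proportional to inverse accumulated loss:
#
#   a_kl = (1 / J_kl) / sum over {m in N_k : J_km <= J_kk} of (1 / J_km).
#
# As written, reversed_loss[k] is never filled in (the l == k case is skipped),
# so the self-weight is zero whenever at least one neighbor passes the test,
# and sum_reversedLoss can be zero (yielding NaN weights) if none does.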
104 |
105 | def distance_learn(i, numAgents, x, u, d, psi, w, mu_k, q, attackers, psi_a, Accumulated_Loss, Neigh):
106 | a = 0
107 | gamma = 0.1
108 |
109 | for k in range(numAgents):
110 | if k not in attackers:
111 | psi[:, k] = w[:, k] + mu_k * (q[:, k] - w[:, k])
112 |
113 | else:
114 | psi[:, k] = psi_a[:, a]
115 | a += 1
116 |
117 | for k in range(numAgents):
118 | if k not in attackers:
119 | Weight = np.zeros((numAgents,))
120 | reversed_loss = np.zeros((numAgents,))
121 | loss = (w[0, k] - psi[0, k]) ** 2 + (w[1, k] - psi[1, k]) ** 2
122 | Accumulated_Loss[k, k] = (1 - gamma) * Accumulated_Loss[k, k] + gamma * loss
123 | for l in Neigh[k]:
124 | if l != k:
125 | loss = (w[0, k] - psi[0, l]) ** 2 + (w[1, k] - psi[1, l]) ** 2
126 | Accumulated_Loss[k, l] = (1 - gamma) * Accumulated_Loss[k, l] + gamma * loss
127 | # if Accumulated_Loss[k, l] <= Accumulated_Loss[k, k]:
128 | reversed_loss[l] = (1. / Accumulated_Loss[k, l])
129 | sum_reversedLoss = sum(reversed_loss)
130 | for l in Neigh[k]:
131 | # if Accumulated_Loss[k, l] <= Accumulated_Loss[k, k]:
132 | weight = reversed_loss[l] / sum_reversedLoss
133 | Weight[l] = weight
134 | # print(k, Weight)
135 | w[:, k] = np.dot(psi, Weight)
136 | else:
137 | w[:, k] = psi[:, k]
138 |
139 | return w, Accumulated_Loss
140 |
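# distance_learn reuses the same combine machinery with a different signal:
# the accumulated quantity is the squared distance between k's current
# estimate and each neighbor's intermediate estimate,
#
#   loss = ||w_k - psi_l||^2,
#
# and the J_kl <= J_kk screening is commented out, so every neighbor except k
# itself receives an inverse-distance weight.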
141 |
142 | if __name__ == '__main__':
143 | random.seed(0)
144 | np.random.seed(0)
145 | # parameters
146 | iteration = 500
147 |
148 | r = 1
149 | box = 12
150 | numAgents = 100
151 | mu_k = 0.1
152 |
153 | t = 1
154 | w0 = [Point(10 + t * random.random(), 10 + t * random.random()),
155 | Point(10 + t * random.random(), 20 + t * random.random()),
156 | Point(20 + t * random.random(), 10 + t * random.random()),
157 | Point(20 + t * random.random(), 20 + t * random.random())]
158 | print([(p.x, p.y) for p in w0])
159 | W0 = [w0[0]] * (numAgents // 4) + [w0[1]] * (numAgents // 4) + [w0[2]] * (numAgents // 4) + [w0[3]] * (
160 | numAgents // 4)
161 |
162 | lower = 0
163 | upper = 3
164 | sensingRange = 1
165 |
166 | x_no = random_point_set(numAgents, lower=lower, upper=upper)
167 | # for k in attackers:
168 | # w_no[k] = Point(np.random.random(), np.random.random())
169 |
170 | x_init = copy.deepcopy(x_no)
171 | x_avg = copy.deepcopy(x_no)
172 | x_loss = copy.deepcopy(x_no)
173 | x_dist = copy.deepcopy(x_no)
174 |
175 | attackerNum = 20
176 |
177 | attackers = random.sample(list(range(numAgents)), attackerNum)
178 |
179 | normalAgents = [k for k in range(numAgents) if k not in attackers]
180 |
181 | Neigh = []
182 | for k in range(numAgents):
183 | neighbor = findNeighbors(x_init, k, numAgents, sensingRange, maxNeighborSize=10)
184 | Neigh.append(neighbor)
185 |
186 | fig = plt.figure(figsize=(4, 3))
187 | # plt.grid(True, which='major')
188 | ax = plt.gca()
189 | ax.set_xlim(lower - 0.1, upper + 0.1)
190 | ax.set_ylim(lower - 0.1, upper + 0.1)
191 | # plt.xticks([0.3*i for i in range(-5,5, 1)])
192 | # plt.yticks([0.3*i for i in range(-5,5, 1)])
193 | plt.gca().set_aspect('equal', adjustable='box')
194 | ax.grid(True, linestyle='dotted')
195 | # for tic in ax.xaxis.get_major_ticks():
196 | # tic.tick1On = tic.tick2On = False
197 | # tic.label1On = tic.label2On = False
198 | # for tic in ax.yaxis.get_major_ticks():
199 | # tic.tick1On = tic.tick2On = False
200 | # tic.label1On = tic.label2On = False
201 | start, end = ax.get_xlim()
202 | ax.xaxis.set_ticks(np.arange(0, end + 0.4, 0.5))
203 | ax.yaxis.set_ticks(np.arange(0, end + 0.4, 0.5))
204 |
205 | # ax.set_xticks([0, 0.3, 0.4, 1.0, 1.5])
206 | # ax.set_xticklabels([0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
207 | # ax.set_yticklabels([0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
208 | for i in range(0, numAgents):
209 | for neighbor in Neigh[i]:
210 | plt.plot([x_init[i].x, x_init[neighbor].x], [x_init[i].y, x_init[neighbor].y], linewidth=0.1,
211 | color='gray')
212 |
213 | plot_point_set(x_init[:numAgents // 4], color='b')  # group 1, tracking target w0[0]
214 | plot_point_set(x_init[numAgents // 4:numAgents // 2], color='g')  # group 2, tracking target w0[1]
215 | plot_point_set(x_init[numAgents // 2:numAgents // 4 * 3], color='m')  # group 3, tracking target w0[2]
216 | plot_point_set(x_init[numAgents // 4 * 3:], color='y')  # group 4, tracking target w0[3]
217 | plot_point_set([x_init[p] for p in attackers], color='r')  # attackers are plotted in red
218 | plt.title('Network Connectivity')
219 | plt.savefig('fig/network_attackerNum%d.png' % attackerNum)  # set the title before saving so it appears in the file
220 |
221 | # plt.pause(0.1)
222 | plt.show()
223 | # plt.savefig('./result/largeNetwork/%s%d.eps' % (method, t))
224 | # end = input('Press enter to end the program.')
225 |
226 | fig4 = plt.figure(figsize=(4, 3))
227 | # plt.grid(True, which='major')
228 | ax = plt.gca()
229 | # ax.set_xlim(lower-0.1, upper+0.1)
230 | # ax.set_ylim(lower-0.1, upper+0.1)
231 | # plt.xticks([0.3*i for i in range(-5,5, 1)])
232 | # plt.yticks([0.3*i for i in range(-5,5, 1)])
233 | # plt.gca().set_aspect('equal', adjustable='box')
234 | ax.grid(True, linestyle='dotted')
235 | # for tic in ax.xaxis.get_major_ticks():
236 | # tic.tick1On = tic.tick2On = False
237 | # tic.label1On = tic.label2On = False
238 | # for tic in ax.yaxis.get_major_ticks():
239 | # tic.tick1On = tic.tick2On = False
240 | # tic.label1On = tic.label2On = False
241 | # start, end = ax.get_xlim()
242 | # ax.xaxis.set_ticks(np.arange(start, end + 0.01, 0.2))
243 | # ax.yaxis.set_ticks(np.arange(start, end + 0.01, 0.2))
244 |
245 | # ax.set_xticks([0, 0.3, 0.4, 1.0, 1.5])
246 | # ax.set_xticklabels([-1, "", "", "", "", 0, "", "", "", "", 1])
247 | # ax.set_yticklabels([-1, "", "", "", "", 0, "", "", "", "", 1])
248 |
249 | plot_point(w0[0], color='b')  # target for group 1
250 | plot_point(w0[1], color='g')  # target for group 2
251 | plot_point(w0[2], color='m')  # target for group 3
252 | plot_point(w0[3], color='y')  # target for group 4
253 | plt.title('Target Positions')
254 | plt.show()
255 |
256 | # psi_a = 0 * np.ones((2, len(attackers)))
257 | psi_a = 15 + np.random.random((2, len(attackers)))
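# Attack model: each attacker broadcasts a fixed bogus estimate, drawn once
# from [15, 16)^2 and never adapted to the data, so a resilient combine rule
# should learn to give such neighbors negligible weight.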
258 |
259 | mu_vd = 0
260 | mu_vu = 0
261 | # sigma_vd2 = 0 + 0 * np.random.random((numAgents, 1))
262 | # # sigma_vd2[random.sample(range(numAgents), 20)] = 5
263 | # sigma_vu2 = 0 + 0 * np.random.random((numAgents, 1))
264 | # # sigma_vu2[random.sample(range(numAgents), 5)] = 0.3
265 | sigma_vd2 = 0.1 + 0.1 * np.random.random((numAgents, 1))
266 | # sigma_vd2[random.sample(range(numAgents), 20)] = 5
267 | sigma_vu2 = 0.01 + 0.01 * np.random.random((numAgents, 1))
268 | sigma_vu2[random.sample(range(numAgents), 5)] = 0.1
269 |
270 | # The following parameters work
271 | # sigma_vd2 = 1 + 0.4 * np.random.random((numAgents, 1))
272 | # #sigma_vd2[random.sample(range(numAgents), 3)] = 3
273 | # sigma_vu2 = 0.5 + 0.05 * np.random.random((numAgents, 1))
274 | # #sigma_vu2[random.sample(range(numAgents), 3)] = 3
275 | vd = np.zeros((iteration, numAgents))
276 | vu = np.zeros((iteration, numAgents))
277 | for k in range(numAgents):
278 | vd[:, k] = np.random.normal(mu_vd, sigma_vd2[k], iteration)  # note: sigma_vd2 is used as a std dev, not a variance
279 | vu[:, k] = np.random.normal(mu_vu, sigma_vu2[k], iteration)  # likewise for sigma_vu2
280 |
281 | d = np.zeros((numAgents,))
282 | u = np.zeros((2, numAgents))
283 | q = np.zeros((2, numAgents))
284 | psi = np.zeros((2, numAgents))
285 |
286 | w_no = np.zeros((2, numAgents))
287 | for k in range(numAgents):
288 | w_no[0, k], w_no[1, k] = np.random.random(), np.random.random()
289 | # all four rules start from identical initial estimates
290 | w_avg = w_no.copy()
291 | w_loss = w_no.copy()
292 | w_dist = w_no.copy()
298 |
299 | Loss_no = np.zeros((iteration, numAgents))
300 | Loss_avg = np.zeros((iteration, numAgents))
301 | Loss_loss = np.zeros((iteration, numAgents))
302 | Loss_dist = np.zeros((iteration, numAgents))
303 |
304 | W1_no = np.zeros((iteration, numAgents))
305 | W1_avg = np.zeros((iteration, numAgents))
306 | W1_loss = np.zeros((iteration, numAgents))
307 | W1_dist = np.zeros((iteration, numAgents))
308 |
309 | Accumulated_Loss = 10 * np.ones((numAgents, numAgents))
310 | Accumulated_dist = 10 * np.ones((numAgents, numAgents))
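# Both accumulators start at a common positive constant so the inverse-loss
# weights are well defined from iteration 0 and all neighbors begin on an
# equal footing; with gamma = 0.1 the moving average forgets this prior over
# roughly 1 / gamma = 10 iterations.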
311 |
312 | for i in range(iteration):
313 |
370 |
371 | for k in range(numAgents):
372 | if k in attackers:
373 | continue
374 | dist = W0[k].distance(x_init[k])
375 | unit = [(W0[k].x - x_init[k].x) / dist, (W0[k].y - x_init[k].y) / dist]
376 | u[:, k] = unit + vu[i, k]
377 | d[k] = np.dot([W0[k].x - x_init[k].x, W0[k].y - x_init[k].y], u[:, k].T) + vd[i, k]
378 | q[:, k] = [x_init[k].x, x_init[k].y] + d[k] * u[:, k]
379 |
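# Measurement model for each normal agent k (attackers take no measurements):
#   u_k : unit vector from x_k toward its target W0[k], perturbed by vu
#   d_k : projected range (W0[k] - x_k) . u_k, perturbed by vd
#   q_k = x_k + d_k * u_k : the one-shot target estimate the LMS step chases.
# Note vu[i, k] is a scalar, so the same noise sample is added to both
# components of u_k.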
380 | # noncooperative
381 | w_no = noncooperative_learn(i, numAgents, psi, w_no, mu_k, q, attackers, psi_a)
382 |
383 | # cooperative
384 | w_avg = average_learn(i, numAgents, psi, w_avg, mu_k, q, attackers, psi_a, Neigh)
385 |
386 | w_loss, Accumulated_Loss = loss_learn(i, numAgents, x_loss, u, d, psi, w_loss, mu_k, q, attackers, psi_a,
387 | Accumulated_Loss, Neigh)
388 |
389 | w_dist, Accumulated_dist = distance_learn(i, numAgents, x_dist, u, d, psi, w_dist, mu_k, q, attackers, psi_a,
390 | Accumulated_dist, Neigh)
391 |
392 | # record each normal agent's instantaneous prediction loss and first
393 | # weight coordinate under every rule
394 | for k in range(numAgents):
395 | if k in attackers:
396 | continue
397 | loss_no = (d[k] + np.dot([x_init[k].x, x_init[k].y], u[:, k].T).item() - (
398 | np.dot([w_no[:, k]], u[:, k].T)).item()) ** 2
399 | W1_no[i, k] = w_no[0, k]
400 | Loss_no[i, k] = loss_no
401 |
402 | for k in range(numAgents):
403 | if k in attackers:
404 | continue
405 | loss_avg = (d[k] + np.dot([x_init[k].x, x_init[k].y], u[:, k].T).item() - (
406 | np.dot([w_avg[:, k]], u[:, k].T)).item()) ** 2
407 | W1_avg[i, k] = w_avg[0, k]
408 | Loss_avg[i, k] = loss_avg
409 |
410 | for k in range(numAgents):
411 | if k in attackers:
412 | continue
413 | loss_loss = (d[k] + np.dot([x_init[k].x, x_init[k].y], u[:, k].T).item() - (
414 | np.dot([w_loss[:, k]], u[:, k].T)).item()) ** 2
415 | W1_loss[i, k] = w_loss[0, k]
416 | Loss_loss[i, k] = loss_loss
417 |
418 | for k in range(numAgents):
419 | if k in attackers:
420 | continue
421 | loss_dist = (d[k] + np.dot([x_init[k].x, x_init[k].y], u[:, k].T).item() - (
422 | np.dot([w_dist[:, k]], u[:, k].T)).item()) ** 2
423 | W1_dist[i, k] = w_dist[0, k]
424 | Loss_dist[i, k] = loss_dist
434 |
435 | print('iteration %d' % i)
436 |
437 | print("Loss_no_mean =", np.mean(np.delete(Loss_no, attackers, axis=1), 1))
438 | # print("Loss_no_var", np.sqrt(np.var(np.delete(Loss_no, attackers, axis=1), 1)))
439 | print("Loss_no_min =", np.min(np.delete(Loss_no, attackers, axis=1), 1))
440 | print("Loss_no_max =", np.max(np.delete(Loss_no, attackers, axis=1), 1))
441 |
442 | print("Loss_avg_mean =", np.mean(np.delete(Loss_avg, attackers, axis=1), 1))
443 | # print("Loss_avg_var", np.sqrt(np.var(np.delete(Loss_avg, attackers, axis=1), 1)))
444 | print("Loss_avg_min =", np.min(np.delete(Loss_avg, attackers, axis=1), 1))
445 | print("Loss_avg_max =", np.max(np.delete(Loss_avg, attackers, axis=1), 1))
446 |
447 | print("Loss_loss_mean =", np.mean(np.delete(Loss_loss, attackers, axis=1), 1))
448 | # print("Loss_loss_var", np.sqrt(np.var(np.delete(Loss_loss, attackers, axis=1), 1)))
449 | print("Loss_loss_min =", np.min(np.delete(Loss_loss, attackers, axis=1), 1))
450 | print("Loss_loss_max =", np.max(np.delete(Loss_loss, attackers, axis=1), 1))
451 |
452 | print("Loss_dist_mean =", np.mean(np.delete(Loss_dist, attackers, axis=1), 1))
453 | # print("Loss_dist_var", np.sqrt(np.var(np.delete(Loss_dist, attackers, axis=1), 1)))
454 | print("Loss_dist_min =", np.min(np.delete(Loss_dist, attackers, axis=1), 1))
455 | print("Loss_dist_max =", np.max(np.delete(Loss_dist, attackers, axis=1), 1))
456 |
457 | if len(attackers) == 0:
458 | np.save('results/Loss_loss.npy', Loss_loss)
459 | np.save('results/Loss_avg.npy', Loss_avg)
460 | np.save('results/Loss_no.npy', Loss_no)
461 | np.save('results/Loss_dist.npy', Loss_dist)
462 |
463 |
464 | else:
465 | try:
466 | os.makedirs("results/attacked_num_%d" % len(attackers))
467 | except OSError:
468 | print("Creation of the directory results/attacked_num_%d failed" % len(attackers))
469 | np.save('results/attacked_num_%d/Loss_loss.npy' % len(attackers), Loss_loss)
470 | np.save('results/attacked_num_%d/Loss_avg.npy' % len(attackers), Loss_avg)
471 | np.save('results/attacked_num_%d/Loss_no.npy' % len(attackers), Loss_no)
472 | np.save('results/attacked_num_%d/Loss_dist.npy' % len(attackers), Loss_dist)  # mirror the attacker-free branch
472 |
473 | fig1 = plt.figure(figsize=(3.9, 2.5))
474 | # fig1 = plt.figure(figsize=(3.9, 2))
475 | plt.plot(np.log10(np.mean(np.delete(Loss_no, attackers, axis=1), 1)), label=r'Non-coop')
476 | plt.plot(np.log10(np.mean(np.delete(Loss_avg, attackers, axis=1), 1)), label=r'Average')
477 | plt.plot(np.log10(np.mean(np.delete(Loss_loss, attackers, axis=1), 1)), label=r'Loss-based')
478 | plt.plot(np.log10(np.mean(np.delete(Loss_dist, attackers, axis=1), 1)), label=r'Distance-based')
479 |
480 | # plt.plot(MSE_x_no[1:], label=r'Non-coop')
481 | # plt.plot((MSE_x_avg[1:]), label=r'Average')
482 | # plt.plot((MSE_x_loss[1:]), label=r'loss-based')
483 |
484 | # plt.title('cooperative under attack using median')
485 | plt.xlabel(r'iteration $i$', fontsize=10)
486 | plt.ylabel(r'$\log_{10}$ Loss', fontsize=10)
487 | # plt.xticks([0, 100, 200, 300, 400, 500])
488 | # plt.legend(fontsize=7, loc='lower left', bbox_to_anchor=(0.34, 0.43))
489 | # plt.yticks([-30,-15,0,15,30])
490 | # plt.legend(fontsize=7, loc='best')
491 | plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
492 | # plt.yticks([-75, -50, -25, 0, 25])
493 | # if attackerNum == 6:
494 | # plt.yticks([-40, -20, 0, 20, 40])
495 | # plt.ylim([-45, 45])
496 | # elif attackerNum == 0:
497 | # plt.yticks([-60, -40, -20, 0, 20])
498 | # plt.ylim([-70, 40])
499 | plt.tight_layout()
500 | plt.show()
501 | # fig1.savefig('fig/MSD_mobile_attack%d.eps' % attackerNum)
502 |
503 | fig2 = plt.figure(figsize=(11, 2.5))
504 | plt.subplot(151)
505 | for k in normalAgents:
506 | plt.plot(np.log10(Loss_no[:, k]))
507 | plt.xlabel('iteration $i$', fontsize=20)
508 | plt.ylabel(r'$\log_{10}$ Loss', fontsize=25)
509 | plt.xticks([0, 100, 200, 300, 400, 500])
510 |
511 | plt.subplot(152)
512 | for k in normalAgents:
513 | plt.plot(np.log10(Loss_avg[:, k]))
514 | plt.xlabel('iteration $i$', fontsize=20)
515 | plt.xticks([0, 100, 200, 300, 400, 500])
516 |
517 | plt.subplot(153)
518 | for k in normalAgents:
519 | plt.plot(np.log10(Loss_loss[:, k]))
520 | plt.xlabel('iteration $i$', fontsize=20)
521 | plt.xticks([0, 100, 200, 300, 400, 500])
522 |
523 | plt.subplot(154)
524 | for k in normalAgents:
525 | plt.plot(np.log10(Loss_dist[:, k]))
526 | plt.xlabel('iteration $i$', fontsize=20)
527 | plt.xticks([0, 100, 200, 300, 400, 500])
528 |
529 | plt.show()
530 |
531 | fig3 = plt.figure(figsize=(11, 2.5))
532 | plt.subplot(151)
533 | for k in normalAgents:
534 | plt.plot(W1_no[1:, k])
535 | plt.xlabel('iteration $i$', fontsize=20)
536 | plt.ylabel(r'$w_{k,i}(1)$', fontsize=25)
537 | plt.xticks([0, 100, 200, 300, 400, 500])
538 |
539 | plt.subplot(152)
540 | for k in normalAgents:
541 | plt.plot(W1_avg[1:, k])
542 | plt.xlabel('iteration $i$', fontsize=20)
543 | plt.xticks([0, 100, 200, 300, 400, 500])
544 |
545 | plt.subplot(153)
546 | for k in normalAgents:
547 | plt.plot(W1_loss[1:, k])
548 | plt.xlabel('iteration $i$', fontsize=20)
549 | plt.xticks([0, 100, 200, 300, 400, 500])
550 |
551 | plt.subplot(154)
552 | for k in normalAgents:
553 | plt.plot(W1_dist[1:, k])
554 | plt.xlabel('iteration $i$', fontsize=20)
555 | plt.xticks([0, 100, 200, 300, 400, 500])
556 |
557 | plt.show()
558 |
--------------------------------------------------------------------------------