├── doc
├── loose.jpg
└── tight.jpg
├── .gitignore
├── fix_train_labels.py
├── filter_wrong_annotations.py
├── readme.md
├── plot_points.py
├── utils
└── bi_resize.py
├── fliplr_and_points.py
├── folders.py
├── spine_augmentation.py
├── load_utils.py
├── train_angle.py
├── resize_images.py
├── box_crop.py
├── part_affinity_field_net.py
├── redundant_bones_filter.py
├── train_spine_box.py
├── eval.py
├── train.py
├── ladder_shufflenet.py
├── cobb_angle_eval.py
├── confidence_map.py
├── cobb_angle_parse.py
├── GaussianToPoint.ipynb
├── LICENSE
└── LICENSE.txt
/doc/loose.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zc402/Scoliosis/HEAD/doc/loose.jpg
--------------------------------------------------------------------------------
/doc/tight.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zc402/Scoliosis/HEAD/doc/tight.jpg
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .pytest_cache
3 | *__pycache__*
4 | data_spine
5 | plotted_fig
6 | resized_data
7 | results
8 | checkpoint.pth
9 | data_spine.zip
10 | .ipynb*
11 |
--------------------------------------------------------------------------------
/fix_train_labels.py:
--------------------------------------------------------------------------------
import numpy as np
import os
import os.path as path
import glob
import scipy.io
import cv2
import folders as f

# Convert manually annotated training labels (normalized .npy point arrays)
# back to the dataset's .mat label format.
npy_list = glob.glob(path.join(f.manual_fix_train, "*.npy"))
for npy_path in npy_list:
    # "<image>.npy" corresponds to image "<image>.jpg" in the training folder
    img_path = path.join(f.train_img, path.basename(npy_path)[:-4] + ".jpg")
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # Robustness fix: cv2.imread returns None for a missing/corrupt file,
        # which previously crashed on img.shape. Skip and report instead.
        print("Skip (image not readable):", img_path)
        continue
    hw = img.shape[:2]
    npy = np.load(npy_path)
    # Points are stored normalized to [0, 1]; scale back to pixels (x*W, y*H)
    npy_ori = npy * [hw[1], hw[0]]
    mat_recover = {"p2": npy_ori}
    # Saved as "<image>.jpg.mat" to match the dataset's naming (e.g. aaa.jpg.mat)
    scipy.io.savemat(path.join(f.manual_fix_train, path.basename(npy_path)[:-4] + ".jpg.mat"), mat_recover)
--------------------------------------------------------------------------------
/filter_wrong_annotations.py:
--------------------------------------------------------------------------------
1 | """This script tells us that there are too many wrong annotations in dataset (around 100)
2 | To keep enough training samples, we'll have to write compatible codes rather than deleting them"""
3 | import numpy as np
4 | import os.path as path
5 | import glob
6 | import os
7 | import random
8 | import cv2
9 | import folders as f
10 | import csv
11 |
def select_wrong_annotation(img_folder, label_folder):
    """Find annotations whose landmark y-coordinates are not ordered top-to-bottom.

    :param img_folder: folder containing *.jpg images
    :param label_folder: folder containing "<image>.jpg.npy" landmark arrays
    :return: list of image paths whose annotations look wrong (also printed)
    """
    img_paths = glob.glob(path.join(img_folder, "*.jpg"))

    def check_annotation(img_path):
        # Return True for correct annotations, False for wrong annotations.
        label_name = path.basename(img_path) + ".npy"
        label_path = path.join(label_folder, label_name)
        label = np.load(label_path)  # [P, xy]
        left = label[0::2]   # even rows: left-side landmarks
        right = label[1::2]  # odd rows: right-side landmarks
        # y must be non-decreasing from top to bottom on both sides
        for i in range(left.shape[0] - 1):
            if left[i + 1, 1] < left[i, 1]:
                return False
            if right[i + 1, 1] < right[i, 1]:
                return False
        return True

    # Idiom fix: single comprehension instead of building p/None then filtering
    wrong_paths = [p for p in img_paths if not check_annotation(p)]
    print(wrong_paths)
    # Generalization: return the list so callers can use it programmatically
    # (previously returned None; existing caller ignores the return value)
    return wrong_paths
31 |
if __name__ == "__main__":
    # Print the training images whose resized .npy labels fail the ordering check
    select_wrong_annotation(f.train_img, f.resize_train_label)
34 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | #### Automated Vertebral Landmarks and Spinal Curvature Estimation using Non-directional Part Affinity Fields
2 |
3 |
4 |
5 |
6 |
7 |
8 | # Paper
9 |
10 | [Automated vertebral landmarks and spinal curvature estimation using non-directional part affinity fields](https://www.sciencedirect.com/science/article/abs/pii/S0925231221000989)
11 |
12 | # Preparation
13 | Acquire the datasets (see below).
14 |
Unzip the train and val data into `dataroot/boostnet_labeldata`
16 |
17 | Unzip the test data into `dataroot/submit_test_images` folder
18 |
19 | Merge train and val csv annotation, put into `dataroot/trainval-labels` folder
20 |
21 | By default, `dataroot` = `../ScoliosisData`
22 |
23 | ## Use "train" set, test on "val" set
24 | Run resize_images.py to apply augmentation and resize
25 |
26 | Run train.py to train on "train" set
27 |
28 | Run eval.py to produce heatmaps
29 |
30 | ## Use "train val" set, test on "submit test" set
31 | Run resize_images.py to flipLR, resize
32 |
33 | Run train.py --trainval to train
34 |
35 | Run eval.py --trainval to produce heatmaps
36 |
37 | Run cobb_angle_eval.py to evaluate landmark pairs and Cobb angles
38 |
39 | # Dataset
40 |
41 | Dataset provided by:
42 |
43 | >Wu, Hongbo, et al. "Automatic landmark estimation for adolescent idiopathic scoliosis assessment using BoostNet." International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, Cham, 2017.
44 |
--------------------------------------------------------------------------------
/plot_points.py:
--------------------------------------------------------------------------------
1 | """
2 | Run this script to plot keypoints on training images, and save to folder.
3 | """
4 | import numpy as np
5 | import glob
6 | import os.path as path
7 | import scipy.io
8 | import matplotlib.pyplot as plt
9 | import os
10 | import folders as f
11 |
12 |
if __name__ == '__main__':

    os.makedirs(f.plot, exist_ok=True)

    def plot(img_folder, mat_folder):
        """Overlay the annotated landmarks on every image and save to f.plot."""
        for img_path in glob.glob(path.join(img_folder, "*")):
            img_name = path.basename(img_path)
            # Label shares the image name; scipy's loadmat appends ".mat" itself
            pts = scipy.io.loadmat(path.join(mat_folder, img_name))["p2"]  # [[x1,y1],[x2,y2]...]
            # print(pts)
            xs, ys = list(zip(*pts))  # [[x,x,...][y,y,...]]
            # Plot
            plt.style.use('grayscale')
            plt.imshow(plt.imread(img_path))
            plt.scatter(xs, ys, color='white', s=3)
            # Number each landmark (1-based), offset right so the dot stays visible
            for idx, (x, y) in enumerate(zip(xs, ys)):
                plt.annotate(idx + 1, (x + 15, y), color='yellow', size=4)
            plt.axis("off")

            out_path = path.join(f.plot, img_name)
            plt.savefig(out_path, dpi=300)
            plt.clf()
            print(out_path)
            # plt.show()

    # plot(f.train_img_flip, f.train_mat_flip)
    plot(f.train_img, f.train_mat)
    # plot(f.val_img, f.val_mat)
46 |
--------------------------------------------------------------------------------
/utils/bi_resize.py:
--------------------------------------------------------------------------------
1 | """
2 | This file contains image resize function.
3 | It resize an image to a specific size, then resize it back after detection.
4 | """
5 | import cv2
6 | import sys
7 | import numpy as np
8 |
9 | if sys.version_info[0] < 3:
10 | raise RuntimeError("Requires Python 3")
11 |
12 |
def resize_img(src, dst_wh):
    """
    Resize grayscale image src to dst_wh, keeping the aspect ratio.

    The scaled image is centered on a black canvas of exactly dst_wh, and a
    record is returned so points can be mapped with resize_pt / reverse.
    :param src: grayscale image, shape [H, W]
    :param dst_wh: target size as (width, height)
    :return: resized_image, record where record = (left_pad, top_pad, scale)
    """
    # Only 2-D (grayscale) input is supported (comment fixed: was "HWC")
    if len(src.shape) != 2:
        raise ValueError("src is not gray scale")

    sh = src.shape[0]
    sw = src.shape[1]
    dh = dst_wh[1]
    dw = dst_wh[0]
    # ratio: W/H — compare aspect ratios to find the limiting dimension
    ratio_src = sw / sh
    ratio_dst = dw / dh

    if ratio_src >= ratio_dst:
        # Source relatively wider: resize by W
        resize_ratio = dw / sw
        nw = dw
        nh = int(sh * resize_ratio)
    else:
        # Source relatively taller: resize by H
        resize_ratio = dh / sh
        nw = int(sw * resize_ratio)
        nh = dh

    resized_img = cv2.resize(src, (nw, nh), interpolation=cv2.INTER_CUBIC)
    # Fix: pad canvas follows the source dtype instead of hard-coded uint8,
    # which silently truncated float or 16-bit inputs. uint8 sources unchanged.
    black = np.zeros([dh, dw], dtype=src.dtype)
    left = (dw - nw) // 2
    top = (dh - nh) // 2
    black[top: top + nh, left: left + nw] = resized_img[...]
    result = black

    resize_record = (left, top, resize_ratio)
    return result, resize_record
53 |
54 |
def resize_pt(xy, resize_record):
    """Map a point (X, Y) from the original image onto the resized, padded one."""
    left, top, ratio = resize_record
    # scale first, then shift by the padding offsets
    return xy[0] * ratio + left, xy[1] * ratio + top
61 |
62 |
def reverse(xy, resize_record):
    """Inverse of resize_pt: map a point (X, Y) back to the original image."""
    left, top, ratio = resize_record
    # undo the padding shift, then undo the scaling
    return (xy[0] - left) / ratio, (xy[1] - top) / ratio
73 |
74 |
--------------------------------------------------------------------------------
/fliplr_and_points.py:
--------------------------------------------------------------------------------
1 | """
2 | If we flip left and right, the left top point will become right top point.
3 | So this can't be done at spine_augmentation.py.
4 | Run this script to fliplr and adjust points, and save to training folder.
5 | """
6 | import imgaug as ia
7 | import imgaug.augmenters as iaa
8 | from imgaug.augmentables.kps import KeypointsOnImage
9 | import numpy as np
10 | import os
11 | import os.path as path
12 | import glob
13 | import scipy.io
14 | import cv2
15 | import folders as f
16 |
def flip_lr(img_folder, mat_folder, save_img_folder, save_label_folder):
    """Mirror every image horizontally and fix up its landmark labels.

    After Fliplr each left landmark becomes a right one (and vice versa), so
    point rows are swapped pairwise to restore the left/right convention.
    Outputs are saved as "<name>_flip.jpg" images and labels.
    """
    for img_path in glob.glob(path.join(img_folder, "*")):
        img_name = path.basename(img_path)
        img_basename, ext = path.splitext(img_name)
        mat = scipy.io.loadmat(path.join(mat_folder, img_name))["p2"]  # [[x1,y1],[x2,y2]...]

        cv_img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        img_flip, keypoints_flip = iaa.Fliplr(1.0)(images=cv_img, keypoints=mat[np.newaxis])
        keypoints_flip = keypoints_flip[0]  # The output has a batch size of 1
        # Recover left and right ordering: 0->1, 1->0, 2->3, 3->2, ...
        pts_recover = np.zeros_like(keypoints_flip)
        pts_recover[::2, :] = keypoints_flip[1::2, :]
        pts_recover[1::2, :] = keypoints_flip[::2, :]
        # Persist the flipped pair under a "_flip" name
        flip_name = "%s_flip.jpg" % img_basename
        flip_img_path = path.join(save_img_folder, flip_name)
        scipy.io.savemat(path.join(save_label_folder, flip_name), {"p2": pts_recover})
        cv2.imwrite(flip_img_path, img_flip)
        print(flip_img_path)
44 |
def main():
    """Create flipped copies of the train and val sets (images and labels)."""
    for folder in (f.train_img_flip, f.train_mat_flip, f.val_img_flip, f.val_mat_flip):
        os.makedirs(folder, exist_ok=True)
    flip_lr(f.train_img, f.train_mat, f.train_img_flip, f.train_mat_flip)
    flip_lr(f.val_img, f.val_mat, f.val_img_flip, f.val_mat_flip)

if __name__ == '__main__':
    main()
52 |
--------------------------------------------------------------------------------
/folders.py:
--------------------------------------------------------------------------------
import os.path as path

# Central registry of every dataset / output location used by the project.
# data_root sits one level above the repository by default.

# Fliplr
data_root = path.join("..", "ScoliosisData")
data_spine = path.join(data_root, "boostnet_labeldata")

# Original training images and their .mat landmark labels
train_img = path.join(data_spine, "data", "training")
train_mat = path.join(data_spine, "labels", "training")

# Horizontally flipped copies produced by fliplr_and_points.py
train_img_flip = path.join(data_spine, "image_flip", "training")
train_mat_flip = path.join(data_spine, "labels_flip", "training")

# Create train-val set for final training
val_img_flip = path.join(data_spine, "image_flip", "test")
val_mat_flip = path.join(data_spine, "labels_flip", "test")

# Plot points
plot = path.join(data_root, "plot_label_on_image")

# Resize
val_img = path.join(data_spine, "data", "test")
val_mat = path.join(data_spine, "labels", "test")

resized_data = path.join(data_root, "resized_data")
resize_train_img = path.join(resized_data, "image", "training")
resize_train_label = path.join(resized_data, "labels", "training")
resize_test_img = path.join(resized_data, "image", "test")
resize_test_label = path.join(resized_data, "labels", "test")
submit_test_img = path.join(data_root, "submit_test_images")
resize_submit_test_img = path.join(resized_data, "image", "submit_test")

# Temporal folder for images with less head and leg areas
submit_test_img_lesshead = path.join(data_root, "submit_test_images_lesshead")


# Train
# NOTE(review): "train_resutls" is misspelled but is the on-disk folder name;
# renaming it would orphan existing results — confirm before fixing.
train_results = path.join(data_root, "train_resutls")  # Results output folder
checkpoint = path.join(data_root, "checkpoint")
checkpoint_heat_path = path.join(checkpoint, "checkpoint.pth")  # -heat
checkpoint_angle_path = path.join(checkpoint, "checkpoint-angle.pth")
checkpoint_heat_trainval_path = path.join(checkpoint, "checkpoint-heat-trainval.pth")
# NOTE(review): ".path" below looks like a typo for ".pth", but it is used
# consistently for both save and load; changing it would break old checkpoints.
checkpoint_angle_trainval_path = path.join(checkpoint, "checkpoint-angle-trainval.path")
checkpoint_box_path = path.join(checkpoint, "checkpoint-box.pth")
checkpoint_box_trainval_path = path.join(checkpoint, "checkpoint-box-trainval.pth")

# Eval
validation_plot_out = path.join(data_root, "validation_heatmaps")
submit_test_plot_out = path.join(data_root, "submit_test_heatmaps")
submit_test_plot_pairs = path.join(data_root, "submit_test_plot_pairs")

# Angle csv (angles.csv / filenames.csv live beside the landmark labels)
train_angle = path.join(data_spine, "labels", "training")
val_angle = path.join(data_spine, "labels", "test")
trainval_angle = path.join(data_root, "trainval-labels")

# Submit
resize_trainval_img = path.join(resized_data, "image", "train-val")
resize_trainval_label = path.join(resized_data, "labels", "train-val")

# Box
train_box_results = path.join(data_root, "train_box_resutls")
submit_test_box_plot = path.join(data_root, "submit_test_box_plot")
submit_test_trim_images = path.join(data_root, "submit_test_trim_images")

# Manual labeled
manual_npy_submit_test = path.join(data_root, "manual_2")
manual_fix_train = path.join(data_root, "manual_1")
--------------------------------------------------------------------------------
/spine_augmentation.py:
--------------------------------------------------------------------------------
1 | """
2 | Image augmentation module. Run this script to see augmentation results.
3 | """
4 | import imgaug as ia
5 | import imgaug.augmenters as iaa
6 | from imgaug.augmentables.kps import KeypointsOnImage
7 | import numpy as np
8 |
9 |
def augment_batch_img(batch_img, batch_pts, plot=False):
    """
    Image augmentation, used when training the heatmap network.
    :param batch_img: [B,H,W,C]
    :param batch_pts: [B,number,xy]
    :param plot: if True, show before/after keypoint overlays for debugging
    :return: aug_b_img, aug_b_pts
    """
    # Fix: removed an unused `sometimes` lambda that was never applied here
    seq = iaa.Sequential([
        iaa.CropAndPad(percent=((0., 0.), (-0.1, 0.1), (0., 0.), (-0.1, 0.1))),
        iaa.Affine(rotate=(-10, 10)),
        iaa.Add((-25, 25))  # change brightness
    ])
    aug_b_imgs, aug_b_pts = seq(images=batch_img, keypoints=batch_pts)

    if plot:
        import cv2
        batch_img = [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in batch_img]
        aug_b_imgs = [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in aug_b_imgs]
        for i in range(len(batch_img)):
            print("[Image #%d]" % (i,))
            keypoints_before = KeypointsOnImage.from_xy_array(
                batch_pts[i], shape=batch_img[i].shape)
            keypoints_after = KeypointsOnImage.from_xy_array(
                aug_b_pts[i], shape=aug_b_imgs[i].shape)
            image_before = keypoints_before.draw_on_image(batch_img[i])
            image_after = keypoints_after.draw_on_image(aug_b_imgs[i])
            ia.imshow(np.hstack([image_before, image_after]))
    return aug_b_imgs, aug_b_pts
39 |
def augment_batch_img_for_box(batch_img, batch_pts, plot=False):
    """
    Image augmentation, used when training the bounding-box network.
    :param batch_img: [B,H,W,C]
    :param batch_pts: [B,number,xy]
    :param plot: if True, show before/after keypoint overlays for debugging
    :return: aug_b_img, aug_b_pts
    """
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)
    seq = iaa.Sequential([
        sometimes(iaa.Pad(percent=(0, 0.8))),  # random padding, applied half the time
        iaa.Affine(rotate=(-5, 5)),
        iaa.Multiply((0.7, 1.3))  # change brightness
    ])
    aug_imgs, aug_pts = seq(images=batch_img, keypoints=batch_pts)

    if plot:
        import cv2
        to_bgr = lambda im: cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
        bgr_before = [to_bgr(im) for im in batch_img]
        bgr_after = [to_bgr(im) for im in aug_imgs]
        for i, (img_b, img_a) in enumerate(zip(bgr_before, bgr_after)):
            print("[Image #%d]" % (i,))
            kps_b = KeypointsOnImage.from_xy_array(batch_pts[i], shape=img_b.shape)
            kps_a = KeypointsOnImage.from_xy_array(aug_pts[i], shape=img_a.shape)
            ia.imshow(np.hstack([kps_b.draw_on_image(img_b),
                                 kps_a.draw_on_image(img_a)]))
    return aug_imgs, aug_pts
69 |
def augment_batch_img_for_angle(batch_img, batch_pts, plot=False):
    """
    Image augmentation, used when training the angle network.
    Note: unlike augment_batch_img, no rotation augmenter is applied here.
    :param batch_img: [B,H,W,C]
    :param batch_pts: [B,number,xy]
    :param plot: unused; kept for signature consistency with the other augmenters
    :return: aug_b_img, aug_b_pts
    """
    # Fix: removed an unused `sometimes` lambda that was never applied here
    seq = iaa.Sequential([
        iaa.CropAndPad(percent=((0., 0.), (-0.1, 0.1), (0., 0.), (-0.1, 0.1))),
        iaa.Add((-25, 25))  # change brightness
    ])
    aug_b_imgs, aug_b_pts = seq(images=batch_img, keypoints=batch_pts)

    return aug_b_imgs, aug_b_pts
85 |
if __name__ == "__main__":
    # Run this script to see augmentation results
    # (each batch pops an imgaug window; close it to advance to the next one)
    import load_utils
    data_gen = load_utils.train_loader(5)
    for imgs, labels in data_gen:
        # augment_batch_img(imgs, labels, plot=True)
        augment_batch_img_for_box(imgs, labels, plot=True)
93 |
--------------------------------------------------------------------------------
/load_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Load images and corresponding npy labels from file.
3 | """
4 | import numpy as np
5 | import os.path as path
6 | import glob
7 | import os
8 | import random
9 | import cv2
10 | import folders as f
11 | import csv
12 |
13 |
def load_imgs_labels(batch_size, label_folder, img_folder, rand, angle_folder=None):
    """
    Internal generator for loading train or test data
    :param batch_size: number of samples per yielded batch
    :param label_folder: folder of .npy landmark files named "<image>.npy"
    :param img_folder: folder holding the corresponding grayscale images
    :param rand: shuffle the sample order at the start of every epoch
    :param angle_folder: The folder that contains "angles.csv" and "filenames.csv"
    :return: imgs, labels (plus angles when angle_folder is given)
    """
    label_list = glob.glob(path.join(label_folder, "*"))
    total_size = len(label_list)
    # Drop the remainder so every yielded batch is exactly batch_size
    loop_range = total_size - (total_size % batch_size)
    if angle_folder is not None:  # Load filenames.csv angles.csv
        filenames, angles = load_filename_angle(angle_folder)  # List of [filename, angles]
    else:
        filenames, angles = None, None
    while True:  # endless epochs; consumers simply stop iterating
        if rand:
            random.shuffle(label_list)
        for i in range(0, loop_range, batch_size):
            batch_label_path = label_list[i:i+batch_size]
            batch_label = [np.load(j) for j in batch_label_path]
            # label contains .npy, use splitext to delete it.
            batch_img_name = [path.splitext(path.basename(j))[0] for j in batch_label_path]
            batch_img_path = [path.join(img_folder, name) for name in batch_img_name]
            batch_img = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in batch_img_path]
            if angle_folder is not None:
                # Name to be matched with angles: strip ".npy", and map a
                # "_flip" augmented image back to its original csv filename
                batch_basename = [path.basename(fi).replace(".npy", "") for fi in batch_label_path]
                batch_basename = [n.replace("_flip.jpg", ".jpg") for n in batch_basename]

                batch_angles = [angles[filenames.index(fi)] for fi in batch_basename]
                batch_angles = [list(map(float, a)) for a in batch_angles]
                yield batch_img, batch_label, batch_angles
            else:
                yield batch_img, batch_label
51 |
def train_loader(batch_size, load_angle=False, use_trainval=False):
    """
    Training data generator
    :param batch_size: samples per yielded batch
    :param load_angle: also yield per-image Cobb angles read from the angle csv
    :param use_trainval: train on the merged train+val set (final training)
    :return: batch_img, batch_label (plus batch_angles when load_angle is True)
    """
    if use_trainval:  # Use all samples at final training
        img_folder = f.resize_trainval_img
        label_folder = f.resize_trainval_label
    else:  # Use train set, val set remains for validation
        img_folder = f.resize_train_img
        label_folder = f.resize_train_label

    # The angle csv folder must match the image set in use
    angle_folder = None
    if load_angle:
        angle_folder = f.trainval_angle if use_trainval else f.train_angle
    # Idiom fix: delegate with `yield from` instead of re-yielding in a loop
    yield from load_imgs_labels(batch_size, label_folder, img_folder, rand=True,
                                angle_folder=angle_folder)
73 |
74 |
def test_loader(batch_size, load_angle=False):
    """
    Test data generator
    :param batch_size: samples per yielded batch
    :param load_angle: also yield per-image Cobb angles read from the angle csv
    :return: batch_img, batch_label (plus batch_angles when load_angle is True)
    """
    img_folder = f.resize_test_img
    label_folder = f.resize_test_label
    # Idiom fix: delegate with `yield from` instead of re-yielding in a loop.
    # Test order is kept deterministic (rand=False).
    yield from load_imgs_labels(batch_size, label_folder, img_folder, rand=False,
                                angle_folder=f.val_angle if load_angle else None)
90 |
91 |
92 | # CSV Loader
93 |
def load_filename_angle(folder):
    """
    Load image filenames and their corresponding Cobb angles.

    Reads "filenames.csv" (one filename per row) and "angles.csv" (one row of
    angle strings per image) from *folder*; rows correspond by index.
    :return: (filenames, angles) — list of str, list of angle-string lists
    """
    angle_path = path.join(folder, "angles.csv")
    filename_path = path.join(folder, "filenames.csv")

    with open(angle_path, mode='r') as angle_csv, open(filename_path, mode='r') as filename_csv:
        # Each filename row is a single-element list; unwrap to plain strings
        filenames = [row[0] for row in csv.reader(filename_csv)]
        angles = list(csv.reader(angle_csv))
    assert len(filenames) == len(angles)
    return filenames, angles
110 |
111 |
112 |
--------------------------------------------------------------------------------
/train_angle.py:
--------------------------------------------------------------------------------
1 | """
2 | Train cobb angle value using heatmaps
3 | """
4 | import load_utils
5 | import argparse
6 | import part_affinity_field_net
7 | import folders as f
8 | import os.path as path
9 | import torch
10 | import torch.nn as nn
11 | import torch.optim as optim
12 | import numpy as np
13 | import spine_augmentation as aug
14 |
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", default=5, type=int, required=False, help="batch size")
    parser.add_argument("--trainval", action='store_true', default=False)
    args = parser.parse_args()
    batch_size = args.s
    if args.trainval:  # Final training, use train and val set
        train_data_loader = load_utils.train_loader(batch_size, load_angle=True, use_trainval=True)
        print("--- Using [train, val] set as training set!")
    else:
        train_data_loader = load_utils.train_loader(batch_size, load_angle=True)
    test_data_loader = load_utils.test_loader(batch_size, load_angle=True)

    # The heatmap network is frozen (eval mode + no_grad below); only the
    # angle network is trained, on top of the heatmap net's PAF output.
    net_heat = part_affinity_field_net.SpineModelPAF()
    net_heat.cuda()
    net_heat.eval()
    net_angle = part_affinity_field_net.CobbAngleModel()
    net_angle.cuda()

    # Load heatmap network checkpoint (required — it feeds the angle net)
    save_path_heat = f.checkpoint_heat_trainval_path if args.trainval else f.checkpoint_heat_path
    if path.exists(save_path_heat):
        net_heat.load_state_dict(torch.load(save_path_heat))
    else:
        raise FileNotFoundError("Heatmap model checkpoint not found: {}.".format(save_path_heat))

    # Load angle network checkpoint
    if not args.trainval:
        save_path_angle = f.checkpoint_angle_path
        if path.exists(save_path_angle):
            net_angle.load_state_dict(torch.load(save_path_angle))
            print("Load angle net checkpoint")
        else:
            print("Train angle net from scratch")
    else:  # Trainval: prefer trainval checkpoint, fall back to train checkpoint
        save_path_angle = f.checkpoint_angle_trainval_path
        if path.exists(save_path_angle):
            net_angle.load_state_dict(torch.load(save_path_angle))
            print("Load model weights from [trainval] checkpoint")
        elif path.exists(f.checkpoint_angle_path):  # Transfer learning
            net_angle.load_state_dict(torch.load(f.checkpoint_angle_path))
            print("No [trainval] checkpoint but [train] checkpoint exists. Load [train]")
        else:  # From scratch
            print("No [trainval] or [train] checkpoint, training [train, val] from scratch")

    optimizer = optim.Adam(net_angle.parameters(), lr=0.001)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=6000, verbose=True)  # Be patient for n steps

    step = 0
    device = torch.device("cuda")
    for train_imgs, train_labels, train_angles in train_data_loader:
        train_imgs, train_labels = aug.augment_batch_img(train_imgs, train_labels)  # TODO: rotate or not??

        criterion = nn.MSELoss()
        # To numpy, NCHW. normalize to [0, 1]
        norm_train_imgs = np.asarray(train_imgs, np.float32)[:, np.newaxis, :, :] / 255.0
        t_train_imgs = torch.from_numpy(norm_train_imgs).to(device)
        with torch.no_grad():  # heatmap net acts as a fixed feature extractor
            out_pcm, out_paf, _, _= net_heat(t_train_imgs)

        # Angles are divided by 90 so targets lie roughly in [0, 1]
        np_train_angles = np.array(train_angles, dtype=np.float32)
        norm_train_angles = np_train_angles / 90.
        t_train_angles = torch.from_numpy(norm_train_angles).to(device)

        predict_angles = net_angle(out_paf)

        loss = criterion(predict_angles, t_train_angles)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step = step + 1
        loss_value = loss.item()
        scheduler.step(loss_value)
        lr = optimizer.param_groups[0]['lr']
        print("Step: %d, Loss: %f, LR: %f" % (step, loss_value, lr))
        # NOTE(review): 10e-5 equals 1e-4 — confirm whether 1e-5 was intended
        if lr < 10e-5:
            print("Stop on plateau")
            break

        # Check train acc (mean absolute error, in degrees)

        norm_predict_angles = predict_angles.detach().cpu().numpy()
        angle_recover = norm_predict_angles * 90.
        print(np.mean(np.abs(np_train_angles - angle_recover)))

        # Save
        if step % 100 == 0:
            torch.save(net_angle.state_dict(), save_path_angle)
            print("Angle model saved")

        # Test: print raw signed errors (degrees) on one validation batch
        if step % 100 == 0:
            net_angle.eval()
            with torch.no_grad():
                test_imgs, _, test_angles= next(test_data_loader)
                norm_test_imgs = np.asarray(test_imgs, np.float32)[:, np.newaxis, :, :] / 255.0
                t_test_imgs = torch.from_numpy(norm_test_imgs).to(device)

                out_pcm, out_paf, _, _= net_heat(t_test_imgs)

                np_test_angles = np.array(test_angles, dtype=np.float32)
                norm_test_angles = np_test_angles / 90.
                t_test_angles = torch.from_numpy(norm_test_angles).to(device)

                norm_predict_angles = net_angle(out_paf)
                norm_predict_angles = norm_predict_angles.detach().cpu().numpy()
                predict_angles = norm_predict_angles * 90.

                #Use SMAPE?
                print(np_test_angles - predict_angles)

            net_angle.train()
--------------------------------------------------------------------------------
/resize_images.py:
--------------------------------------------------------------------------------
1 | """
2 | Run this script to resize images and points and save to folder.
3 | Pad images to same dimension, do not change width:height
4 | also change point positions
5 | Width:Height of spine images are 1:3, approximately
6 | A typical pair would be 500:1500
7 | """
8 |
9 | import numpy as np
10 | import glob
11 | import os.path as path
12 | import scipy.io
13 | import matplotlib.pyplot as plt
14 | import os
15 | import utils.bi_resize as br
16 | import cv2
17 | import folders as f
18 | import argparse
19 | import shutil
20 | import fliplr_and_points
21 |
def resize_save(dst_wh, img_folder, mat_folder, save_img_folder, save_label_folder, plot=False):
    """
    Resize every jpg in img_folder to dst_wh (ratio kept, padded) and save it,
    together with the correspondingly transformed landmark points.
    :param dst_wh: target (width, height)
    :param img_folder: source image folder
    :param mat_folder: source .mat label folder, or None for unlabeled images
    :param save_img_folder: destination for resized images
    :param save_label_folder: destination for resized .npy labels
    :param plot: show each resized image with its points (debugging)
    :return:
    """
    # List of paths
    img_list = glob.glob(path.join(img_folder, "*.jpg"))
    # List of name
    img_name_list = [path.basename(p) for p in img_list]

    for img_name in img_name_list:
        img_path = path.join(img_folder, img_name)
        cv_img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        resized_image, rec = br.resize_img(cv_img, dst_wh)

        save_img_path = path.join(save_img_folder, img_name)
        cv2.imwrite(save_img_path, resized_image)
        print(save_img_path)

        if mat_folder is not None:
            mat_path = path.join(mat_folder, img_name)
            mat = scipy.io.loadmat(mat_path)["p2"]  # [[x1,y1],[x2,y2]...]

            # Apply the same scale/pad transform to the landmarks.
            # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int is the documented replacement and is equivalent.
            resized_pts = [br.resize_pt(xy, rec) for xy in mat]
            resized_pts = np.array(resized_pts, dtype=int)

            if plot:
                plot_image(resized_image, resized_pts)

            save_label_path = path.join(save_label_folder, img_name)
            np.save(save_label_path, resized_pts)
57 |
58 |
59 |
def plot_image(img, mat):
    """Show img with its landmark points and indices overlaid (debug helper)."""
    xs, ys = list(zip(*mat))  # [[x,x,...][y,y,...]]
    # plt.style.use('grayscale')
    plt.imshow(img, cmap='gray')
    plt.scatter(xs, ys, color='yellow', s=10)
    for idx, (x, y) in enumerate(zip(xs, ys)):
        plt.annotate(idx, (x, y), color='yellow', size=5)
    plt.axis("off")
    # NOTE(review): "plotted_fig" is created but nothing is saved into it here
    # (the figure is only shown) — presumably a leftover; confirm before removing.
    if not path.isdir("plotted_fig"):
        os.mkdir("plotted_fig")
    plt.show()
    plt.clf()
73 |
74 | # def cut(img_folder):
75 |
def less_head(img_folder):
    """Trim 15% off the top (head) and bottom (legs) of every jpg, in place."""
    for file_path in glob.glob(path.join(img_folder, "*.jpg")):
        img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
        # Both margins are 15% of the original height
        margin = int(0.15 * img.shape[0])
        cropped = img[margin:, :][:-margin, :]
        cv2.imwrite(file_path, cropped)
87 |
def crop(img_folder):
    """Crop every jpg to a fixed 256x752 window and overwrite it in place."""
    for file_path in glob.glob(path.join(img_folder, "*.jpg")):
        img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
        # Fixed window: rows 88..840, columns 32..288
        cv2.imwrite(file_path, img[88: 88 + 752, 32: 32 + 256])
95 |
if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("--clean", action="store_true")
    args = parser.parse_args()

    # Folders (re)generated by this script; flip folders are rebuilt by fliplr
    generated = [f.resize_train_img, f.resize_train_label,
                 f.resize_test_img, f.resize_test_label,
                 f.resize_submit_test_img, f.resize_trainval_img,
                 f.resize_trainval_label]
    if args.clean:
        print("Remove all generated folders")
        for folder in generated + [f.train_img_flip, f.train_mat_flip,
                                   f.val_img_flip, f.val_mat_flip]:
            shutil.rmtree(folder, ignore_errors=True)

    for folder in generated:
        os.makedirs(folder, exist_ok=True)

    # Set (256, 752) to be able to divide by 16
    # Resize, crop submit test images
    # resize_save((384, 1120), f.submit_test_img, None, f.resize_submit_test_img, None) # Was (320, 928)
    resize_save((384, 1120), f.submit_test_trim_images, None, f.resize_submit_test_img, None)  # Was (320, 928)

    print("flip lr")
    fliplr_and_points.main()

    # less_head(f.resize_submit_test_img) # For angle_net
    # crop(f.resize_submit_test_img) # For angle_net
    # Train-val folder for final training
    resize_save((256, 752), f.train_img, f.train_mat, f.resize_trainval_img, f.resize_trainval_label)
    resize_save((256, 752), f.val_img, f.val_mat, f.resize_trainval_img, f.resize_trainval_label)
    resize_save((256, 752), f.train_img_flip, f.train_mat_flip, f.resize_trainval_img, f.resize_trainval_label)
    resize_save((256, 752), f.val_img_flip, f.val_mat_flip, f.resize_trainval_img, f.resize_trainval_label)

    # Original training images
    resize_save((256, 752), f.train_img, f.train_mat,
                f.resize_train_img, f.resize_train_label)
    # Flipped training images
    resize_save((256, 752), f.train_img_flip, f.train_mat_flip,
                f.resize_train_img, f.resize_train_label)
    # Test images
    resize_save((256, 752), f.val_img, f.val_mat,
                f.resize_test_img, f.resize_test_label)
--------------------------------------------------------------------------------
/box_crop.py:
--------------------------------------------------------------------------------
1 | """
2 | Crop submit images
3 | 1. zoom original image, fix height to 512, let width adjust itself to keep ratio
4 | 2. predict box y_min, y_max percentage
5 | """
6 | import numpy as np
7 | import glob
8 | import torch
9 | import os.path as path
10 | import folders as f
11 | import cv2
12 | import os
13 |
class Box():
    """Spine bounding-box predictor.

    Wraps a DenseNet-121 whose 1000-class head is replaced by a 4-value
    sigmoid head predicting (x_min, x_max, y_min, y_max), each as a
    fraction of image width/height in [0, 1] (ordering established by the
    unpacking in TrimMachine). Weights are loaded from the [trainval] box
    checkpoint; a missing checkpoint is a fatal error.
    """
    def __init__(self):
        # Local imports keep heavy deps out of module import time for users
        # that only import this file for other symbols.
        import torchvision
        import torch.nn as nn
        import torch
        net = torchvision.models.densenet121()
        # Replace the ImageNet classifier with a 4-output sigmoid box head.
        num_conv_features = net.features[-1].num_features
        classifier = nn.Sequential(nn.Linear(num_conv_features, 4), nn.Sigmoid())
        net.classifier = classifier
        net.eval().cuda()

        save_path = f.checkpoint_box_trainval_path
        if path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            print("Model loaded")
        else:
            raise FileNotFoundError()

        self.net = net

    def predict_box(self, img_gray):
        """Predict the spine box on one grayscale image.

        :param img_gray: uint8 image (H, W), values in [0, 255]
        :return: numpy array [x_min, x_max, y_min, y_max], each in [0, 1]
        """
        assert len(img_gray.shape) == 2
        assert np.max(img_gray) > 1.1, "expect uint8 image [0, 255]"

        img = [[img_gray]] # NCHW
        img = np.asarray(img, np.float32)
        img_01 = img / 255.0
        # Replicate the single gray channel to the 3 channels DenseNet expects.
        img_01 = img_01 * np.ones([1, 3, 1, 1], np.float32)
        test_imgs_tensor = torch.from_numpy(img_01).cuda()
        with torch.no_grad():
            pred_box = self.net(test_imgs_tensor) # NCHW
            pred_box = pred_box.detach().cpu().numpy()
        return pred_box[0]
47 |
48 |
class TrimMachine():
    """Blank out or crop away regions of a submit x-ray without spine.

    Every method takes a single-channel image of shape (H, W). The spine
    bounding box is predicted by the pretrained Box network on a zoomed
    copy of the image, and the resulting fractional coordinates are then
    applied back at the original resolution.
    """

    def __init__(self):
        # Box predictor loaded from the trainval checkpoint.
        self.box_predictor = Box()

    def trim_height(self, img_gray):
        """Zero-fill the bands above/below the predicted spine box.

        The image height is intentionally unchanged (zero fill, no crop):
        the cobb-angle parser depends on the original height.
        """
        assert len(img_gray.shape) == 2, "h, w"
        height = float(img_gray.shape[0])
        width = float(img_gray.shape[1])
        # Fix height at 752 (training image height); width keeps the ratio.
        target_h = 752.
        zoom_rate = target_h / height
        target_w = width * zoom_rate
        zoomed = cv2.resize(img_gray, dsize=(int(target_w), int(target_h)),
                            interpolation=cv2.INTER_CUBIC)
        _, _, y_min, y_max = self.box_predictor.predict_box(zoomed)
        # Never cut above 70% of the image height.
        y_max = max(y_max, 0.7)
        top = int(height * y_min)
        bottom = int(height * y_max + 0.05 * height)  # 5% safety margin below
        assert top < bottom
        img_gray[:top, :] = 0
        img_gray[bottom:, :] = 0
        return img_gray

    def trim_width(self, img_gray):
        """Crop a window of width h/3 centered on the predicted spine box."""
        assert len(img_gray.shape) == 2, "h, w"
        height = float(img_gray.shape[0])
        width = float(img_gray.shape[1])
        # Fix width at 256 (training image width); height keeps the ratio.
        target_w = 256.
        zoom_rate = target_w / width
        target_h = height * zoom_rate
        zoomed = cv2.resize(img_gray, dsize=(int(target_w), int(target_h)),
                            interpolation=cv2.INTER_CUBIC)
        x_min, x_max, _, _ = self.box_predictor.predict_box(zoomed)
        # Center the crop on the box; clamp the window to the image borders.
        center_x = (width * x_min + width * x_max) / 2
        expected_w = height / 3
        left = max(0, center_x - (expected_w / 2))
        right = min(width, center_x + (expected_w / 2))
        return img_gray[:, int(left): int(right)]

    def trim_width_height(self, img_gray):
        """Crop width (3% margin each side) and zero-fill outside the box
        vertically (5% bottom margin)."""
        assert len(img_gray.shape) == 2, "h, w"
        height = float(img_gray.shape[0])
        width = float(img_gray.shape[1])
        target_w = 256.
        zoom_rate = target_w / width
        target_h = height * zoom_rate
        zoomed = cv2.resize(img_gray, dsize=(int(target_w), int(target_h)),
                            interpolation=cv2.INTER_CUBIC)
        x_min, x_max, y_min, y_max = self.box_predictor.predict_box(zoomed)
        left = int(width * x_min - 0.03 * width)
        right = int(width * x_max + 0.03 * width)
        assert left < right
        img_gray = img_gray[:, left: right]
        top = int(height * y_min)
        bottom = int(height * y_max + 0.05 * height)
        assert top < bottom
        img_gray[:top, :] = 0
        img_gray[bottom:, :] = 0
        return img_gray
124 |
125 |
def main():
    """Trim the width of every submit-test image with TrimMachine and write
    the result to f.submit_test_trim_images (or just preview on screen when
    `plot` is True).
    """
    plot = False  # True: show before/after windows instead of writing files
    os.makedirs(f.submit_test_trim_images, exist_ok=True)
    trim_machine = TrimMachine()
    test_imgs = glob.glob(path.join(f.submit_test_img, '*.jpg')) # Wildcard of test images
    for img_path in test_imgs:
        img_gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) # HW
        basename = path.basename(img_path)
        # Crop width, then crop height might be better, because
        # width crop is easier, and a trimmed width gives more budget to height in a fixed resize ratio (1: 3)
        crop_img = trim_machine.trim_width(img_gray)
        # crop_img = trim_machine.trim_height(img_gray)
        # crop_img = trim_machine.trim_width_height(img_gray)

        if plot:
            # Preview both images resized to the training resolution.
            crop_img_show = cv2.resize(crop_img, dsize=(256, 752))
            img_gray_show = cv2.resize(img_gray, dsize=(256, 752))
            cv2.imshow("Ori", img_gray_show)
            cv2.imshow("Crop", crop_img_show)
            print(path.basename(img_path))
            cv2.waitKey(0)
        else:
            cv2.imwrite(path.join(f.submit_test_trim_images, basename), crop_img)
            print(basename)

if __name__=="__main__":
    main()
--------------------------------------------------------------------------------
/part_affinity_field_net.py:
--------------------------------------------------------------------------------
1 | import torchvision.models as models
2 | import torch
3 | import torch.nn as nn
4 | from torch.nn.functional import interpolate
5 |
6 | # Tried downsample and refine, results not good
class SpineModelPAF(nn.Module):
    """Multi-stage two-branch heatmap network in the OpenPose spirit.

    A VGG19-BN-derived trunk (model_0) produces a shared feature map; five
    stages each run two branches predicting `pcm_n` part confidence maps
    and `paf_n` part affinity field maps. Stage 1 sees only the trunk
    features; stages 2..5 additionally see the (downsampled) previous
    stage's outputs. forward() returns the last stage's maps plus the mean
    over all five stages (intermediate supervision targets).
    """
    def __init__(self):
        super(SpineModelPAF, self).__init__()
        self.pcm_n = 2  # confidence-map output channels per stage
        self.paf_n = 1  # affinity-field output channels per stage

        import torchvision.models.vgg as vgg
        vgg19 = vgg.vgg19_bn(pretrained=False)
        # Flatten vgg19.features into a plain layer list we can edit.
        top_layers = list(list(vgg19.children())[0].children())
        # Accept 1-channel (grayscale) input instead of RGB.
        top_layers[0] = nn.Conv2d(1, 64, kernel_size=3, padding=1)
        tops = top_layers[:33] # Top 10 (conv batch relu)*10 + maxpool * 3
        tops.pop(26) # delete third max pool
        [tops.append(l) for l in self.make_conv_layers(512, 256)]
        [tops.append(l) for l in self.make_conv_layers(256, 128)]
        self.model_0 = nn.Sequential(*tops) # out: 32, 94

        # Branch factories: *_1 models predict PCM, *_2 models predict PAF.
        s1_pcm = lambda: self.stage1(self.pcm_n)
        s1_paf = lambda: self.stage1(self.paf_n)
        sn_pcm = lambda: self.stageN(self.pcm_n)
        sn_paf = lambda: self.stageN(self.paf_n)

        self.model1_1 = s1_pcm()
        self.model1_2 = s1_paf()

        self.model2_1 = sn_pcm()
        self.model2_2 = sn_paf()

        self.model3_1 = sn_pcm()
        self.model3_2 = sn_paf()

        self.model4_1 = sn_pcm()
        self.model4_2 = sn_paf()

        self.model5_1 = sn_pcm()
        self.model5_2 = sn_paf()

    def make_conv_layers(self, in_channels, out_channels, kernels=3, padding=1, ReLU=True):
        """Conv2d (+ BatchNorm + ReLU unless ReLU=False) as a Sequential."""
        conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernels, padding=padding)
        if ReLU:
            layers = [conv2d, nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True)]
        else:
            layers = [conv2d]
        return nn.Sequential(*layers)

    def stage1(self, out_channels):
        """First-stage branch: 4x upsampling (two ConvTranspose2d, stride 2
        each) back to input resolution, 3x3 convs, then 1x1 head."""
        layers = []
        layers.append(self.make_conv_layers(128, 128))
        layers.append(nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1))
        layers.append(self.make_conv_layers(64, 64))
        layers.append(nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1))
        layers.append(self.make_conv_layers(32, 32))
        layers.append(self.make_conv_layers(32, 32))
        layers.append(self.make_conv_layers(32, 32, kernels=1, padding=0))
        layers.append(self.make_conv_layers(32, out_channels, kernels=1, padding=0, ReLU=False))
        return nn.Sequential(*layers)

    def stageN(self, out_channels):
        """Refinement-stage branch: like stage1 but with 7x7 convs and an
        input widened by the previous stage's pcm+paf channels."""
        layers = []
        layers.append(self.make_conv_layers(128+self.pcm_n+self.paf_n, 128, kernels=7, padding=3))
        layers.append(nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1))
        layers.append(self.make_conv_layers(64, 64, kernels=7, padding=3))
        layers.append(nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1))
        layers.append(self.make_conv_layers(32, 32, kernels=7, padding=3))
        layers.append(self.make_conv_layers(32, 32, kernels=7, padding=3))
        layers.append(self.make_conv_layers(32, 32, kernels=1, padding=0))
        layers.append(self.make_conv_layers(32, out_channels, kernels=1, padding=0, ReLU=False))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (pcm_stage5, paf_stage5, mean_pcm_over_stages, mean_paf_over_stages)."""
        def down(x):
            # Stage outputs are at input resolution; trunk features are at
            # 1/4 resolution, so shrink before concatenating.
            # NOTE(review): default interpolate mode is 'nearest'.
            return nn.functional.interpolate(x, scale_factor=0.25)
        out1 = self.model_0(x)
        out1_1 = self.model1_1(out1)
        out1_2 = self.model1_2(out1)


        out2 = torch.cat([down(out1_1), down(out1_2), out1], dim=1)
        out2_1 = self.model2_1(out2)
        out2_2 = self.model2_2(out2)

        out3 = torch.cat([down(out2_1), down(out2_2), out1], dim=1)
        out3_1 = self.model3_1(out3)
        out3_2 = self.model3_2(out3)

        out4 = torch.cat([down(out3_1), down(out3_2), out1], dim=1)
        out4_1 = self.model4_1(out4)
        out4_2 = self.model4_2(out4)

        out5 = torch.cat([down(out4_1), down(out4_2), out1], dim=1)
        out5_1 = self.model5_1(out5)
        out5_2 = self.model5_2(out5)

        # Stack all stages and average: supervising the mean supervises
        # every stage (intermediate supervision).
        loss1_pcm_img = torch.stack([out1_1, out2_1, out3_1, out4_1, out5_1])
        loss2_paf_img = torch.stack([out1_2, out2_2, out3_2, out4_2, out5_2])

        loss1_pcm_img = torch.mean(loss1_pcm_img, dim=0)
        loss2_paf_img = torch.mean(loss2_paf_img, dim=0)

        return out5_1, out5_2, loss1_pcm_img, loss2_paf_img
107 |
108 |
class CobbAngleModel(nn.Module):
    """
    This model is used to predict cobb angles from heatmaps.

    Input:  NCHW tensor with ``in_channels`` channels.
    Output: (N, 3) tensor, one linear regression value per angle.
    """

    def __init__(self, in_channels=1):
        super(CobbAngleModel, self).__init__()

        # VGG-like plan: ints are conv output channels, 'M' is a 2x2 max-pool.
        cfgs = [256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 512]

        def conv_bn_relu(c_in, c_out, kernels=3, padding=1, ReLU=True):
            # One conv, optionally followed by batch-norm + ReLU.
            conv2d = nn.Conv2d(c_in, c_out, kernel_size=kernels, padding=padding)
            if not ReLU:
                return nn.Sequential(conv2d)
            return nn.Sequential(conv2d, nn.BatchNorm2d(c_out), nn.ReLU(inplace=True))

        # Module names ("cfg{i}", "avg") are kept stable so existing
        # checkpoints keep matching the state_dict keys.
        self.conv_layers = nn.Sequential()
        channels = in_channels
        for i, cfg in enumerate(cfgs):
            if cfg == 'M':
                module = nn.MaxPool2d(kernel_size=2, stride=2)
            else:
                module = conv_bn_relu(channels, cfg)
                channels = cfg
            self.conv_layers.add_module("cfg{}".format(i), module)
        # Fixed 4x4 spatial output regardless of input resolution.
        self.conv_layers.add_module("avg", nn.AdaptiveAvgPool2d((4, 4)))

        # Dense regression head: 512*4*4 -> 2048 -> 2048 -> 3 angles.
        self.classifier = nn.Sequential(
            nn.Linear(512*4*4, 2048),
            nn.Dropout(),
            nn.ReLU(True),
            nn.Linear(2048, 2048),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(2048, 3),
        )

    def forward(self, x):
        features = self.conv_layers(x)
        flat = features.view(features.size(0), -1)
        return self.classifier(flat)
155 |
156 |
157 |
158 |
--------------------------------------------------------------------------------
/redundant_bones_filter.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import os.path as path
4 | import box_crop
5 | import cv2
6 |
7 | def _get_filtered_pairs(pair_lr_value, evidences, low, high):
8 | # delete redundant pairs
9 | # evidences: according to what to delete pairs. e.g. length array, x value array etc.
10 | remain_indices = []
11 | for i, x in enumerate(evidences):
12 | if x > low and x < high:
13 | remain_indices.append(i)
14 | remain_indices = np.array(remain_indices)
15 | pair_lr_value = pair_lr_value[:, remain_indices,:]
16 | return pair_lr_value
17 |
def filter(pair_lr_value):
    """Statistically remove redundant/outlier bone pairs.

    Three passes: (1) drop pairs whose midpoint x is an outlier (±2 std),
    (2) drop pairs with outlier bone length (±3 std), (3) repeatedly drop
    the first/last pair when its gap to the neighbor exceeds the typical
    y-interval (+3 std); finally truncate to at most 16 pairs, keeping the
    bottom ones.

    NOTE(review): this shadows the builtin `filter`; kept for caller
    compatibility.

    :param pair_lr_value: ndarray [2 (left/right)][pair][xy], pairs assumed
        sorted by y
    :return: dict with the thresholds used ("x_low", "x_high", "len_low",
        "len_high", "int_high") and the surviving pairs under
        "pair_lr_value"
    """
    delete_pairs = True  # False: compute thresholds only, delete nothing
    assert len(pair_lr_value.shape) == 3
    # TODO: Slope change

    result_dict = {}
    # ------------------------------------------------
    # Crop by x value of mid points
    # Midpoints
    hmids = (pair_lr_value[0] + pair_lr_value[1]) / 2 # Horizontal mid points, [p][xy]
    hmids_x = hmids[:, 0] # [p] Midpoint x

    limit_factor = 2
    m = np.mean(hmids_x)
    dev = np.std(hmids_x)
    x_low = m - limit_factor * dev
    x_high = m + limit_factor * dev
    # print("mean: ", m)
    # print("deviation: ", dev)
    # print("+-{} dev: ".format(limit_factor), x_low*dev, x_high)
    # plt.hist(hmids_x, bins=20)
    result_dict["x_low"] = x_low
    result_dict["x_high"] = x_high
    # delete from pairs
    if delete_pairs:
        pair_lr_value = _get_filtered_pairs(pair_lr_value, hmids_x, x_low, x_high)
    # print(pair_lr_value.shape[1])

    # -----------------------------------------------------
    # Crop by length of bones


    limit_factor = 3
    bones = pair_lr_value[1] - pair_lr_value[0] # [p][xy]
    lens = np.linalg.norm(bones, axis=-1)

    m = np.mean(lens)
    dev = np.std(lens)
    len_low = m - limit_factor * dev
    len_high = m + limit_factor * dev
    # print("mean: ", m)
    # print("deviation: ", dev)
    # print("+-{} dev: ".format(limit_factor), m-limit_factor*dev, m+limit_factor*dev)
    result_dict["len_low"] = len_low
    result_dict["len_high"] = len_high
    # plt.hist(lens, bins=30)

    # delete from pairs
    if delete_pairs:
        pair_lr_value = _get_filtered_pairs(pair_lr_value, lens, len_low, len_high)
    # print(pair_lr_value.shape[1])

    # -----------------------------------------------------
    # Delete first/ last bone or not?
    # Crop by NEAREST Y INTERVAL (must proceed after other standards)
    # Suppose pair_lr_value is sorted by y
    assert pair_lr_value.shape[1] > 4, "not enough bones to sample and to trim first/last one"

    num_del = -1
    while num_del != 0: # do until no more crops

        hmids = (pair_lr_value[0] + pair_lr_value[1]) / 2 # Horizontal mid points, [p][xy]
        hmids_y = hmids[:, 1] # [p] Midpoint y
        # Gaps between consecutive midpoints; the slice 2:-1 minus 1:-2
        # excludes the first and last gaps from the statistics.
        # NOTE(review): presumably intentional so outlier end bones don't
        # skew mean/std — confirm; the slices also drop the second-to-last
        # interior gap.
        intervals = hmids_y[2:-1] - hmids_y[1:-2]

        limit_factor = 3
        m = np.mean(intervals)
        dev = np.std(intervals)

        int_high = m + limit_factor * dev
        # print("+-{} dev: ".format(limit_factor), int_high)

        result_dict["int_high"] = int_high
        # plt.hist(intervals, bins=30)

        # Gaps of the first and last pairs to their single neighbor.
        first_bone_int = hmids_y[1] - hmids_y[0]
        last_bone_int = hmids_y[-1] - hmids_y[-2]
        # print("first/last", first_bone_int, last_bone_int)
        # delete from pairs
        prev_length = pair_lr_value.shape[1]
        if delete_pairs:
            if first_bone_int > int_high:
                pair_lr_value = pair_lr_value[:, 1:, :]
            if last_bone_int > int_high:
                pair_lr_value = pair_lr_value[:, :-1, :]

        current_length = pair_lr_value.shape[1]
        num_del = prev_length - current_length
        if num_del < 0:
            raise ValueError()

    # -----------------------------------------
    # If still more than 16 pairs, drop the redundant ones from the TOP
    # (keep the bottom 16).
    if pair_lr_value.shape[1] > 16:
        pair_lr_value = pair_lr_value[:, -16:, :]
    result_dict["pair_lr_value"] = pair_lr_value
    return result_dict
115 |
def simple_filter(pair_lr_value):
    """Crude cleanup: keep only pairs whose midpoint y lies in (190, 950),
    then truncate to the first 17 pairs (topmost vertebrae first).

    :param pair_lr_value: ndarray [2 (left/right)][pair][xy]
    :return: filtered pair_lr_value
    """
    midpoints = (pair_lr_value[0] + pair_lr_value[1]) / 2  # [pair][xy]
    mid_y = midpoints[:, 1]  # midpoint y per pair
    pair_lr_value = _get_filtered_pairs(pair_lr_value, mid_y, 190, 950)
    # Keep at most 17 pairs (indices 0..16).
    if pair_lr_value.shape[1] > 17:
        return pair_lr_value[:, :17, :]
    return pair_lr_value
125 |
def centeroid(heat, gaussian_thresh=0.5):
    """Return the center of mass of each connected blob in a heatmap.

    :param heat: float heatmap, values roughly in [0, 1]
    :param gaussian_thresh: binarization threshold applied before labeling
    :return: list of [x, y] integer centers, one per connected component
    """
    _, binary = cv2.threshold(heat, gaussian_thresh, 1., cv2.THRESH_BINARY)
    binary = np.array(binary * 255., np.uint8)
    # connectedComponents counts the background as label 0, so real blobs
    # are labels 1 .. n_labels-1.
    n_labels, label_map = cv2.connectedComponents(binary)
    centers = []
    for lbl in range(1, n_labels):
        component = np.zeros_like(label_map, dtype=np.uint8)
        component[label_map == lbl] = 255
        moments = cv2.moments(component)
        # Center of mass from the first-order image moments.
        centers.append([int(moments["m10"] / moments["m00"]),
                        int(moments["m01"] / moments["m00"])])
    return centers
142 |
def filter_by_spine_range(spine_range, pair_lr_value):
    """Drop pairs above the lowest spine-range blob in the top half.

    :param spine_range: heatmap (H, W) marking the valid spine region
    :param pair_lr_value: ndarray [2 (left/right)][pair][xy]
    :return: pair_lr_value reduced to pairs below that blob (unchanged when
        no blob is found)
    """
    img_h = spine_range.shape[0]
    # Only inspect the upper half, with 80-px margins cut on both sides.
    upper_part = spine_range[:img_h // 2, 80:-80]
    blob_centers = centeroid(upper_part, gaussian_thresh=0.3)  # [p][xy]
    midpoints = (pair_lr_value[0] + pair_lr_value[1]) / 2  # [pair][xy]
    mid_y = midpoints[:, 1]

    if len(blob_centers) != 0:
        # Keep only pairs strictly below the lowest blob (up to y=1120).
        lowest_blob_y = np.array(blob_centers)[:, 1].max()
        pair_lr_value = _get_filtered_pairs(pair_lr_value, mid_y, lowest_blob_y, 1120)
    return pair_lr_value
157 |
class BoxNetFilter():
    """Filter bone pairs with the box-prediction network: keep only pairs
    whose midpoint x falls inside the predicted spine bounding box."""

    def __init__(self):
        # Pretrained spine bounding-box predictor.
        self.box = box_crop.Box()

    def filter(self, pair_lr_value, image):
        """
        :param pair_lr_value: ndarray [2 (left/right)][pair][xy]
        :param image: grayscale image (H, W) the pairs were detected on
        :return: pairs whose midpoint x lies inside the predicted box
        """
        assert len(image.shape)==2
        img_h = float(image.shape[0])
        img_w = float(image.shape[1])
        midpoints = (pair_lr_value[0] + pair_lr_value[1]) / 2  # [pair][xy]
        mid_x = midpoints[:, 0]
        # The box net expects a 256-wide input; height keeps the ratio.
        target_w = 256.
        zoom_rate = target_w / img_w
        target_h = img_h * zoom_rate
        zoomed = cv2.resize(image, dsize=(int(target_w), int(target_h)),
                            interpolation=cv2.INTER_CUBIC)
        x_min, x_max, _, _ = self.box.predict_box(zoomed)
        # Convert fractional box x-bounds back to pixels on the original image.
        left = int(img_w * x_min)
        right = int(img_w * x_max)
        return _get_filtered_pairs(pair_lr_value, mid_x, left, right)
179 |
180 |
--------------------------------------------------------------------------------
/train_spine_box.py:
--------------------------------------------------------------------------------
1 | import torchvision.models
2 | import torch.nn as nn
3 | import numpy as np
4 | import load_utils
5 | import spine_augmentation as aug
6 | import confidence_map as cmap
7 | import part_affinity_field_net
8 | import ladder_shufflenet
9 | import torch.optim as optim
10 | import torch
11 | import os.path as path
12 | import torchvision
13 | import matplotlib.pyplot as plt
14 | import cv2
15 | from PIL import Image
16 | import folders as f
17 | import os
18 | import argparse
19 |
def draw_box_on_image(image, box, file):
    """Draw a normalized bounding box as four full-width/height lines on a
    grayscale image and save it as "<file>.jpg".

    :param image: grayscale image (H, W); modified in place by cv2.line
    :param box: [x_min, x_max, y_min, y_max], each normalized to [0, 1]
    :param file: output path without the ".jpg" extension
    """
    assert len(image.shape) == 2, "hw"
    h, w = image.shape
    # x_min, x_max, y_min, y_max = box
    # Scale to pixels and cast to int: cv2.line requires integer point
    # coordinates (numpy floats raise an error on OpenCV >= 4).
    x_min, x_max = (int(v) for v in box[0:2] * w)
    y_min, y_max = (int(v) for v in box[2:4] * h)
    cv2.line(image, (x_min, 0), (x_min, h), (255), thickness=2)
    cv2.line(image, (x_max, 0), (x_max, h), (255), thickness=2)
    cv2.line(image, (0, y_min), (w, y_min), (255), thickness=2)
    cv2.line(image, (0, y_max), (w, y_max), (255), thickness=2)
    cv2.imwrite("{}.jpg".format(file), image)
31 |
def label_normalize(batch_labels, batch_imgs):
    """
    Normalize point coordinates to [0, 1] for training the prediction network.

    :param batch_labels: [N][point][xy] pixel coordinates
    :param batch_imgs: NCHW image batch (only H and W are used)
    :return: float32 ndarray [N][point][xy], x divided by W, y divided by H
    """
    img_h, img_w = np.asarray(batch_imgs).shape[2:4]
    normalized = np.array(batch_labels, np.float32)
    # Divide x by image width, y by image height.
    normalized[:, :, 0] /= img_w
    normalized[:, :, 1] /= img_h
    return normalized
46 |
def get_box(labels):
    """Axis-aligned bounding box of each point set.

    :param labels: [N][point][xy]
    :return: ndarray [N][4] holding (x_min, x_max, y_min, y_max) per sample
    """
    pts = np.array(labels)
    x_coords = pts[:, :, 0]
    y_coords = pts[:, :, 1]
    return np.stack([np.min(x_coords, axis=1),
                     np.max(x_coords, axis=1),
                     np.min(y_coords, axis=1),
                     np.max(y_coords, axis=1)], axis=-1)
58 |
def submit_test(net):
    """Run the box net on every resized submit-test image, save a box
    visualization per image to f.submit_test_box_plot, then terminate the
    process.

    :param net: box-prediction network; moved to eval mode on GPU here
    """
    import glob
    net.eval().cuda()
    test_imgs = glob.glob(path.join(f.resize_submit_test_img, '*')) # Wildcard of test images
    for img_path in test_imgs:
        base_name = path.basename(img_path)[:-4]  # strip the ".jpg" extension
        img_gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) # HW
        img = [[img_gray]] # NCHW
        img = np.asarray(img, np.float32)
        img_01 = img / 255.0
        # Replicate the gray channel to the 3 channels DenseNet expects.
        img_01 = img_01 * np.ones([1,3,1,1], np.float32)
        test_imgs_tensor = torch.from_numpy(img_01).cuda()
        with torch.no_grad():
            pred_box = net(test_imgs_tensor) # NCHW
            pred_box = pred_box.detach().cpu().numpy()
        draw_box_on_image(img_gray, pred_box[0], path.join(f.submit_test_box_plot, base_name))
        print(base_name)
    # Deliberately ends the whole program: this is a terminal utility mode.
    exit(0)
77 |
78 |
if __name__ == "__main__":
    # Train a DenseNet-121 to regress the normalized spine bounding box
    # (x_min, x_max, y_min, y_max) from a grayscale x-ray.
    parser = argparse.ArgumentParser(description='Train a box of spine.')
    parser.add_argument('-s', type=int, default=10, help='batch size')
    parser.add_argument("--trainval", action='store_true', default=False)
    parser.add_argument("--lr", type=float, default=0.001, help="initial learning rate")
    parser.add_argument("--submit_test", action="store_true")
    args = parser.parse_args()

    os.makedirs(f.train_box_results, exist_ok=True)
    os.makedirs(f.checkpoint, exist_ok=True)
    os.makedirs(f.submit_test_box_plot, exist_ok=True)

    # ImageNet-pretrained backbone; classifier replaced by a 4-value
    # sigmoid head so outputs stay in [0, 1] like the normalized labels.
    net = torchvision.models.densenet121(pretrained=True)
    num_conv_features = net.features[-1].num_features
    classifier = nn.Sequential(nn.Linear(num_conv_features, 4), nn.Sigmoid())
    net.classifier = classifier

    if not torch.cuda.is_available():
        raise RuntimeError("GPU not available")
    batch_size = args.s
    print("Training with batch size: %d" % batch_size)
    if args.trainval: # Final training, use train and val set
        train_data_loader = load_utils.train_loader(batch_size, use_trainval=True)
        print("--- Using [train, val] set as training set!")
    else:
        train_data_loader = load_utils.train_loader(batch_size)
    test_data_loader = load_utils.test_loader(batch_size)


    # Load checkpoint
    # If in trainval mode, no "trainval" checkpoint found,
    # and the checkpoint for "train" mode exists,
    # then load the "train" checkpoint for "trainval" training
    if not args.trainval:
        save_path = f.checkpoint_box_path
        if path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            print("Model loaded")
        else:
            print("New model created")
    else: # Trainval mode
        save_path = f.checkpoint_box_trainval_path
        if path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            print("Load model weights from [trainval] checkpoint")
        elif path.exists(f.checkpoint_box_path):
            net.load_state_dict(torch.load(f.checkpoint_box_path))
            print("No [trainval] checkpoint but [train] checkpoint exists. Load [train]")
        else:
            print("No [trainval] or [train] checkpoint, training [train, val] from scratch")

    if args.submit_test:
        # Plot boxes on the submit-test set and exit (submit_test calls exit(0)).
        submit_test(net)

    net.cuda().train()

    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=2000, verbose=True) # Be patient for n steps

    step = 0
    for train_imgs, train_labels in train_data_loader:
        train_imgs, train_labels = aug.augment_batch_img_for_box(train_imgs, train_labels)
        # NOTE(review): `cm` appears unused in this loop — presumably a
        # leftover from the heatmap training script; confirm before removing.
        cm = cmap.ConfidenceMap()
        # Classify labels as (top left, top right, bottom left, bottom right, left center, right center)

        optimizer.zero_grad()
        criterion = nn.MSELoss()
        # To numpy, NCHW. normalize to [0, 1]
        train_imgs = np.asarray(train_imgs, np.float32)[:, np.newaxis, :, :] / 255.0
        # To 3 dim color images
        train_imgs = train_imgs * np.ones([1, 3, 1, 1], dtype=np.float32)
        # Normalize train labels to [0, 1] to predict them directly
        norm_labels = label_normalize(train_labels, train_imgs)
        box_labels = get_box(norm_labels)
        # To tensor
        t_train_imgs = torch.from_numpy(np.asarray(train_imgs)).cuda()
        t_train_labels = torch.from_numpy(box_labels).cuda()

        t_pred_labels = net(t_train_imgs)

        # MSE between normalized target box and predicted box.
        loss = criterion(t_train_labels, t_pred_labels)
        # point regression loss
        loss.backward()
        optimizer.step()
        step = step + 1
        loss_value = loss.item()
        # Plateau scheduler is driven by the (noisy) per-step training loss.
        scheduler.step(loss_value)
        lr = optimizer.param_groups[0]['lr']
        print("Step: %d, Loss: %f, LR: %f" % (step, loss_value, lr))

        # Save every 200 steps.
        if step % 200 == 0:
            torch.save(net.state_dict(), save_path)
            print("Model saved")

        # Stop once the scheduler has decayed the LR far enough.
        if lr <= 0.00005:
            print("Stop on plateau")
            break

        # Periodic evaluation on one test batch (offset by 1 so it never
        # collides with the save step).
        if step % 200 == 1:
            net.eval()
            test_imgs, test_labels = next(test_data_loader)
            test_imgs = np.asarray(test_imgs, np.float32)[:, np.newaxis, :, :]
            test_imgs_01 = test_imgs / 255.0
            test_imgs_01 = test_imgs_01 * np.ones([1, 3, 1, 1], dtype=np.float32)
            test_norm_labels = label_normalize(test_labels, test_imgs)
            test_box_labels = get_box(test_norm_labels)
            with torch.no_grad():
                test_imgs_tensor = torch.from_numpy(test_imgs_01).cuda()
                t_test_pred_labels = net(test_imgs_tensor) # NCHW
                test_pred_labels = t_test_pred_labels.detach().cpu().numpy()
            print(test_pred_labels, test_box_labels, test_pred_labels-test_box_labels)
            # print(test_box_labels)

            # Visualize the predicted box on the first test image.
            test_img = test_imgs[0][0]
            test_box_labels = test_pred_labels[0]
            draw_box_on_image(test_img, test_box_labels, path.join(f.train_box_results, str(step)))
            net.train()
--------------------------------------------------------------------------------
/eval.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import ladder_shufflenet
3 | import part_affinity_field_net
4 | import torch
5 | import os
6 | import os.path as path
7 | import glob
8 | import folders as f
9 | import cv2
10 | import torch.nn.functional as F
11 | import argparse
12 |
def centeroid(heat, gaussian_thresh=0.5):
    """Return the center of mass of each connected blob in a heatmap.

    NOTE(review): duplicated in redundant_bones_filter.py — consider
    sharing one implementation.

    :param heat: float heatmap, values roughly in [0, 1]
    :param gaussian_thresh: binarization threshold applied before labeling
    :return: list of [x, y] integer centers, one per connected component
    """
    _, binary = cv2.threshold(heat, gaussian_thresh, 1., cv2.THRESH_BINARY)
    binary = np.array(binary * 255., np.uint8)
    # connectedComponents counts the background as label 0, so real blobs
    # are labels 1 .. n_labels-1.
    n_labels, label_map = cv2.connectedComponents(binary)
    centers = []
    for lbl in range(1, n_labels):
        component = np.zeros_like(label_map, dtype=np.uint8)
        component[label_map == lbl] = 255
        moments = cv2.moments(component)
        # Center of mass from the first-order image moments.
        centers.append([int(moments["m10"] / moments["m00"]),
                        int(moments["m01"] / moments["m00"])])
    return centers
29 |
def predict_heatmaps(img_folder, out_folder):
    """Predict corner/PAF heatmaps for every image in img_folder and write
    three visualizations per image into out_folder: the heatmap grid, a
    "7marks_" image with all parsed landmarks, and a "4marks_" image with
    only the 4 corner landmark types.

    Relies on the module-global `args` parsed in __main__ (args.trainval
    chooses which checkpoint to load).

    :param img_folder: folder of input grayscale images
    :param out_folder: output folder, created if missing
    :raises FileNotFoundError: if the selected checkpoint does not exist
    """
    os.makedirs(out_folder, exist_ok=True)
    test_imgs = glob.glob(path.join(img_folder, '*')) # Wildcard of test images
    device = torch.device("cuda") # CUDA
    net = ladder_shufflenet.LadderModelAdd()
    net.eval()
    net.cuda()

    if args.trainval:
        print("Load [train, val] checkpoint")
        save_path = f.checkpoint_heat_trainval_path
    else:
        print("Load [train] checkpoint")
        save_path = f.checkpoint_heat_path

    if path.exists(save_path):
        net.load_state_dict(torch.load(save_path))
        print("Heat Model loaded")
    else:
        # Fix: the path was passed as a second exception argument
        # (logging style), so "%s" was never interpolated into the message.
        raise FileNotFoundError("No checkpoint.pth at %s" % save_path)
    print("images to be predict:" + str(test_imgs))
    for img_path in test_imgs:
        img_gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) # HW
        img = [[img_gray]] # NCHW
        img = np.asarray(img, np.float32)
        img_01 = img / 255.0
        test_imgs_tensor = torch.from_numpy(img_01).to(device)
        with torch.no_grad():
            out_dict = net(test_imgs_tensor) # NCHW
            out_pcm, out_paf = out_dict["pcm"], out_dict["paf"]

        # Plot and save image (exclude neck)
        heats = torch.cat([test_imgs_tensor, out_pcm[:, 0:6], out_paf], dim=1)
        # heats = F.interpolate(heats, size=(test_imgs_tensor.size(2), test_imgs_tensor.size(3)), mode="bilinear")
        np_heats = heats.detach().cpu().numpy() # NCHW (0,1)
        np_heats = np.clip(np_heats, 0., 1.)[0] # Remove dim 'N'
        np_heats = np.transpose(np_heats, (1, 2, 0)) # HWC (0,1)

        # Plot on image
        # 6 corner 1 paf
        # RGB: White(original image), Blue, Yellow, Cyan, Magenta, Red, Lime, Green
        colors = np.array([(255, 255, 255), (0,0,255), (255,255,0), (0,255,255), (255,0,255), (255,0,0), (0,255,0), (0,128,0)], np.float32)

        bgr_colors = colors[:, ::-1] # [Channel][Color]
        np_heats_c = np_heats[..., np.newaxis] # HW[Channel][Color] [0,1]
        # Heat mask
        color_heats = np_heats_c * bgr_colors # HW[Channel][Color]
        # Image as background
        img_bgr = np.asarray(img_gray, np.float32)[..., np.newaxis][..., np.newaxis] # [H][W][Ch][Co] (0,255)

        # 50/50 blend of grayscale background and colorized heatmap.
        img_heats = (img_bgr / 2.) + (color_heats / 2.)
        ch_HWCo = np.split(img_heats, img_heats.shape[2], axis=2) # CH [H W 1 CO]
        ch_HWCo = [np.squeeze(HW1Co, axis=2) for HW1Co in ch_HWCo] # CH [H W CO]

        # Merge left/right channel pairs into one panel each via pixel max.
        ori_img = ch_HWCo[0]
        lt_rt_img = np.amax(ch_HWCo[1:3], axis=0)
        lb_rb_img = np.amax(ch_HWCo[3:5], axis=0)
        lc_rc_img = np.amax(ch_HWCo[5:7], axis=0)
        paf_img = ch_HWCo[7]
        # img_bgr = img_bgr[:,:,0,:] * np.ones([3]) # Expand color channels 1->3
        grid_image = np.concatenate([ori_img, lt_rt_img, lb_rb_img, lc_rc_img, paf_img], axis=1) # Concat to Width dim, H W Color
        grid_image = grid_image.astype(np.uint8)
        img_name = path.basename(img_path)
        cv2.imwrite(path.join(out_folder, img_name), grid_image)
        print(img_name)
        # cv2.imshow("image", grid_image)
        # cv2.waitKey()
        ############################
        # Gaussian to point
        # coord_list shape=(heatmaps, coords, xy)
        coord_list = [centeroid(np_heats[:, :, c]) for c in range(1, np_heats.shape[2])] # 1 is original image
        img_HWC = img_gray[:, :, np.newaxis] * np.ones((1,1,3))
        for i, coords in enumerate(coord_list): # Different heatmaps (corners)
            mark_color = bgr_colors[i+1]
            for coord in coords: # Same kind, different coordinate landmarks
                cv2.circle(img_HWC, center=tuple(coord), radius=3, color=tuple([int(c) for c in mark_color]))
        cv2.imwrite(path.join(out_folder, "7marks_" + img_name), img_HWC)

        # Same, but only the 4 corner heatmaps (channels 1..4).
        coord_list = [centeroid(np_heats[:, :, c]) for c in range(1, 5)] # 1 is original image
        img_HWC = img_gray[:, :, np.newaxis] * np.ones((1, 1, 3))
        for i, coords in enumerate(coord_list): # Different heatmaps (corners)
            mark_color = bgr_colors[i + 1]
            for coord in coords: # Same kind, different coordinate landmarks
                cv2.circle(img_HWC, center=tuple(coord), radius=3, color=tuple([int(c) for c in mark_color]))
        cv2.imwrite(path.join(out_folder, "4marks_" + img_name), img_HWC)
115 |
116 |
117 |
118 |
119 | """
120 | def eval_submit_testset():
121 | import csv
122 | result_name_an123 = [] # Parsing results to be wrote
123 | submit_example = path.join(f.submit_test_img, "sample_submission.csv")
124 | with open(submit_example, 'r') as example:
125 | reader = csv.reader(example)
126 | example_content = list(reader)
127 | result_name_an123.append(example_content[0]) # Title line
128 | name_an123 = example_content[1:] # Exclude first title line "name, an1, an2, an3"
129 |
130 | net_heat = spine_model.SpineModelPAF()
131 | net_angle = spine_model.CobbAngleModel()
132 | net_heat.cuda()
133 | net_heat.eval()
134 | net_angle.cuda()
135 | net_angle.eval()
136 |
137 | save_path = f.checkpoint_heat_trainval_path if args.trainval else f.checkpoint_heat_path
138 | net_heat.load_state_dict(torch.load(save_path))
139 | save_path_a = f.checkpoint_angle_trainval_path if args.trainval else f.checkpoint_angle_path
140 | net_angle.load_state_dict(torch.load(save_path_a))
141 |
142 | device = torch.device("cuda") # Input device
143 |
144 | filename_list = list(zip(*name_an123))[0]
145 | for filename in filename_list:
146 | resize_filename = path.join(f.resize_submit_test_img, filename + ".jpg")
147 | np_img = cv2.imread(resize_filename, cv2.IMREAD_GRAYSCALE)
148 | np_img = [[np_img]] # NCHW
149 | np_img = np.asarray(np_img, np.float32)
150 |
151 | np_norm_img = np_img / 255.
152 | t_norm_img = torch.from_numpy(np_norm_img).to(device)
153 | with torch.no_grad():
154 | out_pcm, out_paf, _, _ = net_heat(t_norm_img)
155 | an123 = net_angle(out_paf)
156 | np_an123 = an123.detach().cpu().numpy()
157 | np_an123 = np_an123[0] * 90. # batch size 1
158 | np_an123 = np.clip(np_an123, a_min=0, a_max=100)
159 | result_line = [filename, np_an123[0], np_an123[1], np_an123[2]]
160 | result_name_an123.append(result_line)
161 | print(filename)
162 |
163 | with open(path.join(f.data_root, "submit_result.csv"), "w+", newline='') as result_csv_file:
164 | writer = csv.writer(result_csv_file)
165 | [writer.writerow(l) for l in result_name_an123]
166 | """
167 |
if __name__ == '__main__':
    # CLI: --trainval selects the [train+val] checkpoint inside the
    # prediction routines; --dataset picks which image set to run on.
    cli = argparse.ArgumentParser()
    cli.add_argument("--trainval", action='store_true', default=False)
    cli.add_argument("--dataset", type=str, help="Which set to predict? (val, test)", default="val")
    args = cli.parse_args()

    # Map dataset name -> (input image folder, plot output folder)
    routes = {
        "val": (f.resize_test_img, f.validation_plot_out),
        "test": (f.resize_submit_test_img, f.submit_test_plot_out),
    }
    if args.dataset in routes:
        src_folder, out_folder = routes[args.dataset]
        predict_heatmaps(src_folder, out_folder)
    else:
        print("Invalid dataset argument")
182 |
183 |
184 |
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | """
2 | Run this script to train the spine keypoint network
3 | """
4 | import numpy as np
5 | import load_utils
6 | import spine_augmentation as aug
7 | import confidence_map as cmap
8 | import part_affinity_field_net
9 | import ladder_shufflenet
10 | import torch.optim as optim
11 | import torch.nn as nn
12 | import torch
13 | import os.path as path
14 | import torchvision
15 | import matplotlib.pyplot as plt
16 | import cv2
17 | from PIL import Image
18 | import folders as f
19 | import os
20 | import argparse
21 |
def save_grid_images(img, gau, name):
    """Save heatmaps stacked with their input images as a single grid JPEG.

    :param img: NCHW image tensor in [0, 1]
    :param gau: NCHW heatmap tensor in [0, 1]
    :param name: file stem; written to f.train_results/<name>.jpg
    Relies on the module-level `batch_size` for the grid row length.
    """
    stacked = torch.cat((gau, img), dim=0)
    grid = torchvision.utils.make_grid(stacked, nrow=batch_size)
    # Tensor CHW -> numpy HWC, clamp to [0, 1], scale to 8-bit for cv2
    arr = grid.detach().cpu().numpy()
    arr = np.transpose(np.clip(arr, 0., 1.), (1, 2, 0))
    arr = (arr * 255.).astype(np.uint8)
    cv2.imwrite(path.join(f.train_results, "%s.jpg" % name), arr)
33 |
def label_normalize_flatten(batch_labels, batch_imgs):
    """
    Normalize pts to [0,1] for training the prediction network
    :param batch_labels: [N][J][xy] pixel coordinates
    :param batch_imgs: NCHW images; only the H/W sizes are read
    :return: float32 array of shape (N, J*2)
    """
    height, width = np.asarray(batch_imgs).shape[2:4]
    labels = np.array(batch_labels, np.float32)  # copy, original untouched
    # x by image width, y by image height
    labels[..., 0] /= width
    labels[..., 1] /= height
    # One flat coordinate vector per sample
    return labels.reshape((labels.shape[0], -1))
49 |
50 |
def plot_norm_pts(batch_imgs, batch_norm_pts, name):
    """Plot normalized landmark predictions over their images, one JPEG each.

    :param batch_imgs: NCHW single-channel images with values in [0, 255]
    :param batch_norm_pts: tensor (N, 68*2) with coordinates normalized to [0, 1]
    :param name: file-name prefix; saves "<name>_<i>_pts.jpg" per sample
    """
    hw = batch_imgs.shape[2:4]
    plt.style.use('grayscale')
    pts = batch_norm_pts.detach().cpu().numpy().reshape((batch_imgs.shape[0], 68, 2))
    scale = np.array((hw[1], hw[0]), np.float32)  # (w, h) to undo normalization
    for i, sample_pts in enumerate(pts):
        gray = batch_imgs[i, 0] / 255.  # NCHW -> HW, back to [0, 1]
        plt.imshow(Image.fromarray(gray))

        sample_pts *= scale
        x_list, y_list = np.transpose(sample_pts, axes=[1, 0]).tolist()  # [XY][J]
        plt.scatter(x_list, y_list, color='yellow', s=9)
        # Label every landmark with its joint index
        for j, (x, y) in enumerate(zip(x_list, y_list)):
            plt.annotate(j, (x, y), color='red', size=5)
        plt.axis("off")
        plt.savefig(path.join(f.train_results, "%s_%d_pts.jpg" % (name, i)), dpi=400)
        plt.clf()
72 |
if __name__ == "__main__":
    # Entry point: train the keypoint (heatmap + PAF) network.
    parser = argparse.ArgumentParser(description='Train network.')
    parser.add_argument('-s', type=int, default=4, help='batch size')
    parser.add_argument("--trainval", action='store_true', default=False)
    parser.add_argument("--lr", type=float, default=0.001, help="initial learning rate")
    parser.add_argument("--patience", type=int, default=5000, help="patience for decrease lr on plateau")
    args = parser.parse_args()

    os.makedirs(f.train_results, exist_ok=True)
    os.makedirs(f.checkpoint, exist_ok=True)

    net = ladder_shufflenet.LadderModelAdd()

    # Training is GPU-only: tensors below are moved with .cuda() unconditionally.
    if not torch.cuda.is_available():
        raise RuntimeError("GPU not available")
    batch_size = args.s
    print("Training with batch size: %d" % batch_size)
    if args.trainval: # Final training, use train and val set
        train_data_loader = load_utils.train_loader(batch_size, use_trainval=True)
        print("--- Using [train, val] set as training set!")
    else:
        train_data_loader = load_utils.train_loader(batch_size)
    # Test loader is created in both modes; used for the periodic visual check.
    test_data_loader = load_utils.test_loader(batch_size)
    device = torch.device("cuda")


    # Load checkpoint
    # If in trainval mode, no "trainval" checkpoint found,
    # and the checkpoint for "train" mode exists,
    # then load the "train" checkpoint for "trainval" training
    if not args.trainval:
        save_path = f.checkpoint_heat_path
        if path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            print("Model loaded")
        else:
            print("New model created")
    else: # Trainval mode
        save_path = f.checkpoint_heat_trainval_path
        if path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            print("Load model weights from [trainval] checkpoint")
        elif path.exists(f.checkpoint_heat_path):
            net.load_state_dict(torch.load(f.checkpoint_heat_path))
            print("No [trainval] checkpoint but [train] checkpoint exists. Load [train]")
        else:
            print("No [trainval] or [train] checkpoint, training [train, val] from scratch")

    net.cuda().train()

    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=args.patience, verbose=True) # Be patient for n steps

    step = 0
    for train_imgs, train_labels in train_data_loader:
        train_imgs, train_labels = aug.augment_batch_img(train_imgs, train_labels)
        cm = cmap.ConfidenceMap()
        # Classify labels as (top left, top right, bottom left, bottom right, left center, right center)
        heat_scale = 1
        heat_hw = np.asarray(train_imgs).shape[1:3]
        # Ground-truth maps: corner gaussians (4ch), lr-center gaussians (2ch),
        # first-lr-point gaussian (1ch) and the center-line PAF (1ch).
        NCHW_corner_gau = cm.batch_gaussian_split_corner(train_imgs, train_labels, heat_scale)
        NCHW_center_gau = cm.batch_gaussian_LRCenter(train_imgs, train_labels, heat_scale)
        # NCHW_t_lines = cm.batch_lines_LRTop(heat_hw, train_labels)
        NCHW_c_lines = cm.batch_lines_LRCenter(heat_hw, train_labels, heat_scale)
        # NCHW_b_lines = cm.batch_lines_LRBottom(heat_hw, train_labels)
        NCHW_first_lrpt = cm.batch_gaussian_first_lrpt(train_imgs, train_labels)
        # NCHW_last_lrpt = cm.batch_gaussian_last_lrpt(train_imgs, train_labels)
        NCHW_paf = NCHW_c_lines
        NCHW_pcm = np.concatenate((NCHW_corner_gau, NCHW_center_gau, NCHW_first_lrpt), axis=1)

        # NCHW_spine_mask = cm.batch_spine_mask(heat_hw, train_labels)


        optimizer.zero_grad()
        criterion = nn.MSELoss()
        # To numpy, NCHW. normalize to [0, 1]
        train_imgs = np.asarray(train_imgs, np.float32)[:, np.newaxis, :, :] / 255.0
        # Normalize train labels to [0, 1] to predict them directly
        # norm_labels = label_normalize_flatten(train_labels, train_imgs)
        # To tensor
        train_imgs = torch.from_numpy(np.asarray(train_imgs)).cuda()
        tensor_gt_pcm = torch.from_numpy(np.asarray(NCHW_pcm)).cuda()
        tensor_gt_paf = torch.from_numpy(np.asarray(NCHW_paf)).cuda()
        # tensor_gt_mask = torch.from_numpy(np.asarray(NCHW_spine_mask)).cuda()

        res_dict = net(train_imgs)
        out_pcm, out_paf = res_dict["pcm"], res_dict["paf"]

        # Loss
        loss1 = criterion(out_pcm, tensor_gt_pcm)
        loss2 = criterion(out_paf, tensor_gt_paf)
        # loss3 = criterion(out_mask, tensor_gt_mask)
        loss = loss1 + (loss2 / 5) # + (loss3 / 50) # pcm + paf + mask
        loss.backward()
        optimizer.step()
        step = step + 1
        loss_value = loss.item()
        # NOTE(review): the plateau scheduler is stepped on the per-batch
        # training loss, not a validation metric — noisy but intentional here.
        scheduler.step(loss_value)
        lr = optimizer.param_groups[0]['lr']
        print("Step: %d, Loss: %f, LR: %f" % (step, loss_value, lr))

        # Save
        if step % 200 == 0:
            torch.save(net.state_dict(), save_path)
            print("Model saved")

        # Early stop once ReduceLROnPlateau has shrunk the lr far enough.
        if lr <= 0.00005:
            print("Stop on plateau")
            break

        # Test
        if step % 200 == 0:
            net.eval()
            test_imgs, test_labels = next(test_data_loader)
            test_imgs = np.asarray(test_imgs, np.float32)[:, np.newaxis, :, :]
            test_imgs_01 = test_imgs / 255.0
            with torch.no_grad():
                test_imgs_tensor = torch.from_numpy(test_imgs_01).to(device)
                test_res_dict = net(test_imgs_tensor) # NCHW
                out_paf = test_res_dict["paf"]
                # Save a side-by-side grid of predicted PAF and inputs
                save_grid_images(test_imgs_tensor, out_paf[:, 0:1, ...], str(step))
                # plot_norm_pts(test_imgs, test_out_pts, str(step))
            net.train()
197 |
--------------------------------------------------------------------------------
/ladder_shufflenet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
def channel_shuffle(x, groups):
    """ShuffleNet channel shuffle: interleave channels across `groups`.

    :param x: NCHW tensor whose channel count is divisible by `groups`
    :param groups: number of groups to interleave
    :return: tensor of the same shape with channels shuffled
    """
    # Fix: query sizes on the tensor itself instead of the deprecated
    # `x.data`, which detaches from autograd tracking and is unsafe.
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups

    # reshape: N, C, H, W -> N, groups, C//groups, H, W
    x = x.view(batchsize, groups,
               channels_per_group, height, width)

    # swap the group/channel axes so channels interleave across groups
    x = torch.transpose(x, 1, 2).contiguous()

    # flatten back to N, C, H, W
    x = x.view(batchsize, -1, height, width)

    return x
18 |
def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False, dilation=1):
    """Build a depthwise Conv2d (groups == input channel count)."""
    return nn.Conv2d(
        i, o, kernel_size, stride, padding,
        dilation=dilation, groups=i, bias=bias,
    )
21 |
class InvertedResidual(nn.Module):
    """ShuffleNetV2 unit using a dilated 5x5 depthwise convolution.

    stride == 1: half the channels pass through untouched, the other half
    run through branch2, then both halves are channel-shuffled together.
    stride > 1: both branches downsample the full input and are concatenated.
    """

    def __init__(self, inp, oup, stride):
        super(InvertedResidual, self).__init__()

        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride

        half_out = oup // 2
        # For the pass-through variant the input must split evenly in two.
        assert (self.stride != 1) or (inp == half_out << 1)

        if self.stride > 1:
            # Downsampling shortcut: depthwise 3x3 + pointwise projection
            self.branch1 = nn.Sequential(
                depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, half_out, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(half_out),
                nn.ReLU(inplace=True),
            )

        branch2_in = inp if self.stride > 1 else half_out
        # Main path: 1x1 -> dilated depthwise 5x5 -> 1x1
        self.branch2 = nn.Sequential(
            nn.Conv2d(branch2_in, half_out, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(half_out),
            nn.ReLU(inplace=True),
            depthwise_conv(half_out, half_out, kernel_size=5, stride=self.stride, padding=4, dilation=2),
            nn.BatchNorm2d(half_out),
            nn.Conv2d(half_out, half_out, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(half_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        if self.stride > 1:
            merged = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        else:
            keep, transform = x.chunk(2, dim=1)
            merged = torch.cat((keep, self.branch2(transform)), dim=1)

        return channel_shuffle(merged, 2)
65 |
66 |
class InvertedResidualUpsample(nn.Module):
    """ShuffleNetV2-style unit whose strided variant upsamples via a
    transposed depthwise convolution instead of downsampling."""

    def __init__(self, inp, oup, stride):
        super(InvertedResidualUpsample, self).__init__()

        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride

        half_out = oup // 2
        # For the pass-through variant the input must split evenly in two.
        assert (self.stride != 1) or (inp == half_out << 1)

        if self.stride > 1:
            # Upsampling shortcut: transposed depthwise 4x4 + pointwise projection
            self.branch1 = nn.Sequential(
                self.depthwise_conv_upsample(inp, inp, kernel_size=4, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, half_out, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(half_out),
                nn.ReLU(inplace=True),
            )
            # Main path: 1x1 -> transposed depthwise 4x4 -> 1x1
            self.branch2 = nn.Sequential(
                nn.Conv2d(inp, half_out, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(half_out),
                nn.ReLU(inplace=True),
                self.depthwise_conv_upsample(half_out, half_out, kernel_size=4, stride=self.stride, padding=1),
                nn.BatchNorm2d(half_out),
                nn.Conv2d(half_out, half_out, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(half_out),
                nn.ReLU(inplace=True),
            )
        else:  # stride == 1: same-resolution path with a dilated depthwise 5x5
            self.branch2 = nn.Sequential(
                nn.Conv2d(half_out, half_out, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(half_out),
                nn.ReLU(inplace=True),
                depthwise_conv(half_out, half_out, kernel_size=5, stride=self.stride, padding=4, dilation=2),
                nn.BatchNorm2d(half_out),
                nn.Conv2d(half_out, half_out, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(half_out),
                nn.ReLU(inplace=True),
            )

    @staticmethod
    def depthwise_conv_upsample(i, o, kernel_size, stride, padding, bias=False):
        """Transposed depthwise convolution (groups == input channel count)."""
        return nn.ConvTranspose2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x):
        if self.stride > 1:
            merged = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        else:
            keep, transform = x.chunk(2, dim=1)
            merged = torch.cat((keep, self.branch2(transform)), dim=1)

        return channel_shuffle(merged, 2)
126 |
127 |
class LadderModelAdd(nn.Module):
    """Encoder-decoder ("ladder") ShuffleNet producing keypoint heatmaps.

    Skip connections between matching encoder/decoder stages are merged
    with element-wise addition (ResNet style) rather than concatenation.
    Output channels: 7 part-confidence maps + 1 part-affinity-field map.
    """

    def __init__(self, in_channels=1):
        super(LadderModelAdd, self).__init__()
        out_channels = 7 + 1  # pcm channels + paf channel
        self._stage_out_channels = [64, 64, 128, 256, 1024]  # init, e1, e2, e3, e4

        self._stage_in_channels_dec = [1024, 256, 128, 64, 64]  # in: d4, d3, d2, d1, final
        self._stage_out_channels_dec = [256, 128, 64, 64, out_channels]  # out: d4, d3, d2, d1, final

        # Stem: keep full resolution, lift to 64 channels
        self.initial = nn.Sequential(
            nn.Conv2d(in_channels, self._stage_out_channels[0], 3, 1, 1, bias=False),
            nn.BatchNorm2d(self._stage_out_channels[0]),
            nn.ReLU(inplace=True),
        )

        # Encoder: each stage halves the resolution, then stacks stride-1 units
        prev_ch = self._stage_out_channels[0]
        enc_spec = zip(['encoder{}'.format(i) for i in [1, 2, 3, 4]],
                       [4, 4, 8, 4],
                       self._stage_out_channels[1:])
        for name, repeats, out_ch in enc_spec:
            blocks = [InvertedResidual(prev_ch, out_ch, 2)]
            blocks += [InvertedResidual(out_ch, out_ch, 1) for _ in range(repeats - 1)]
            setattr(self, name, nn.Sequential(*blocks))
            prev_ch = out_ch

        # Decoder: each stage doubles the resolution, then stacks stride-1 units
        dec_spec = zip(['decoder{}'.format(i) for i in [4, 3, 2, 1]],
                       [4, 8, 4, 4],
                       self._stage_in_channels_dec[:4],
                       self._stage_out_channels_dec[:4])
        for name, repeats, in_ch, out_ch in dec_spec:
            blocks = [InvertedResidualUpsample(in_ch, out_ch, 2)]
            blocks += [InvertedResidualUpsample(out_ch, out_ch, 1) for _ in range(repeats - 1)]
            setattr(self, name, nn.Sequential(*blocks))

        # Final block: two pointwise convolutions down to the output maps
        in_ch = self._stage_in_channels_dec[-1]
        out_ch = self._stage_out_channels_dec[-1]
        self.final = nn.Sequential(
            nn.Conv2d(in_ch, in_ch, 1, 1, 0, bias=False),
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=True),

            nn.Conv2d(in_ch, out_ch, 1, 1, 0)
        )

    def forward(self, x):
        stem = self.initial(x)
        e1 = self.encoder1(stem)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)

        # Decoder with additive skips from the matching encoder stages
        d3 = self.decoder3(torch.add(self.decoder4(e4), e3))
        d2 = self.decoder2(torch.add(d3, e2))
        d1 = self.decoder1(torch.add(d2, e1))

        final = self.final(d1)
        # Channel split: 0..6 part confidence maps, 7 part affinity field
        return {"pcm": final[:, 0:7], "paf": final[:, 7:8]}
198 |
199 |
if __name__=="__main__":
    import numpy as np
    # Smoke test: print the architecture and push one dummy batch through
    # the network on the GPU.
    ladder = LadderModelAdd().cuda()
    print(ladder)
    dummy = np.zeros([2, 1, 256, 256], np.float32)
    t_input = torch.from_numpy(dummy).cuda()
    out = ladder(t_input)
207 |
--------------------------------------------------------------------------------
/cobb_angle_eval.py:
--------------------------------------------------------------------------------
1 | # Run cobb angle evaluation script based on center point heatmaps
2 | import numpy as np
3 | import load_utils
4 | import ladder_shufflenet
5 | import torch
6 | import os.path as path
7 | import cv2
8 | import folders as f
9 | import os
10 | import argparse
11 | import cobb_angle_parse as cap
12 | import csv
13 |
14 |
15 | # Run evaluation on submit test set
def run_on_submit_test(net_heat):
    """Predict cobb angles for every submit-test image and write submit_result.csv.

    Loads the [trainval] checkpoint into `net_heat`, runs the heatmap net on
    each image listed in sample_submission.csv, parses the three cobb angles
    from the center heatmaps + PAF, and saves per-image pair plots/npy files.
    :param net_heat: heatmap network (LadderModelAdd) already on the GPU;
                     its weights are (re)loaded inside this function
    """
    os.makedirs(f.submit_test_plot_pairs, exist_ok=True)
    result_name_an123 = [] # Parsing results to be written
    submit_example = path.join(f.submit_test_img, "sample_submission.csv")
    with open(submit_example, 'r') as example:
        reader = csv.reader(example)
        example_content = list(reader)
        result_name_an123.append(example_content[0]) # Title line
        name_an123 = example_content[1:] # Exclude first title line "name, an1, an2, an3"

    save_path = f.checkpoint_heat_trainval_path
    # save_path = f.checkpoint_heat_path
    net_heat.load_state_dict(torch.load(save_path))

    # First CSV column holds the image file names (without extension)
    filename_list = list(zip(*name_an123))[0]
    for filename in filename_list:
        #if '88' not in filename:
        # continue
        resize_filepath = path.join(f.resize_submit_test_img, filename + ".jpg")
        np_img_ori = cv2.imread(resize_filepath, cv2.IMREAD_GRAYSCALE)
        np_img = [[np_img_ori]] # NCHW, batch of one
        np_img = np.asarray(np_img, np.float32)

        np_norm_img = np_img / 255.
        t_norm_img = torch.from_numpy(np_norm_img).cuda()
        with torch.no_grad():
            out_dict = net_heat(t_norm_img)


        # pcm channels 4:6 = left/right center maps, channel 6 = neck map
        np_pcm_lrcenter = out_dict["pcm"].detach().cpu().numpy()[0, 4:6]
        np_paf_center = out_dict["paf"].detach().cpu().numpy()[0, 0:1]
        np_neck = out_dict["pcm"].detach().cpu().numpy()[0, 6]

        cobb_dict = cap.cobb_angles(np_pcm_lrcenter, np_paf_center, np_img_ori, np_neck, use_filter=True)
        pred_angles, pairs_img, pairs_lr_value = cobb_dict["angles"], cobb_dict["pairs_img"], cobb_dict["pair_lr_value"]
        # NOTE(review): pairs are saved under f.validation_plot_out even though
        # this is the submit-test run — confirm the intended output folder.
        np.save(path.join(f.validation_plot_out, "{}.npy".format(filename)), pairs_lr_value)
        result_line = [filename, float(pred_angles[0]), float(pred_angles[1]), float(pred_angles[2])]
        result_name_an123.append(result_line)
        print(filename)
        # cap.cvsave(pairs_img, "{}".format(filename))
        cv2.imwrite(path.join(f.submit_test_plot_pairs, "{}.jpg".format(filename)), pairs_img)

    with open(path.join(f.data_root, "submit_result.csv"), "w+", newline='') as result_csv_file:
        writer = csv.writer(result_csv_file)
        [writer.writerow(l) for l in result_name_an123]
61 |
def run_on_validation(net_heat):
    """Evaluate heatmap net + cobb-angle parser on 128 validation images.

    Loads the [train]-only checkpoint, predicts center heatmaps/PAF per
    image, parses the three cobb angles and prints the SMAPE against the
    ground-truth angles.
    :param net_heat: heatmap network (LadderModelAdd) already on the GPU;
                     its weights are (re)loaded inside this function
    """
    # Run on validation set
    save_path = f.checkpoint_heat_path
    net_heat.load_state_dict(torch.load(save_path))
    test_data_loader = load_utils.test_loader(1, load_angle=True)
    avg_smape = []
    for step in range(128):
        test_imgs, test_labels, test_angles = next(test_data_loader)
        test_imgs_f = np.asarray(test_imgs, np.float32)[:, np.newaxis, :, :]
        test_imgs_01 = test_imgs_f / 255.0
        test_imgs_tensor = torch.from_numpy(test_imgs_01).cuda()
        with torch.no_grad():
            # Bug fix: LadderModelAdd returns a {"pcm", "paf"} dict, not a
            # 4-tuple — the old `out_pcm, out_paf, _, _ = net_heat(...)`
            # raised at runtime. Unpack it the same way run_on_submit_test does.
            out_dict = net_heat(test_imgs_tensor)  # NCHW
        np_pcm = out_dict["pcm"].detach().cpu().numpy()
        np_paf = out_dict["paf"].detach().cpu().numpy()

        # pcm channels 4:6 = left/right center maps, channel 6 = neck map
        cobb_dict = cap.cobb_angles(np_pcm[0, 4:6], np_paf[0], test_imgs[0], np_pcm[0, 6], use_filter=False)
        pred_angles, pairs_img, pairs_lr_value = cobb_dict["angles"], cobb_dict["pairs_img"], cobb_dict["pair_lr_value"]
        smape = cap.SMAPE(pred_angles, test_angles[0])
        avg_smape.append(smape)
        print(step, smape)
        print(pred_angles - test_angles[0])
        cap.cvsave(pairs_img, "{}".format(step))
    print("end-----------------------------")
    print("SMAPE:", np.mean(avg_smape))
87 |
88 |
def parse_cobb_angle_by_annotated_points():
    """Sanity-check the cobb-angle parser by feeding it ground-truth points.

    Uses annotated corner points (instead of network predictions) to parse
    the cobb angles, then prints the difference to the labeled angles for
    each of the 128 validation images.
    """
    # Use annotated corner points to parse cobb angle
    # so as to test cobb_angle_parser
    import confidence_map as cm
    test_data_loader = load_utils.test_loader(1, load_angle=True)
    # NOTE(review): avg_smape and the two counters are never updated below;
    # they belong to the commented-out experiments at the end.
    avg_smape = []
    counter_isS = 0
    counter_notS = 0
    for step in range(128):
        test_imgs, test_labels, test_angles = next(test_data_loader)
        # gt_a1, gt_a2, gt_a3 = test_angles[0]
        # gt center points
        # [lr][N][17(joint)][xy]
        l_bcs, r_bcs = cm.ConfidenceMap()._find_LCenter_RCenter(test_labels)
        gt_lc, gt_rc = l_bcs[0], r_bcs[0]
        pair_lr_value = gt_lc, gt_rc

        # -----------------------------Use angle_parse from here
        # Sort pairs by y
        pair_lr_value = cap.sort_pairs_by_y(pair_lr_value)
        # Use sigma of x, interval, length to delete wrong pairs
        # pair_lr_value = rbf.simple_filter(pair_lr_value)
        # rbf_dict = rbf.filter(pair_lr_value)
        # pair_lr_value = rbf_dict["pair_lr_value"]
        # pair_lr_value = reduce_redundant_paris(pair_lr_value)
        # [p_len][xy] vector coordinates. (sorted by bone confidence, not up to bottom)
        bones = cap.bone_vectors(pair_lr_value)
        # Index1(higher), index2(lower) of max angle; a1: max angle value
        max_ind1, max_ind2, a1 = cap.max_angle_indices(bones, pair_lr_value)

        # Midpoints of each left/right pair decide the S-shape branch
        hmids = (pair_lr_value[0] + pair_lr_value[1]) / 2
        if not cap.isS(hmids):
            a2 = np.rad2deg(np.arccos(cap.cos_angle(bones[max_ind1], bones[0]))) # Use first bone
            a3 = np.rad2deg(np.arccos(
                cap.cos_angle(bones[max_ind2], bones[-1]))) # Note: use last bone on submit test set gains better results

            # print(max_ind1, max_ind2)
        else: # isS
            a2, a3 = cap.handle_isS_branch(pair_lr_value, max_ind1, max_ind2, test_imgs[0].shape[0])
        # Difference between parsed and ground-truth angles (a1, a2, a3)
        sub = np.array([a1, a2, a3]) - test_angles[0]
        print(step)
        print(sub)
        print("------------end---------------")
    # print(np.mean(avg_smape))
    # print("number of isS-notS:", counter_isS, counter_notS)
134 |
135 |
def gen_manual_img_label():
    """Yield (image, pixel-space label, name) for each manually marked
    submit-test annotation; the .npy files hold points normalized to [0, 1]."""
    import glob
    npy_paths = glob.glob(path.join(f.manual_npy_submit_test, "*.npy"))
    for npy_path in npy_paths:
        stem = path.splitext(path.basename(npy_path))[0]
        img_path = path.join(f.resize_submit_test_img, stem) + ".jpg"
        img = cv2.imread(img_path, flags=cv2.IMREAD_GRAYSCALE)
        # Scale the normalized [p][xy] points back to pixel coordinates
        norm_pts = np.load(npy_path)
        ori_npy = norm_pts * [img.shape[1], img.shape[0]]
        img_name = path.basename(img_path).replace(".jpg", "")
        yield img, ori_npy, img_name
150 |
def eval_manually_marked_submit_test():
    """Parse cobb angles from manually annotated submit-test landmarks.

    Builds "ground-truth" heatmaps from the hand-marked points (bypassing the
    network), runs the same cobb-angle parser on them and writes
    submit_result.csv plus per-image pair plots/npy files.
    """
    import confidence_map as cmap
    # Prepare the submit csv file
    os.makedirs(f.submit_test_plot_pairs, exist_ok=True)
    result_name_an123 = [] # Parsing results to be written
    submit_example = path.join(f.submit_test_img, "sample_submission.csv")
    with open(submit_example, 'r') as example:
        reader = csv.reader(example)
        example_content = list(reader)
        result_name_an123.append(example_content[0]) # Title line
        # NOTE(review): name_an123 is unused here — the file names come from
        # the generator below, unlike run_on_submit_test.
        name_an123 = example_content[1:] # Exclude first title line "name, an1, an2, an3"

    # Read manually annotated npy
    gen = gen_manual_img_label()
    for img, manual_label, filename in gen:
        # Manually marked labels
        cm = cmap.ConfidenceMap()
        # Classify labels as (top left, top right, bottom left, bottom right, left center, right center)
        heat_scale = 1
        img = [img] # batch, h, w
        manual_label = [manual_label]
        heat_hw = np.asarray(img).shape[1:3]
        # Build the same map layout the network would predict
        NCHW_corner_gau = cm.batch_gaussian_split_corner(img, manual_label, heat_scale)
        NCHW_center_gau = cm.batch_gaussian_LRCenter(img, manual_label, heat_scale)
        NCHW_c_lines = cm.batch_lines_LRCenter(heat_hw, manual_label, heat_scale)
        NCHW_first_lrpt = cm.batch_gaussian_first_lrpt(img, manual_label)
        NCHW_paf = NCHW_c_lines
        NCHW_pcm = np.concatenate((NCHW_corner_gau, NCHW_center_gau, NCHW_first_lrpt), axis=1)

        # pcm channels 4:6 = left/right center maps, channel 6 = neck map
        np_pcm_lrcenter = NCHW_pcm[0, 4:6]
        np_paf_center = NCHW_paf[0, 0:1]
        np_neck = NCHW_pcm[0, 6]

        cobb_dict = cap.cobb_angles(np_pcm_lrcenter, np_paf_center, img[0], np_neck, use_filter=False)
        pred_angles, pairs_img, pairs_lr_value = cobb_dict["angles"], cobb_dict["pairs_img"], cobb_dict["pair_lr_value"]
        np.save(path.join(f.validation_plot_out, "{}.npy".format(filename)), pairs_lr_value)
        result_line = [filename, float(pred_angles[0]), float(pred_angles[1]), float(pred_angles[2])]
        result_name_an123.append(result_line)
        print(filename)
        # cap.cvsave(pairs_img, "{}".format(filename))
        cv2.imwrite(path.join(f.submit_test_plot_pairs, "{}.jpg".format(filename)), pairs_img)

    with open(path.join(f.data_root, "submit_result.csv"), "w+", newline='') as result_csv_file:
        writer = csv.writer(result_csv_file)
        [writer.writerow(l) for l in result_name_an123]
196 |
197 |
if __name__ == "__main__":
    # Build the heatmap network on the GPU in inference mode
    # (both .eval() and .cuda() return the module, so the chain is safe).
    net = ladder_shufflenet.LadderModelAdd().eval().cuda()
    os.makedirs(f.validation_plot_out, exist_ok=True)

    # Alternative entry points:
    # run_on_validation(net)
    # eval_manually_marked_submit_test()
    run_on_submit_test(net)
207 |
--------------------------------------------------------------------------------
/confidence_map.py:
--------------------------------------------------------------------------------
1 | """
2 | Create 2D Gaussian map from keypoints
3 | Run this script to show the created maps
4 | """
5 | import numpy as np
6 | import torch
7 | import cv2
8 | """
9 | def gaussian_2d(img, pt):
10 | sigma = 5
11 | assert len(img.shape) == 2 # grayscale image: H,W
12 | h, w = img.shape
13 | x, y = np.meshgrid(np.arange(w), np.arange(h))
14 | x_d, y_d = x-pt[0], y-pt[1] # Distance of each point on map to keypoint pt
15 | d_2 = (x_d*x_d + y_d*y_d) # Square of Straight distance
16 | g = np.exp(-d_2 / (2.0*sigma**2))
17 | return g
18 |
19 |
20 | def gaussian_2d_pts(img, pts):
21 | maps = [gaussian_2d(img, pt) for pt in pts] # Multiple maps with 1 gaussian on each of them
22 | cmap = np.amax(maps, axis=0) # One map with multiple gaussian circles
23 | print("1map")
24 | return cmap
25 | """
26 |
class ConfidenceMap():
    """Builds ground-truth training maps from spine landmark coordinates:
    gaussian part-confidence heatmaps and line-based part-affinity fields."""

    def __init__(self, sigma=4.0, thickness=6):
        # sigma: std-dev (pixels) of each gaussian peak
        # thickness: line width (pixels) used when drawing PAF segments
        self.sigma = sigma
        self.thickness = thickness
31 |
32 | def _gaussian_2d_torch(self, hw, pt):
33 | """
34 | Create an image with 1 gaussian circle
35 | :param hw:
36 | :param pt:
37 | :return:
38 | """
39 | # Use cuda for big pictures (256:752), use CPU for smaller ones (64: 188)
40 | # device = torch.device("cuda:0") # small img: 9.05
41 | device = torch.device("cuda") # small img: 3.35
42 | assert len(hw) == 2 # grayscale image: H,W
43 | h, w = hw
44 | i, j = torch.meshgrid([torch.arange(h, dtype=torch.float, device=device), torch.arange(w, dtype=torch.float, device=device)])
45 | i_d, j_d = i-pt[1], j-pt[0]
46 | d_square = (i_d*i_d + j_d*j_d)
47 | g = torch.exp((-d_square / (2.0*self.sigma**2)))
48 | return g
49 |
50 |
51 | def _gaussian_2d_pts_torch(self, hw, pts):
52 | """
53 | Create an image with multiple gaussian circles
54 | :param img:
55 | :param pts: A list of points [pts][xy]
56 | :return:
57 | """
58 |
59 | maps = [self._gaussian_2d_torch(hw, pt) for pt in pts] # Multiple maps with 1 gaussian on each of them
60 | maps = torch.stack(maps)
61 | cmap = torch.max(maps, dim=0)
62 |
63 | return cmap[0].cpu().numpy()
64 |
65 | def _batch_gaussian(self, hw, pts):
66 | """
67 | Create a batch of gaussian images
68 | :param hw:
69 | :param pts:
70 | :return: List of gaussian images, range: [0,1]
71 | """
72 | b = [self._gaussian_2d_pts_torch(hw, p) for p in pts]
73 | return b
74 |
75 | def _split_labels_by_corner(self, batch_labels):
76 | """
77 | Index of: Top left, Top right, Bottom left, Bottom right
78 | :param batch_labels: [batch][pts][xy]
79 | :return: [4(tl tr bl br)][batch][17(joint)][xy]
80 | """
81 | batch_labels = np.asarray(batch_labels)
82 | ind_1 = np.array(list(range(0, 68, 4))) # 0, 4, 8...
83 | # [4(tl tr bl br)][N][17(joint)][xy]
84 | four_corner = [np.take(batch_labels, ind, axis=1).tolist() for ind in (ind_1, ind_1+1, ind_1+2, ind_1+3)]
85 | return four_corner
86 |
87 | def batch_gaussian_split_corner(self, imgs, pts, zoom):
88 | """
89 | Generate gaussian for batch images
90 | Split four corner to different maps
91 | :param imgs:
92 | :param pts:
93 | :param zoom: size of input/output
94 | :return: NCHW format gaussian map, C for corner
95 | """
96 |
97 | hw = np.asarray(np.asarray(imgs).shape[1:3])
98 | if np.all(hw % zoom) == 0:
99 | hw = hw // zoom
100 | else:
101 | raise RuntimeError("Image size can not be divided by %d" % zoom)
102 | pts = np.array(pts) / zoom
103 | pts_corner = self._split_labels_by_corner(pts) # CNJO, C for corner, J for joint, O for coordinate xy
104 | CNHW = [self._batch_gaussian(hw, pts) for pts in pts_corner]
105 | NCHW = np.asarray(CNHW).transpose([1, 0, 2, 3])
106 | return NCHW
107 |
108 | def _find_LCenter_RCenter(self, batch_labels):
109 | """
110 | Find two centers: center of left top and left bottom, center of right top and right bottom
111 | :param batch_labels:
112 | :return: l_center r_center [lr][N][17(joint)][xy]
113 | """
114 | # [4(tl tr bl br)][N][17(joint)][xy]
115 | four_corner = np.asarray(self._split_labels_by_corner(batch_labels))
116 | l_center = (four_corner[0, ...] + four_corner[2, ...]) / 2. # N J xy
117 | r_center = (four_corner[1, ...] + four_corner[3, ...]) / 2.
118 | return l_center, r_center
119 |
120 | def batch_gaussian_LRCenter(self, imgs, pts, zoom):
121 | """
122 | Generate gaussian images of L R Center
123 | :return:
124 | """
125 | hw = np.asarray(np.asarray(imgs).shape[1:3])
126 | if np.all(hw % zoom) == 0:
127 | hw = hw // zoom
128 | else:
129 | raise RuntimeError("Image size can not be divided by %d" % zoom)
130 | pts = np.array(pts) / zoom
131 |
132 | pts_centers = self._find_LCenter_RCenter(pts) # CNJO, C for lr centers, J for joint, O for coordinate xy
133 | CNHW = [self._batch_gaussian(hw, pts) for pts in pts_centers]
134 | NCHW = np.asarray(CNHW).transpose([1, 0, 2, 3])
135 | return NCHW
136 |
137 | def _lines_on_img(self, hw, l_cs, r_cs):
138 | paf_img = np.zeros(hw, dtype=np.uint8)
139 |
140 | lr_cs = zip(l_cs, r_cs)
141 | [cv2.line(paf_img, tuple(p1.astype(np.int32)), tuple(p2.astype(np.int32)), 255, self.thickness) for p1, p2 in lr_cs]
142 | # Convert to [0,1]
143 | paf_img = paf_img.astype(np.float32)
144 | paf_img = paf_img / 255.
145 | return paf_img
146 |
147 | def batch_lines(self, heat_hw, l_pts, r_pts):
148 | l_pts, r_pts = np.array(l_pts), np.array(r_pts)
149 | heat_hw = np.array(heat_hw)
150 | assert l_pts.shape[0] == r_pts.shape[0]
151 | assert len(heat_hw) == 2
152 | paf_imgs = [] # NHW
153 | for i in range(l_pts.shape[0]):
154 | paf_img = self._lines_on_img(heat_hw, l_pts[i], r_pts[i])
155 | paf_imgs.append(paf_img)
156 | paf_imgs = np.asarray(paf_imgs)[:, np.newaxis] # NCHW
157 | return paf_imgs
158 |
159 | def batch_lines_LRTop(self, heat_hw, batch_labels):
160 | """
161 | Draw part affinity field for Left Right Top centers
162 | :param batch_labels:
163 | :return: pafs shape: NCHW
164 | """
165 | batch_labels = np.array(batch_labels)
166 | lps = batch_labels[:, 0::4, :]
167 | rps = batch_labels[:, 1::4, :]
168 | pafs = self.batch_lines(heat_hw, lps, rps)
169 | return pafs
170 |
171 | def batch_lines_LRBottom(self, heat_hw, batch_labels):
172 | batch_labels = np.array(batch_labels)
173 | lps = batch_labels[:, 2::4, :]
174 | rps = batch_labels[:, 3::4, :]
175 | pafs = self.batch_lines(heat_hw, lps, rps)
176 | return pafs
177 |
178 | def batch_lines_LRCenter(self, heat_hw, pts, zoom):
179 | """
180 | Draw Part Affinity Fields (no direction, 1 dim) between each 2 center points.
181 | :param imgs:
182 | :param pts:
183 | :param zoom:
184 | :return:
185 | """
186 | hw = np.array(heat_hw)
187 | if np.all(hw % zoom) == 0:
188 | hw = hw // zoom
189 | else:
190 | raise RuntimeError("Image size can not be divided by %d" % zoom)
191 | pts = np.array(pts) / zoom
192 | l_bcs, r_bcs = self._find_LCenter_RCenter(pts) # [N][17][xy]
193 | return self.batch_lines(hw, l_bcs, r_bcs)
194 |
195 | def batch_gaussian_first_lrpt(self, imgs, batch_labels):
196 |
197 | imgs = np.asarray(imgs)
198 | assert len(imgs.shape) == 3, "(N, h, w)"
199 | batch_labels = np.array(batch_labels)
200 |
201 | four_corner = np.asarray(self._split_labels_by_corner(batch_labels))
202 | first_lrpt = four_corner[0:2, :, 0:1, :] # [tl tr][batch][Joint][xy]
203 | # first_lrpt = np.transpose(first_lrpt, [1, 0, 2]) # [batch][tl tr][xy]
204 |
205 | lrNHW = np.array([self._batch_gaussian(imgs.shape[1:3], pts) for pts in first_lrpt])
206 | NHW = np.max(lrNHW, axis=0)
207 |
208 | NCHW = NHW[:, np.newaxis, :, :] # NCHW.
209 | return NCHW
210 |
211 | def batch_gaussian_last_lrpt(self, imgs, batch_labels):
212 | # [4(tl tr bl br)][batch][17(joint)][xy]
213 | imgs = np.asarray(imgs)
214 | assert len(imgs.shape) == 3, "(N, h, w)"
215 | batch_labels = np.array(batch_labels)
216 |
217 | def find_max_Y_index(pts):
218 | assert len(pts.shape) == 3
219 | pts_Y = pts[:, :, 1]
220 | sorted_Y_ind = np.argsort(pts_Y)
221 | max_Y_ind = sorted_Y_ind[:, -1]
222 | return max_Y_ind
223 |
224 | left_pts = batch_labels[:, 0::2, :]
225 | right_pts = batch_labels[:, 1::2, :]
226 |
227 | left_indices = find_max_Y_index(left_pts)
228 | right_indices = find_max_Y_index(right_pts) # indices on batch
229 |
230 | l_max = np.array([left_pts[b, ind, :] for b, ind in enumerate(left_indices)])
231 | r_max = np.array([right_pts[b, ind, :] for b, ind in enumerate(right_indices)])
232 |
233 | lrpt = np.stack([l_max, r_max], axis=0)[:, :, np.newaxis, :] # [tl tr][batch][Joint][xy]
234 | lrNHW = np.array([self._batch_gaussian(imgs.shape[1:3], pts) for pts in lrpt])
235 | NHW = np.max(lrNHW, axis=0)
236 | NCHW = NHW[:, np.newaxis, :, :]
237 | return NCHW
238 |
239 | def batch_spine_mask(self, heat_hw, batch_labels):
240 | batch_labels = np.asarray(batch_labels)
241 | assert len(heat_hw) == 2
242 | assert len(batch_labels.shape) == 3, "(N, P, xy)"
243 | def draw_polygon(hw, labels):
244 | assert len(hw) == 2, "(h, w)"
245 | assert len(labels.shape) == 2, "(P, xy)"
246 | mask = np.zeros(hw, dtype=np.uint8)
247 | for p in range(0, labels.shape[0], 4):
248 | p1234 = labels[p:p+4, :].astype(np.int32)
249 | p1243 = np.stack([p1234[0, :], p1234[1, :], p1234[3, :], p1234[2, :]],
250 | axis=0)
251 | cv2.fillPoly(mask, [p1243], 255)
252 | return mask
253 | batch_mask = [draw_polygon(heat_hw, labels) for labels in batch_labels[:]]
254 | batch_mask = np.asarray(batch_mask, np.float32)
255 | batch_mask = batch_mask / 255.
256 | batch_mask = batch_mask[:, np.newaxis, :, :]
257 | return batch_mask
258 |
    def batch_spine_mask_top3(self, heat_hw, batch_labels):
        # TODO: unimplemented placeholder — returns None. Intended signature
        # mirrors batch_spine_mask(heat_hw, batch_labels).
        pass
261 |
262 |
263 |
def main():
    """Visual sanity check: build every heatmap/PAF/mask type for one
    training batch, time the generation, then display each channel
    overlaid on its source image (press any key to advance)."""
    import load_utils
    import cv2
    import time
    train_data_loader = load_utils.train_loader(10)
    train_imgs, train_labels = next(train_data_loader)
    ts = time.time()
    cm = ConfidenceMap()
    heat_scale = 1  # 1: heatmaps generated at full input resolution
    heat_hw = np.asarray(train_imgs).shape[1:3]
    NCHW_corner_gau = cm.batch_gaussian_split_corner(train_imgs, train_labels, heat_scale)
    NCHW_center_gau = cm.batch_gaussian_LRCenter(train_imgs, train_labels, heat_scale)
    NCHW_c_lines = cm.batch_lines_LRCenter(heat_hw, train_labels, heat_scale)
    NCHW_t_lines = cm.batch_lines_LRTop(heat_hw, train_labels)
    NCHW_b_lines = cm.batch_lines_LRBottom(heat_hw, train_labels)
    NCHW_spine_mask = cm.batch_spine_mask(heat_hw, train_labels)
    NCHW_first_lrpt = cm.batch_gaussian_first_lrpt(train_imgs, train_labels)
    NCHW_last_lrpt = cm.batch_gaussian_last_lrpt(train_imgs, train_labels)
    # Only a subset is concatenated for display; swap in the other maps
    # (corner / center / line channels) as needed when debugging.
    NCHW_gaussian = np.concatenate((NCHW_first_lrpt, NCHW_last_lrpt, NCHW_spine_mask), axis=1)
    te = time.time()
    print("Duration for gaussians: %f" % (te - ts))  # Time duration for generating gaussians
    # Loop-invariant sanity check hoisted out of the display loop
    # (previously re-evaluated on every channel of every image).
    assert NCHW_gaussian.max() < 1.5, "expect normalized values"
    for n in range(NCHW_gaussian.shape[0]):
        for c in range(NCHW_gaussian.shape[1]):
            cv2.imshow("Image", train_imgs[n])
            g = NCHW_gaussian[n, c]
            g = cv2.resize(g, dsize=None, fx=heat_scale, fy=heat_scale)
            cv2.imshow("Image Heat", np.amax([train_imgs[n].astype(np.float32) / 255, g], axis=0))
            cv2.imshow("Heat Only", g)
            cv2.waitKey()

if __name__ == "__main__":
    main()
297 |
--------------------------------------------------------------------------------
/cobb_angle_parse.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os.path as path
3 | import cv2
4 | import folders as f
5 | import argparse
6 | import redundant_bones_filter as rbf
7 |
def cvshow(img):
    """Display a single-channel image in a window and block until a keypress."""
    assert len(img.shape) == 2
    cv2.imshow("img", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
14 |
def cvsave(img, name):
    """Save a single-channel image as <name>.jpg into the validation plot folder."""
    assert len(img.shape) == 2
    out_path = path.join(f.validation_plot_out, "{}.jpg".format(name))
    cv2.imwrite(out_path, img)
18 |
def centeroid(heat, gaussian_thresh = 0.5):
    """Locate the centroid of every connected component in a heatmap.

    :param heat: 2-D heatmap with values in [0, 1]
    :param gaussian_thresh: binarization threshold applied before labelling
    :return: [p][xy] integer centroid coordinates
    """
    _, binary = cv2.threshold(heat, gaussian_thresh, 1., cv2.THRESH_BINARY)
    binary = np.array(binary * 255., np.uint8)
    # num counts the background as one extra label, hence range(1, num)
    num, labels = cv2.connectedComponents(binary)
    coords = []
    for label in range(1, num):
        component = np.zeros_like(labels, dtype=np.uint8)
        component[labels == label] = 255
        moments = cv2.moments(component)
        coords.append([int(moments["m10"] / moments["m00"]),
                       int(moments["m01"] / moments["m00"])])
    return coords
35 |
36 |
def line_mask(pt1, pt2_list, hw):
    """Create one mask per candidate, each with a line from pt1 to that point.

    :return: float32 array [len(pt2_list)][h][w], pixel values in [0, 1]
    """
    assert len(hw) == 2
    canvases = np.zeros([len(pt2_list), hw[0], hw[1]], dtype=np.uint8)
    for idx, pt2 in enumerate(pt2_list):
        cv2.line(canvases[idx], tuple(pt1), tuple(pt2), 255)
    return canvases.astype(np.float32) / 255.
46 |
def line_dist(pt1, pt2_list):
    """Euclidean distance from pt1 to every point in pt2_list.

    :return: 1-D numpy array of distances
    """
    origin = np.array(pt1)
    targets = np.array(pt2_list)
    return np.linalg.norm(targets - origin, axis=-1)
55 |
def coincidence_rate(paf, line_masks, distances):
    """Confidence that a point connects to each of a list of candidates.

    Sums the PAF response along each candidate line mask and normalizes by
    line length. Values range from 0 to around 5 (not 0-1, because an
    opencv line does not integrate to exactly 1.0 per unit distance).
    """
    assert len(paf.shape) == 2
    assert len(line_masks.shape) == 3
    assert len(distances.shape) == 1
    overlap = line_masks * paf  # [p2_len][h][w]
    return np.sum(overlap, axis=(1, 2)) / distances
66 |
def center_coords(lcrc_pcm):
    """Extract left-center and right-center point lists from a 2-channel PCM.

    :return: [lc_coords, rc_coords], each [p][xy]
    """
    assert len(lcrc_pcm.shape) == 3, "expected shape: (lr, h, w)"
    assert lcrc_pcm.shape[0] == 2, "1st dim of pcm should have 2 elements: l and r"
    return [centeroid(channel) for channel in lcrc_pcm]
73 |
def coincidence_rate_from_pcm_paf(lcrc_coord, hw, paf):
    """Confidence matrix between every left center and every right center.

    :return: nparray with shape [p1_len][p2_len]
    """
    assert len(np.array(lcrc_coord[0]).shape) == 2, "expected shape: (p, xy). length of lc, rc list can be different"
    assert len(hw) == 2, "expected shape length: 2 for h and w"
    assert len(paf.shape) == 2, "paf shape length should be 2"
    lc_coord, rc_coord = lcrc_coord
    rates = []  # coincidence rate list, [pt1_len][pt2_len]
    for left_pt in lc_coord:
        masks = line_mask(left_pt, rc_coord, hw)  # [p2_len][h][w]
        dists = line_dist(left_pt, rc_coord)
        rates.append(coincidence_rate(paf, masks, dists))
    return np.array(rates)
87 |
88 |
def pairs_with_highest_confidence(coincidence_rate, confidence_lowerbound):
    """Greedily match left and right points by descending confidence.

    :return: two equal-length index lists; e.g. [3, 4] and [3, 6] means
        l3->r3 and l4->r6.
    """
    pair_l, pair_r = [], []
    flat_order = np.argsort(coincidence_rate, axis=None)
    l_args, r_args = np.unravel_index(flat_order, coincidence_rate.shape)
    # argsort is ascending; walk it backwards for best-first matching
    for al, ar in zip(l_args[::-1], r_args[::-1]):
        if al in pair_l or ar in pair_r:
            # At least one endpoint already found a better partner.
            continue
        # Reject low-confidence pairs (e.g. two stray points at the top and
        # bottom of the image). A real pair scores around 4.5.
        if coincidence_rate[al][ar] > confidence_lowerbound:
            pair_l.append(al)
            pair_r.append(ar)
    assert len(pair_l) == len(pair_r)
    return (pair_l, pair_r)
111 |
112 |
def pair_args_to_value(pair_lr_args, lr_coords):
    """Convert paired index lists into paired coordinate values.

    :param pair_lr_args: (left_indices, right_indices) into lr_coords
    :param lr_coords: (left_coords, right_coords), lists of [x, y]
    :return: (xy_l, xy_r) arrays of matched coordinates
    """
    # Bug fix: the `np.int` alias was removed in NumPy 1.24 (deprecated in
    # 1.20) and raised AttributeError; the builtin `int` is the replacement.
    al, ar = np.array(pair_lr_args, dtype=int)[:]
    cl, cr = lr_coords
    # Unpaired points can leave cl and cr with different lengths, so
    # lr_coords cannot be converted to one rectangular numpy array (plain
    # python lists would raise "only integer scalar arrays can be converted
    # to a scalar index"). Convert each side separately for fancy indexing.
    cl, cr = list(map(np.array, [cl, cr]))

    xy_l = cl[al]
    xy_r = cr[ar]
    return xy_l, xy_r
124 |
def bone_vectors(pair_lr_value):
    """Vector from each left point to its paired right point.

    Used downstream for angle computation.
    :return: array [bone][xy]
    """
    pairs = np.array(pair_lr_value)
    assert len(pairs.shape) == 3, "shape should be:(lr, bones, xy)"
    assert pairs.shape[0] == 2, "length of first dim should be 2 for l and r"
    left, right = pairs
    return right - left
133 |
134 |
def cos_angle(v1, v2):
    """Cosine of the angle between two 2-D vectors, clipped to [-1, 1]."""
    assert v1.shape == (2,)
    assert v2.shape == (2,)
    norm_product = np.linalg.norm(v1) * np.linalg.norm(v2)
    cosine = np.dot(v1, v2) / norm_product
    # Numerical precision can push the ratio slightly outside [-1, 1]
    return cosine.clip(-1., 1.)
144 |
def angle_matrix(bone_vectors):
    """Pairwise angles between bone vectors, in degrees.

    :return: matrix A where A[i][j] is the angle between bones i and j,
        shape [bone1][bone2]
    """
    vectors = np.array(bone_vectors)
    assert len(vectors.shape) == 2, "expected shape: (bone, xy)"

    count = vectors.shape[0]
    cos_mat = np.zeros((count, count))
    for row in range(count):
        for col in range(count):
            cos_mat[row, col] = cos_angle(vectors[row], vectors[col])
    # cos_angle is sometimes slightly above 1 due to numerical precision
    cos_mat = np.clip(cos_mat, a_min=-1., a_max=1.)
    return np.rad2deg(np.arccos(cos_mat))
163 |
def draw_pairs(lr_values, heat_hw, img):
    """Overlay numbered left-right connection lines onto an image.

    :param lr_values: (lr, p, xy) paired coordinates in heatmap space
    :param heat_hw: (h, w) of the heatmap the coordinates live in
    :param img: grayscale image to merge the (upscaled) line layer into
    :return: image with line layer merged via element-wise max
    """
    assert len(np.asarray(lr_values).shape) == 3, "shape: (lr, p, xy)"
    assert len(img.shape) == 2, "shape: (h,w)"
    lefts, rights = lr_values
    overlay = np.zeros(heat_hw, dtype=np.uint8)
    for idx, (pt1, pt2) in enumerate(zip(lefts, rights)):
        cv2.line(overlay, tuple(pt1), tuple(pt2), 255, 5)
        cv2.putText(overlay, str(idx), tuple(pt2), cv2.FONT_HERSHEY_SIMPLEX, 1, 255)
    overlay = cv2.resize(overlay, tuple(reversed(img.shape)), interpolation=cv2.INTER_NEAREST)
    return np.maximum(img, overlay)
178 |
def make_ind1_upper(ind1, ind2, pair_lr_value):
    """Return (ind1, ind2) ordered so that ind1 refers to the upper bone.

    "Upper" means the smaller midpoint y coordinate; on a tie the indices
    are swapped (matching the original branch).
    """
    pairs = np.array(pair_lr_value)
    assert len(pairs.shape) == 3
    mid_y = ((pairs[0] + pairs[1]) / 2)[:, 1]  # midpoint y per bone, [p]
    if mid_y[ind2] <= mid_y[ind1]:  # ind1 is lower (or tied): swap
        ind1, ind2 = ind2, ind1
    return ind1, ind2
194 |
def sort_pairs_by_y(pair_lr_value):
    """Reorder bone pairs from top to bottom by midpoint y coordinate.

    Pairs arrive ordered by detection confidence; downstream angle logic
    expects anatomical (top-down) order.
    """
    pairs = np.array(pair_lr_value)
    assert len(pairs.shape) == 3
    mid_y = ((pairs[0] + pairs[1]) / 2)[:, 1]  # midpoint y per bone, [p]
    top_down = np.argsort(mid_y, axis=None)
    return pairs[:, top_down, :]
204 |
205 |
def max_angle_indices(bones, pair_lr_value):
    """Find the two bones that form the largest mutual angle.

    :return: (upper_index, lower_index, angle_in_degrees), with
        upper_index referring to the bone with the smaller midpoint y.
    """
    assert len(bones.shape) == 2, "expect [b][xy]"
    # [len_b][len_b] angle matrix
    am = angle_matrix(bones)
    rows, cols = np.unravel_index(np.argsort(am, axis=None), am.shape)
    # Last entry of the ascending flat sort is the largest angle
    ind1, ind2 = rows[-1], cols[-1]
    largest = am[ind1, ind2]

    # Ensure ind1 refers to the upper bone
    ind1, ind2 = make_ind1_upper(ind1, ind2, pair_lr_value)
    return ind1, ind2, largest
219 |
220 |
221 |
222 |
# Module-level filter instance, created once at import time and reused by
# cobb_angles() below.
box_filter = rbf.BoxNetFilter()
def cobb_angles(np_pcm, np_paf, img, spine_range, use_filter=True):
    """Parse Cobb angles from network heatmap outputs.

    :param np_pcm: (2, h, w) confidence maps for left/right vertebra centers
    :param np_paf: (1, h, w) part affinity field linking the two sides
    :param img: (h, w) grayscale image, used for filtering and plotting
    :param spine_range: (h, w) spine region mask for the range filter
    :param use_filter: when True, run the redundant-bone filters
    :return: dict with "angles" (np array [a1, a2, a3] in degrees),
        "pair_lr_value" (matched pairs), and "pairs_img" (plot of pairs)
    """
    paf_confidence_lowerbound = 0.7
    assert len(np_pcm.shape) == 3, "expected shape: (c,h,w)"
    assert np_pcm.shape[0] == 2, "expect 2 channels at dim 0 for l and r"
    assert len(np_paf.shape) == 3, "expected shape: (c,h,w)"
    assert np_paf.shape[0] == 1, "expect 1 channel at dim 0 for paf"
    assert len(img.shape) == 2, "expected shape: (h,w)"
    assert len(spine_range.shape) == 2, "(h,w)"
    heat_hw = np_pcm.shape[1:3]
    # [lr][p][xy] coordinate values of detected left/right centers
    lcrc_coords = center_coords(np_pcm)
    # [p1_len][p2_len] coincidence rate of a point to another point
    coins = coincidence_rate_from_pcm_paf(lcrc_coords, heat_hw, np_paf[0])
    # [lr][p_len] pairs of points, values are index values into lcrc_coords. equal length.
    pair_lr = pairs_with_highest_confidence(coins, confidence_lowerbound=paf_confidence_lowerbound)
    # [lr][p_len][xy], coordinate values. (sorted by bone confidence, not top to bottom yet)
    pair_lr_value = pair_args_to_value(pair_lr, lcrc_coords)
    # Sort pairs by y
    pair_lr_value = sort_pairs_by_y(pair_lr_value)
    if use_filter:
        pair_lr_value = box_filter.filter(pair_lr_value, img)  # Left Right
        pair_lr_value = rbf.filter_by_spine_range(spine_range, pair_lr_value)
        # Leave 16 pairs. Must be the last filter
        pair_lr_value = rbf.simple_filter(pair_lr_value)  # Top pixels

    #rbf_dict = rbf.filter(pair_lr_value)
    #pair_lr_value = rbf_dict["pair_lr_value"]
    # [p_len][xy] bone vectors (pairs are now sorted top-down by y)
    bones = bone_vectors(pair_lr_value)
    # Index1(upper), index2(lower) of max angle; a1: max angle value
    max_ind1, max_ind2, a1 = max_angle_indices(bones, pair_lr_value)

    # Midpoints decide whether the curve is S-shaped
    hmids = (pair_lr_value[0] + pair_lr_value[1]) / 2
    if not isS(hmids):
        # a2 = np.rad2deg(np.arccos(cos_angle(bones[max_ind1], np.array([1, 0]))))
        # a3 = np.rad2deg(np.arccos(cos_angle(bones[max_ind2], np.array([1, 0])))) # but the last bone is hard to detect, so use horizontal one?
        a2 = np.rad2deg(np.arccos(cos_angle(bones[max_ind1], bones[0])))  # Use first bone
        a3 = np.rad2deg(np.arccos(cos_angle(bones[max_ind2], bones[-1])))  # Note: use last bone on submit test set gains better results
        # print(a1, a2, a3)
        # print(max_ind1, max_ind2)
    else: # isS
        a2, a3 = handle_isS_branch(pair_lr_value, max_ind1, max_ind2, np_paf.shape[1])

    result_dict = {"angles": np.array([a1, a2, a3]), "pair_lr_value": pair_lr_value}
    # NOTE(review): img was already asserted 2-D above, so this guard is
    # always true here; kept for safety if the assert is ever relaxed.
    if img is not None:
        assert len(img.shape) == 2, "expected shape: (h,w)"
        plot_img = draw_pairs(pair_lr_value, heat_hw, img)
        result_dict["pairs_img"] = plot_img

    return result_dict
275 |
def SMAPE(pred_angles, true_angles):
    """Symmetric mean absolute percentage error between two angle triples.

    Computed as the total absolute difference over the total sum of both
    triples, as a percentage.
    """
    pred = np.array(pred_angles)
    truth = np.array(true_angles)
    assert pred.shape == (3,)
    assert truth.shape == (3,)
    abs_diff = np.sum(np.abs(pred - truth))
    total = np.sum(pred + truth)
    return abs_diff / total * 100.
286 |
def isS(mid_p):
    """Decide whether the spine curve is S-shaped.

    Reimplementation of the "isS" function from the original MATLAB code.
    The input should be the horizontal mid point of each left/right point
    pair, but the horizontal mid point of vertical midpoints is used for
    convenience.

    :param mid_p: horizontal mid point list, [p][xy] (size 68/2 in practice)
    :return: True when the signed deviations from the chord (first point to
        last point) change sign along the curve, i.e. the curve is S-shaped.
    """

    def linefun(p):
        # Signed deviation of each point from the chord through the first
        # and last points. Bug fix: use p.shape, not the enclosing mid_p,
        # so the helper is correct for any argument (behavior is unchanged
        # for the actual call, where p is mid_p).
        num = p.shape[0]  # number of total points
        ll = np.zeros([num - 2, 1], dtype=np.float32)  # column vector, so the outer product below works
        for i in range(num - 2):
            # formula: A - B
            # A: (p(i,2)-p(num,2))/(p(1,2)-p(num,2)); MATLAB 1,2 -> python 0,1 (x,y)
            if (p[0, 1] - p[num - 1, 1]) != 0:
                left_part = (p[i, 1] - p[num - 1, 1]) / (p[0, 1] - p[num - 1, 1])
            else:
                left_part = 0

            # B: (p(i,1)-p(num,1))/(p(1,1)-p(num,1))
            if (p[0, 0] - p[num - 1, 0]) != 0:
                right_part = (p[i, 0] - p[num - 1, 0]) / (p[0, 0] - p[num - 1, 0])
            else:
                right_part = 0

            ll[i] = left_part - right_part
        return ll

    mid_p = np.array(mid_p)
    ll = linefun(mid_p)
    # The outer product ll @ ll.T has a negative entry iff ll contains
    # mixed signs; then sum != sum(abs) and the curve is S-shaped.
    outer = np.matmul(ll, np.transpose(ll, [1, 0]))
    flag = np.sum(outer) != np.sum(np.abs(outer))
    return flag
322 |
def handle_isS_branch(pair_lr_value, max_ind1, max_ind2, heat_height):
    """Compute the a2/a3 Cobb angles when the spine is S-shaped.

    :param pair_lr_value: (lr, p, xy) paired coordinates, sorted top-down
    :param max_ind1: index of the upper bone of the largest angle
    :param max_ind2: index of the lower bone of the largest angle
    :param heat_height: heatmap height, used to decide whether the largest
        angle sits in the upper or lower half of the image
    :return: (a2, a3) angles in degrees
    """
    pair_lr_value = np.array(pair_lr_value)
    assert len(pair_lr_value.shape) == 3
    hmids = (pair_lr_value[0] + pair_lr_value[1]) / 2 # Horizontal mid points, [p][xy]
    hmids_y = hmids[:, 1] # [p]
    bones = bone_vectors(pair_lr_value)
    # Max angle in the upper part (sum of two midpoint ys < height is
    # equivalent to their mean being above the image center)
    if (hmids_y[max_ind1] + hmids_y[max_ind2]) < heat_height:
        print("max angle in upper part")
        # From 1st to ind1, largest angle
        if max_ind1==0:
            print("max_ind1 is already the first bone")
        top_bones = bones[:max_ind1+1] # Bones already sorted by y
        # [len_b][len_b] angle matrix
        am = angle_matrix(top_bones)
        # We want comparison of each top bones with just "ind1"
        # av: angle vector of each top bones with ind1 (last row)
        av = am[-1]
        a2 = np.max(av)

        # From last to ind2, largest angle
        if max_ind2 == bones.shape[0]-1:
            print("max_ind2 is already the last bone")
        end_bones = bones[max_ind2:]
        am = angle_matrix(end_bones)
        av = am[0]
        a3 = np.max(av)
        return a2, a3
    else: # Max angle in lower part
        print("max angle in lower part")
        if max_ind1==0: print("max_ind1 is already the first bone")
        top_bones = bones[:max_ind1+1] # Bones already sorted by y
        am = angle_matrix(top_bones)
        av = am[-1]
        a2 = np.max(av)
        # Bone that maximizes the angle with ind1 becomes the new anchor
        arg_order = np.argsort(av)
        top_max_index = arg_order[-1] # pos1_1
        # First to pos1_1
        top_bones = bones[:top_max_index+1]
        am = angle_matrix(top_bones)
        av = am[-1]
        a3 = np.max(av)
        return a2, a3
366 |
367 |
--------------------------------------------------------------------------------
/GaussianToPoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import load_utils\n",
11 | "import spine_augmentation as aug\n",
12 | "import confidence_map as cmap\n",
13 | "import ladder_shufflenet\n",
14 | "import torch.optim as optim\n",
15 | "import torch.nn as nn\n",
16 | "import torch\n",
17 | "import os.path as path\n",
18 | "import torchvision\n",
19 | "import matplotlib.pyplot as plt\n",
20 | "import cv2\n",
21 | "import torch.nn.functional as F\n",
22 | "from PIL import Image\n",
23 | "import folders as f\n",
24 | "import os\n",
25 | "import argparse\n"
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 3,
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "\n",
35 | "\n"
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {},
41 | "source": [
42 | "## Gaussian to point"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": 4,
48 | "metadata": {},
49 | "outputs": [],
50 | "source": []
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": 5,
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "\n",
59 | " "
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": 6,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": []
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": 7,
72 | "metadata": {},
73 | "outputs": [],
74 | "source": []
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": 8,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": []
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": 9,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": []
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": 10,
93 | "metadata": {},
94 | "outputs": [],
95 | "source": []
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": 11,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": []
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": 12,
107 | "metadata": {},
108 | "outputs": [],
109 | "source": []
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": 13,
114 | "metadata": {},
115 | "outputs": [],
116 | "source": []
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": 14,
121 | "metadata": {},
122 | "outputs": [],
123 | "source": []
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": 15,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": []
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": 16,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": []
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": 17,
142 | "metadata": {},
143 | "outputs": [],
144 | "source": []
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": 24,
149 | "metadata": {},
150 | "outputs": [],
151 | "source": []
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": 19,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": []
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "metadata": {},
163 | "source": [
164 | "Traverse all test images"
165 | ]
166 | },
167 | {
168 | "cell_type": "code",
169 | "execution_count": 25,
170 | "metadata": {},
171 | "outputs": [
172 | {
173 | "name": "stdout",
174 | "output_type": "stream",
175 | "text": [
176 | "01-July-2019-1\n",
177 | "01-July-2019-10\n",
178 | "01-July-2019-11\n",
179 | "01-July-2019-12\n"
180 | ]
181 | },
182 | {
183 | "ename": "KeyboardInterrupt",
184 | "evalue": "",
185 | "traceback": [
186 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
187 | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
188 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mwriter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwriterow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ml\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ml\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mresult_name_an123\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0mrun_on_submit_test\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnet\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
189 | "\u001b[0;32m\u001b[0m in \u001b[0;36mrun_on_submit_test\u001b[0;34m(net_heat)\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0mnp_paf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mout_paf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 33\u001b[0;31m \u001b[0mpred_angles\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mplot_img\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcobb_angles\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnp_pcm\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m4\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m6\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp_paf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp_img_ori\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 34\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0mresult_line\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfloat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpred_angles\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfloat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpred_angles\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfloat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpred_angles\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
190 | "\u001b[0;32m\u001b[0m in \u001b[0;36mcobb_angles\u001b[0;34m(np_pcm, np_paf, img)\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mlcrc_coords\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcenter_coords\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnp_pcm\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;31m# [p1_len][p2_len] coincidence rate of a point to another point\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mcoins\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcoincidence_rate_from_pcm_paf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlcrc_coords\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheat_hw\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp_paf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;31m# [lr][p_len] pairs of points, types are index values in lcrc_coords. equal length.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0mpair_lr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpairs_with_highest_confidence\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcoins\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
191 | "\u001b[0;32m\u001b[0m in \u001b[0;36mcoincidence_rate_from_pcm_paf\u001b[0;34m(lcrc_coord, hw, paf)\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0mcoins\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;31m# coincidence rate list, shape: [pt1_len][pt2_len]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mlc_pt\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mlc_coord\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mp1_masks\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mline_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlc_pt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrc_coord\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhw\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m#[p2_len][h][w]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0mp1_dist\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mline_dist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlc_pt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrc_coord\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0mcoin\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcoincidence_rate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpaf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mp1_masks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mp1_dist\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
192 | "\u001b[0;32m\u001b[0m in \u001b[0;36mline_mask\u001b[0;34m(pt1, pt2_list, hw)\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mzeros\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpt2_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhw\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhw\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muint8\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0mmasks_with_line\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mline\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi_pt2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mtuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpt1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mtuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpt2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m255\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi_pt2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpt2\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpt2_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0mmasks_01\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmasks_with_line\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0;36m255.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mmasks_01\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
193 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
194 | ],
195 | "output_type": "error"
196 | }
197 | ],
198 | "source": [
199 | "\n",
200 | "\n",
201 | "run_on_submit_test(net)"
202 | ]
203 | },
204 | {
205 | "cell_type": "code",
206 | "execution_count": 26,
207 | "metadata": {},
208 | "outputs": [
209 | {
210 | "name": "stdout",
211 | "output_type": "stream",
212 | "text": [
213 | "0 10.745457581700691\n",
214 | "[ 1.5084058 -1.94397769 6.94378349]\n",
215 | "1 19.505581095447607\n",
216 | "[ -1.5091869 -6.05721344 -10.75297346]\n",
217 | "2 25.99161922309971\n",
218 | "[-4.91196055 12.44701481 12.57052464]\n",
219 | "3 4.0159966221110395\n",
220 | "[2.61067779 1.34372897 1.53394882]\n",
221 | "4 16.933961157711614\n",
222 | "[ 2.80004315 18.53869197 -12.67894882]\n",
223 | "5 19.82118632206795\n",
224 | "[ -3.18643054 14.9860202 -33.05345073]\n",
225 | "6 13.62765856438369\n",
226 | "[0.02795464 3.67275107 1.65210357]\n",
227 | "7 2.8786704819389777\n",
228 | "[-1.37602277 2.67725468 1.78472255]\n",
229 | "8 27.169516602857545\n",
230 | "[-1.00420189 9.16234705 2.40185107]\n",
231 | "9 14.833186440049372\n",
232 | "[-6.72782107 -8.96875354 -3.08006753]\n",
233 | "10 4.929105413110176\n",
234 | "[-1.10448639 6.22656479 -0.95305118]\n",
235 | "11 7.230083025630425\n",
236 | "[-2.56380318 9.29010044 -2.52090362]\n",
237 | "12 12.507501146623442\n",
238 | "[ 1.48680927 -9.56819229 2.36900156]\n",
239 | "13 28.10537993331108\n",
240 | "[-1.005293 10.57833828 -8.78594828]\n",
241 | "14 8.357694016052232\n",
242 | "[ 1.282 -4.70391976 -10.19608024]\n",
243 | "15 18.321386007978766\n",
244 | "[-0.60275295 -2.29739465 3.67294169]\n",
245 | "16 6.750799290273641\n",
246 | "[ 1.21549809 5.29354927 -4.92505118]\n",
247 | "17 13.750750459595878\n",
248 | "[-0.70729966 5.93920235 2.33009799]\n",
249 | "18 9.154651108193468\n",
250 | "[ 2.45546686 -1.59534085 -4.08419229]\n",
251 | "19 17.14059215975687\n",
252 | "[ 1.1758457 -21.97591006 14.09075577]\n",
253 | "20 17.314635655429907\n",
254 | "[ 0.77233903 8.82772224 12.99461679]\n",
255 | "21 6.029745563598688\n",
256 | "[-0.39090873 2.3352479 -0.51565663]\n",
257 | "22 10.124044470330409\n",
258 | "[-1.05290592 -0.69583839 9.35253247]\n",
259 | "23 16.993204818456267\n",
260 | "[-0.28545162 6.24314057 18.97040781]\n",
261 | "24 7.241039252565634\n",
262 | "[-5.95580743 -1.49913505 -7.65467238]\n",
263 | "25 9.27753628414751\n",
264 | "[ -1.75069414 -3.64894882 -13.96374532]\n",
265 | "26 5.112801600591693\n",
266 | "[-0.37135185 0.84445641 -1.36850825]\n",
267 | "27 29.99641214080683\n",
268 | "[-1.85818702 8.30535528 -3.5743423 ]\n",
269 | "28 2.3356689785966855\n",
270 | "[1.72367182 3.28257544 0.03609638]\n",
271 | "29 2.8725579995902266\n",
272 | "[ 1.7919015 3.00289348 -0.14999197]\n",
273 | "30 9.212798675762041\n",
274 | "[ 2.60472344 0.48024347 -8.00452003]\n",
275 | "31 10.212364066535438\n",
276 | "[ 0.679943 7.25794882 -7.45100582]\n",
277 | "32 6.193460188881626\n",
278 | "[ 0.03320522 -4.55852806 3.30173328]\n",
279 | "33 5.316479393196196\n",
280 | "[-2.69312972 2.34982406 -3.43995378]\n",
281 | "34 8.16372810368776\n",
282 | "[ 4.55559314 0.95354196 -11.54294882]\n",
283 | "35 18.710082518941807\n",
284 | "[ 1.54673796 14.03624347 -12.03150551]\n",
285 | "36 35.94122064728563\n",
286 | "[-10.07596457 -13.98135771 8.58369314]\n",
287 | "37 26.42869619885479\n",
288 | "[-21.07539361 -6.31232762 -3.08266599]\n",
289 | "38 13.767768131954591\n",
290 | "[-1.87129636 -5.54848008 -7.76681627]\n",
291 | "39 26.321054615267165\n",
292 | "[-1.11957288 -5.5956 1.84352712]\n",
293 | "40 38.9688881943194\n",
294 | "[11.27307879 -0.58162296 13.77730174]\n",
295 | "41 20.447468883775397\n",
296 | "[-3.78559383 1.91854122 22.61986495]\n",
297 | "42 7.789351329359364\n",
298 | "[ 1.39292731 7.63369281 -5.5087655 ]\n",
299 | "43 13.41410164770214\n",
300 | "[ 5.51123833 -4.29200514 -5.63175653]\n",
301 | "44 28.354208535580504\n",
302 | "[-12.30228639 -22.87348008 6.3974937 ]\n",
303 | "45 7.8411006018736575\n",
304 | "[ -4.1379294 -3.22230713 -11.34362227]\n",
305 | "46 10.438217257178776\n",
306 | "[ 6.11771416 8.22512365 -4.87740949]\n",
307 | "47 11.799797432043512\n",
308 | "[ 2.04041875 -18.99069281 -4.53088844]\n",
309 | "48 11.174061804054523\n",
310 | "[ -8.76759203 4.77605118 -11.63764321]\n",
311 | "49 30.537706919932845\n",
312 | "[-11.68642081 -15.82498365 5.82406284]\n",
313 | "50 15.964715946546693\n",
314 | "[ -3.63879771 -13.41735679 9.34555909]\n",
315 | "51 6.71670800773936\n",
316 | "[ 7.27820226 -2.1353959 10.05359816]\n",
317 | "52 9.976083878940555\n",
318 | "[-5.66089765 6.86605713 -9.50195478]\n",
319 | "53 13.30846872225065\n",
320 | "[ -2.31119688 7.14210652 -11.6033034 ]\n",
321 | "54 14.729404122987798\n",
322 | "[-1.38856944 19.98156561 -9.07213505]\n",
323 | "55 18.32231236874439\n",
324 | "[ 2.4220964 -19.02195478 13.63405118]\n",
325 | "56 28.086787409529563\n",
326 | "[-4.07815723 6.00900596 -5.08916319]\n",
327 | "57 2.554185489592741\n",
328 | "[-2.10824237 3.21005118 -1.19729355]\n",
329 | "58 15.941630685229699\n",
330 | "[ 5.38909302 -8.15600514 -6.20390184]\n",
331 | "59 11.682072580270997\n",
332 | "[-0.46661894 -9.94975577 -9.17186317]\n",
333 | "60 5.025544252269767\n",
334 | "[-3.47249476 -0.58308772 7.15359297]\n",
335 | "61 8.988254777234795\n",
336 | "[ -4.51824063 5.39475354 -10.74799418]\n",
337 | "62 14.736526508545053\n",
338 | "[-1.71951297 -1.92941823 24.94390526]\n",
339 | "63 26.88860851521796\n",
340 | "[-5.91645389 6.00900596 -0.62605985]\n",
341 | "64 11.536519687899666\n",
342 | "[ 2.10126048 4.45113683 -15.48587635]\n",
343 | "65 9.956745031888053\n",
344 | "[-4.21108522 -6.49814057 12.47105534]\n",
345 | "66 8.728285132030805\n",
346 | "[-0.28504819 15.14119528 -6.55724347]\n",
347 | "67 15.4406988179882\n",
348 | "[-2.31191786 -8.57600514 -8.07791272]\n",
349 | "68 22.336764742945956\n",
350 | "[ -6.16483241 -19.88833465 26.27310225]\n",
351 | "69 15.631186572542829\n",
352 | "[ 2.80487871 -8.84267779 2.7012565 ]\n",
353 | "70 10.11222472457242\n",
354 | "[-0.64464133 7.47475744 14.17560123]\n",
355 | "71 24.061964465614167\n",
356 | "[-8.43753805 -7.84567779 11.31453974]\n",
357 | "72 20.334622201671387\n",
358 | "[ -4.63370413 -15.65105982 -3.97964431]\n",
359 | "73 7.900566240507902\n",
360 | "[ 1.62697715 5.66565494 -1.40667779]\n",
361 | "74 11.606751402206303\n",
362 | "[ 1.33954309 -15.43234582 3.34888891]\n",
363 | "75 15.306055778623396\n",
364 | "[-14.79629826 -19.58499486 0.8976966 ]\n",
365 | "76 7.3074661633390505\n",
366 | "[ 0.86752269 -1.67824292 3.30106561]\n",
367 | "77 22.56436502601879\n",
368 | "[ -2.92622522 -23.52910652 23.6126813 ]\n",
369 | "78 14.75838616849099\n",
370 | "[ 5.49625006 3.61424423 12.67000582]\n",
371 | "79 30.966621738372147\n",
372 | "[-5.2932604 -4.75789477 -1.50186563]\n",
373 | "80 23.924508883345812\n",
374 | "[-2.81375167 15.0173359 8.16685243]\n",
375 | "81 41.55991188196195\n",
376 | "[-11.74160509 12.09475708 15.70863783]\n",
377 | "82 19.735824108782847\n",
378 | "[ -4.39106407 -19.62808772 12.62312366]\n",
379 | "83 9.719077345251321\n",
380 | "[-0.52783683 0.38716161 10.11980156]\n",
381 | "84 10.897852754444054\n",
382 | "[ -3.11054784 13.09818544 -13.68973328]\n",
383 | "85 13.103251466779916\n",
384 | "[ 1.75634759 -10.83560123 -8.16605118]\n",
385 | "86 4.6292179947577425\n",
386 | "[-0.14875584 0.20164179 -1.85219763]\n",
387 | "87 10.753007390358185\n",
388 | "[ -0.49088646 -17.32684124 -3.04904522]\n",
389 | "88 25.995615272361803\n",
390 | "[ 2.69348823 17.76675653 30.89525169]\n",
391 | "89 24.66109487642596\n",
392 | "[ -6.16861208 -16.92772308 5.519111 ]\n",
393 | "90 23.299262251400563\n",
394 | "[ -7.4531367 -10.21666796 9.92573126]\n",
395 | "91 6.175949884933661\n",
396 | "[-5.35810913 0.76664442 -2.87575354]\n",
397 | "92 13.87421384415754\n",
398 | "[-0.07100537 8.96196063 -5.253366 ]\n",
399 | "93 19.36312391492729\n",
400 | "[ -5.2043791 -18.79760604 14.93842694]\n",
401 | "94 17.39046291975259\n",
402 | "[-9.79361413 -8.41645073 13.28873661]\n",
403 | "95 5.111566798111219\n",
404 | "[-3.10155422 -1.630508 -4.92204622]\n",
405 | "96 14.859698232884524\n",
406 | "[ 5.50069664 -4.35193736 -3.996366 ]\n",
407 | "97 14.077914311745385\n",
408 | "[ -2.64483677 -15.66408024 1.98824347]\n",
409 | "98 18.21041489590537\n",
410 | "[ -3.05928863 -10.49964431 -9.10564431]\n",
411 | "99 24.62031416416703\n",
412 | "[ -0.37830632 -12.55579529 23.96248897]\n",
413 | "100 14.7675146342503\n",
414 | "[-0.48623762 5.0667059 21.19405648]\n",
415 | "101 6.063747151164773\n",
416 | "[ 2.89955896 7.11156479 -0.05300582]\n",
417 | "102 13.069282427696717\n",
418 | "[-0.32606753 -3.32450825 2.42244073]\n",
419 | "103 13.508319750569447\n",
420 | "[6.56652032 4.54213683 4.60198349]\n",
421 | "104 14.53029530593496\n",
422 | "[ -8.48600007 19.88465797 -14.02365804]\n",
423 | "105 20.852093402157177\n",
424 | "[-1.93003459 -3.47063695 6.17850235]\n",
425 | "106 8.784312370107319\n",
426 | "[-4.38823034 14.54870213 -4.41893247]\n",
427 | "107 9.205558168774047\n",
428 | "[ 1.3431187 13.43506753 -6.66194882]\n",
429 | "108 4.601629454078718\n",
430 | "[ 1.46491462 -4.37800514 -0.10408024]\n",
431 | "109 30.249584149759283\n",
432 | "[ -4.50920498 20.29763468 -22.05345967]\n",
433 | "110 19.530482330611548\n",
434 | "[ -2.05701635 -17.15637773 -17.55663862]\n",
435 | "111 31.437060696404153\n",
436 | "[-17.67987399 -2.69682281 18.43494882]\n",
437 | "112 10.976255263989954\n",
438 | "[ 4.83756945 7.5234344 -6.71486495]\n",
439 | "113 5.65863974228098\n",
440 | "[-1.96837955 -4.21332837 2.23094882]\n",
441 | "114 3.880363505871312\n",
442 | "[-1.75486145 -1.72091263 3.57505118]\n",
443 | "115 7.520531115446914\n",
444 | "[-0.31729502 -8.14959051 6.07529549]\n",
445 | "116 12.851673745328718\n",
446 | "[ 3.43051469 -16.99950551 10.2000202 ]\n",
447 | "117 4.927491459259981\n",
448 | "[ 0.07957276 5.58722694 -2.90965418]\n",
449 | "118 7.678569276917835\n",
450 | "[-5.62129849 -3.724889 -3.53640949]\n",
451 | "119 9.05538114497923\n",
452 | "[7.97220374 3.6157168 5.37148694]\n",
453 | "120 5.180790880545596\n",
454 | "[-0.18964064 7.2936914 -6.22233204]\n",
455 | "121 17.221514366950363\n",
456 | "[ 1.40750292 -9.11711388 -6.66538321]\n",
457 | "122 14.74633470443486\n",
458 | "[ 4.44692733 -9.12806753 -2.14000514]\n",
459 | "123 15.989481609421155\n",
460 | "[ -3.33724872 -14.15197769 -5.53027103]\n",
461 | "124 16.0279564767287\n",
462 | "[-14.93434705 -12.35175653 -0.07559051]\n",
463 | "125 11.302879897681024\n",
464 | "[ -1.67405994 -12.5143034 1.65724347]\n",
465 | "126 9.999161109220013\n",
466 | "[-0.16189087 -4.35594259 1.64025172]\n",
467 | "127 29.9847566827745\n",
468 | "[ -9.61770535 -22.05475653 20.40335118]\n",
469 | "14.711995701076575\n"
470 | ]
471 | }
472 | ],
473 | "source": [
474 | "\n"
475 | ]
476 | },
477 | {
478 | "cell_type": "code",
479 | "execution_count": 27,
480 | "metadata": {},
481 | "outputs": [
482 | {
483 | "name": "stdout",
484 | "output_type": "stream",
485 | "text": [
486 | "14.711995701076575\n"
487 | ]
488 | }
489 | ],
490 | "source": [
491 | "print(np.mean(avg_smape))"
492 | ]
493 | },
494 | {
495 | "cell_type": "markdown",
496 | "metadata": {},
497 | "source": [
498 | "validation set\n",
499 | "compare with (1,0) straight line: 13.8 \n",
500 | "compare with (1,0) straight line: 14.7 (delete some bones)\n",
501 | "compare with most upper and lower: 20.21 (delete some bones)"
502 | ]
503 | }
504 | ],
505 | "metadata": {
506 | "kernelspec": {
507 | "display_name": "Python 3",
508 | "language": "python",
509 | "name": "python3"
510 | },
511 | "language_info": {
512 | "codemirror_mode": {
513 | "name": "ipython",
514 | "version": 3
515 | },
516 | "file_extension": ".py",
517 | "mimetype": "text/x-python",
518 | "name": "python",
519 | "nbconvert_exporter": "python",
520 | "pygments_lexer": "ipython3",
521 | "version": "3.7.3"
522 | }
523 | },
524 | "nbformat": 4,
525 | "nbformat_minor": 2
526 | }
527 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |
635 | Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program.  If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |
635 | Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program.  If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------