├── CSANet
│   ├── datasets
│   │   └── dataset_CSANet.py
│   ├── loss_function.py
│   ├── networks
│   │   ├── __pycache__
│   │   │   ├── vit_seg_configs.cpython-39.pyc
│   │   │   ├── vit_seg_modeling.cpython-39.pyc
│   │   │   ├── vit_seg_modeling32.cpython-39.pyc
│   │   │   ├── vit_seg_modeling4.cpython-39.pyc
│   │   │   ├── vit_seg_modeling4_wm.cpython-39.pyc
│   │   │   ├── vit_seg_modeling8.cpython-39.pyc
│   │   │   ├── vit_seg_modeling_SA.cpython-39.pyc
│   │   │   ├── vit_seg_modeling_og.cpython-39.pyc
│   │   │   ├── vit_seg_modeling_resnet_skip.cpython-39.pyc
│   │   │   └── vit_seg_modeling_wcs.cpython-39.pyc
│   │   ├── vit_seg_configs.py
│   │   ├── vit_seg_modeling.py
│   │   └── vit_seg_modeling_resnet_skip.py
│   ├── test.py
│   ├── train.py
│   ├── trainer.py
│   ├── utils.py
│   └── visualization.py
├── LICENSE
├── README.md
├── data
│   └── README.md
├── model
│   └── vit_checkpoint
│       └── imagenet21k
│           └── README.md
├── preprocessing.py
├── requirements.txt
└── utils
    └── lists
        ├── test_vol.txt
        ├── train_image.txt
        └── train_mask.txt

/CSANet/datasets/dataset_CSANet.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import h5py
4 | import numpy as np
5 | import torch
6 | from scipy import ndimage
7 | from scipy.ndimage.interpolation import zoom
8 | from torch.utils.data import Dataset
9 | import SimpleITK as sitk
10 | import os
11 | import SimpleITK as sitk
12 | from PIL import Image
13 | import numpy as np
14 | import cv2
15 | 
16 | def random_horizontal_flip(image, next_image, prev_image, segmentation):
17 |     # Randomly decide whether to flip this sample
18 |     flip = random.choice([True, False])
19 | 
20 |     # Perform horizontal flipping if flip is True
21 |     if flip:
22 |         flipped_image = np.fliplr(image)
23 |         flipped_next_image = np.fliplr(next_image)
24 |         flipped_prev_image = np.fliplr(prev_image)
25 |         flipped_segmentation = np.fliplr(segmentation)
26 |     else:
27 |         flipped_image = image
28 |         flipped_next_image = next_image
29 |         flipped_prev_image = prev_image
30 |         flipped_segmentation = segmentation
31 | 
32 |     return flipped_image, flipped_next_image, flipped_prev_image, flipped_segmentation
33 | 
34 | 
35 | 
36 | class RandomGenerator(object):
37 |     """
38 |     Applies random transformations to a sample, including horizontal flips and resizing to a target size.
39 | 
40 |     Parameters:
41 |         output_size (tuple): Desired output dimensions (height, width) for the images and labels.
42 |     """
43 |     def __init__(self, output_size):
44 |         self.output_size = output_size
45 | 
46 |     def __call__(self, sample):
47 |         # Unpack the sample dictionary into individual components
48 |         image, label = sample['image'], sample['label']
49 |         next_image, prev_image = sample['next_image'], sample['prev_image']
50 | 
51 |         # Apply a random horizontal flip to the images and label
52 |         image, next_image, prev_image, label = random_horizontal_flip(image, next_image, prev_image, label)
53 |         # Check if the current size matches the desired output size
54 |         x, y = image.shape
55 |         if x != self.output_size[0] or y != self.output_size[1]:
56 |             # Rescale images to match the specified output size using cubic interpolation
57 |             image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=3)
58 |             next_image = zoom(next_image, (self.output_size[0] / x, self.output_size[1] / y), order=3)
59 |             prev_image = zoom(prev_image, (self.output_size[0] / x, self.output_size[1] / y), order=3)
60 |             # Rescale the label using nearest-neighbor interpolation (order=0) to avoid creating new labels
61 |             label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0)
62 | 
63 |         # Convert numpy arrays to PyTorch tensors and add a channel dimension to the images
64 |         image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)
65 |         label = torch.from_numpy(label.astype(np.float32))
66 |         next_image = torch.from_numpy(next_image.astype(np.float32)).unsqueeze(0)
67 |         prev_image = torch.from_numpy(prev_image.astype(np.float32)).unsqueeze(0)
68 |         # Return the modified sample as a dictionary
69 |         sample = {'image': image, 'next_image': next_image, 'prev_image': prev_image, 'label': label.long()}
70 |         return sample
71 | 
72 | 
73 | def extract_and_increase_number(file_name):
74 |     """
75 |     Generates the filenames of the next and previous slices by incrementing and decrementing the numerical part of a given filename.
76 | 
77 |     Parameters:
78 |         file_name (str): The original filename from which to derive the next and previous filenames.
79 |                          The filename must end with a numerical value preceded by an underscore.
80 | 
81 |     Returns:
82 |         tuple: Contains two strings, the first being the next filename in the sequence and the second
83 |                the previous filename in the sequence. If the original number is 0, the previous filename
84 |                also uses 0 to avoid negative numbering.
85 |     """
86 |     parts = file_name.rsplit("_", 1)
87 |     parts_next = parts[0]
88 |     parts_prev = parts[0]
89 |     number = int(parts[1])
90 | 
91 |     next_number = number + 1
92 |     prev_number = number - 1
93 |     if prev_number == -1:
94 |         prev_number = 0  # clamp at the first slice instead of going negative
95 | 
96 |     next_numbers = str(next_number)
97 |     prev_numbers = str(prev_number)
98 |     next_file_name = parts_next + "_" + next_numbers
99 |     prev_file_name = parts_prev + "_" + prev_numbers
100 | 
101 |     return next_file_name, prev_file_name
102 | 
103 | 
104 | 
105 | def check_and_create_file(file_name, image_name, folder_path):
106 |     file_path = os.path.join(folder_path, "trainingImages", file_name + '.npy')
107 |     if os.path.exists(file_path):
108 |         return file_name
109 |     else:
110 |         available_name = image_name
111 |         return available_name
112 | 
113 | 
114 | class CSANet_dataset(Dataset):
115 |     """
116 |     Dataset handler for CSANet, designed to manage image and mask data for the training and testing phases.
117 | 
118 |     Attributes:
119 |         base_dir (str): Directory where image and mask data are stored.
120 |         list_dir (str): Directory where the lists of data splits are located.
121 |         split (str): The current dataset split, indicating the training or testing phase.
122 |         transform (callable, optional): A function/transform to apply to the samples.
123 | 
124 |     Note:
125 |         This class expects directory structures and file naming conventions that match the specifics
126 |         given in the initialization arguments.
127 |     """
128 | 
129 |     def __init__(self, base_dir, list_dir, split, transform=None):
130 |         self.transform = transform  # using transform in torch!
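        # Expected layout, inferred from the loading code in this class: `list_dir` holds the
        # split lists (train_image.txt, train_mask.txt, test_vol.txt), while `base_dir` holds
        # per-slice .npy files under trainingImages/ and trainingMasks/ (named <case>_<sliceIndex>)
        # plus full test volumes under testVol/ and testMask/ that are read with SimpleITK.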
131 | self.split = split 132 | self.sample_list = open(os.path.join(list_dir, self.split+'.txt')).readlines() 133 | self.image_sample_list = open(os.path.join(list_dir, 'train_image.txt')).readlines() 134 | self.mask_sample_list = open(os.path.join(list_dir, 'train_mask.txt')).readlines() 135 | self.data_dir = base_dir 136 | 137 | def __len__(self): 138 | return len(self.sample_list) 139 | 140 | def __getitem__(self, idx): 141 | if self.split == "train_image" or self.split == "train_image_train" or self.split == "train_image_test": 142 | 143 | slice_name = self.image_sample_list[idx].strip('\n') 144 | image_data_path = os.path.join(self.data_dir, "trainingImages", slice_name+'.npy') 145 | image = np.load(image_data_path) 146 | #print("##################################### image path = ", image_data_path) 147 | # Manage sequence continuity by fetching adjacent slices 148 | next_file_name, prev_file_name = extract_and_increase_number(slice_name) 149 | 150 | next_file_name = check_and_create_file (next_file_name, slice_name, self.data_dir) 151 | prev_file_name = check_and_create_file (prev_file_name, slice_name, self.data_dir) 152 | 153 | 154 | next_image_path = os.path.join(self.data_dir, "trainingImages", next_file_name +'.npy') 155 | prev_image_path = os.path.join(self.data_dir, "trainingImages", prev_file_name +'.npy') 156 | 157 | next_image = np.load(next_image_path) 158 | prev_image = np.load(prev_image_path) 159 | 160 | 161 | mask_name = self.mask_sample_list[idx].strip('\n') 162 | label_data_path = os.path.join(self.data_dir, "trainingMasks", mask_name+'.npy') 163 | #print("############################################# label path = ", label_data_path) 164 | label = np.load(label_data_path) 165 | 166 | sample = {'image': image, 'next_image': next_image, 'prev_image': prev_image, 'label': label} 167 | 168 | if self.transform: 169 | sample = self.transform(sample) # Apply transformations if specified 170 | sample['case_name'] = self.sample_list[idx].strip('\n') 171 | return sample 172 | else: 173 | # Handling testing data, assuming single volume processing 174 | vol_name = self.sample_list[idx].strip('\n') 175 | image_data_path = os.path.join(self.data_dir, "testVol", vol_name) 176 | label_data_path = os.path.join(self.data_dir, "testMask", vol_name) 177 | 178 | image_new = sitk.ReadImage(image_data_path) 179 | img = sitk.GetArrayFromImage(image_new) 180 | 181 | 182 | next_image = sitk.GetArrayFromImage(image_new).astype(np.float64) 183 | prev_image = sitk.GetArrayFromImage(image_new).astype(np.float64) 184 | 185 | # Preprocess image data for testing phase 186 | combined_slices = sitk.GetArrayFromImage(image_new).astype(np.float64) 187 | 188 | 189 | for i in range(img.shape[0]): 190 | img_array = img[i, :, :].astype(np.uint8) 191 | p1 = np.percentile(img_array, 1) 192 | p99 = np.percentile(img_array, 99) 193 | 194 | normalized_img = (img_array - p1) / (p99 - p1) 195 | normalized_img = np.clip(normalized_img, 0, 1) 196 | 197 | combined_slices[i,:,:] = normalized_img 198 | 199 | if i-1 > -1 : 200 | next_image[i-1,:,:] = combined_slices[i,:,:] 201 | 202 | if i-1<0: 203 | prev_image[i,:,:] = combined_slices[i,:,:] 204 | else : 205 | prev_image[i,:,:] = combined_slices[i-1,:,:] 206 | 207 | next_image[img.shape[0]-1,:,:] = combined_slices[img.shape[0]-1,:,:] 208 | 209 | segmentation = sitk.ReadImage(label_data_path) 210 | label = sitk.GetArrayFromImage(segmentation) 211 | sample = {'image': combined_slices, 'next_image': next_image, 'prev_image': prev_image, 'label': label} 212 | if 
self.transform: 213 | sample = self.transform(sample) # Apply transformations if specified 214 | num_string = self.sample_list[idx].strip('\n') 215 | case_num = num_string.split('.')[0] 216 | sample['case_name'] = case_num 217 | return sample 218 | 219 | 220 | -------------------------------------------------------------------------------- /CSANet/loss_function.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from medpy import metric 4 | from scipy.ndimage import zoom 5 | import torch.nn as nn 6 | import SimpleITK as sitk 7 | import os 8 | import nibabel as nib 9 | from skimage.measure import label, regionprops 10 | import scipy.ndimage as ndi 11 | import math 12 | 13 | class DiceLoss(nn.Module): 14 | """ 15 | Implements a Dice loss for evaluating segmentation performance, where Dice loss is a measure of overlap 16 | between two samples and can be used as a loss function for training deep learning models for segmentation tasks. 17 | 18 | Attributes: 19 | - n_classes (int): Number of classes for segmentation. 20 | """ 21 | def __init__(self, n_classes): 22 | """ 23 | Initializes the DiceLoss module with the number of classes. 24 | 25 | Parameters: 26 | - n_classes (int): Number of segmentation classes. 27 | """ 28 | super(DiceLoss, self).__init__() 29 | self.n_classes = n_classes 30 | 31 | def _one_hot_encoder(self, input_tensor): 32 | """ 33 | Converts a tensor of indices of a categorical variable into a one-hot encoded format. 34 | 35 | Parameters: 36 | - input_tensor (torch.Tensor): Tensor containing indices that will be one-hot encoded. 37 | 38 | Returns: 39 | - torch.Tensor: One-hot encoded tensor. 40 | """ 41 | tensor_list = [] 42 | for i in range(self.n_classes): 43 | temp_prob = input_tensor == i 44 | tensor_list.append(temp_prob.unsqueeze(1)) 45 | output_tensor = torch.cat(tensor_list, dim=1) 46 | return output_tensor.float() 47 | 48 | def _dice_loss(self, score, target): 49 | """ 50 | Computes the Dice loss between the predicted scores and the one-hot encoded target. 51 | 52 | Parameters: 53 | - score (torch.Tensor): Predicted scores for each class. 54 | - target (torch.Tensor): One-hot encoded true labels. 55 | 56 | Returns: 57 | - float: Dice loss value. 58 | """ 59 | target = target.float() 60 | smooth = 1e-5 61 | intersect = torch.sum(score * target) 62 | y_sum = torch.sum(target * target) 63 | z_sum = torch.sum(score * score) 64 | loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth) 65 | loss = 1 - loss 66 | return loss 67 | 68 | def forward(self, inputs, target, weight=None, softmax=False): 69 | """ 70 | Forward pass for calculating Dice loss for multiple classes. 71 | 72 | Parameters: 73 | - inputs (torch.Tensor): Input logits or softmax predictions. 74 | - target (torch.Tensor): Ground truth labels. 75 | - weight (list of float, optional): Class weights. 76 | - softmax (bool, optional): Whether to apply softmax to inputs before calculating loss. 77 | 78 | Returns: 79 | - float: Mean Dice loss across all classes. 
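          (As implemented below, the per-class Dice losses are summed and the total is divided
          by n_classes - 1 rather than n_classes.)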
80 | """ 81 | if softmax: 82 | inputs = torch.softmax(inputs, dim=1) 83 | target = self._one_hot_encoder(target) 84 | if weight is None: 85 | weight = [1] * self.n_classes 86 | assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(), target.size()) 87 | class_wise_dice = [] 88 | loss = 0.0 89 | for i in range(0, self.n_classes): 90 | dice = self._dice_loss(inputs[:, i], target[:, i]) 91 | class_wise_dice.append(1.0 - dice.item()) 92 | loss += dice 93 | return loss / (self.n_classes - 1) -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_configs.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_configs.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_modeling.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_modeling.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_modeling32.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_modeling32.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_modeling4.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_modeling4.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_modeling4_wm.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_modeling4_wm.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_modeling8.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_modeling8.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_modeling_SA.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_modeling_SA.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_modeling_og.cpython-39.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_modeling_og.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_modeling_resnet_skip.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_modeling_resnet_skip.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/__pycache__/vit_seg_modeling_wcs.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mirthAI/CSA-Net/9be2dbe8d2247ab91d03f18bd8af92448a675ff9/CSANet/networks/__pycache__/vit_seg_modeling_wcs.cpython-39.pyc -------------------------------------------------------------------------------- /CSANet/networks/vit_seg_configs.py: -------------------------------------------------------------------------------- 1 | import ml_collections 2 | 3 | def get_b16_config(): 4 | """Returns the ViT-B/16 configuration.""" 5 | config = ml_collections.ConfigDict() 6 | config.patches = ml_collections.ConfigDict({'size': (16, 16)}) 7 | config.hidden_size = 768 8 | config.transformer = ml_collections.ConfigDict() 9 | config.transformer.mlp_dim = 3072 10 | config.transformer.num_heads = 12 11 | config.transformer.num_layers = 12 12 | config.transformer.attention_dropout_rate = 0.0 13 | config.transformer.dropout_rate = 0.1 14 | 15 | config.classifier = 'seg' 16 | config.representation_size = None 17 | config.resnet_pretrained_path = None 18 | config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-B_16.npz' 19 | config.patch_size = 16 20 | 21 | config.decoder_channels = (256, 128, 64, 16) 22 | config.n_classes = 2 23 | config.activation = 'softmax' 24 | return config 25 | 26 | 27 | def get_testing(): 28 | """Returns a minimal configuration for testing.""" 29 | config = ml_collections.ConfigDict() 30 | config.patches = ml_collections.ConfigDict({'size': (16, 16)}) 31 | config.hidden_size = 1 32 | config.transformer = ml_collections.ConfigDict() 33 | config.transformer.mlp_dim = 1 34 | config.transformer.num_heads = 1 35 | config.transformer.num_layers = 1 36 | config.transformer.attention_dropout_rate = 0.0 37 | config.transformer.dropout_rate = 0.1 38 | config.classifier = 'token' 39 | config.representation_size = None 40 | return config 41 | 42 | def get_r50_b16_config(): 43 | """Returns the Resnet50 + ViT-B/16 configuration.""" 44 | config = get_b16_config() 45 | config.patches.grid = (16, 16) 46 | config.resnet = ml_collections.ConfigDict() 47 | config.resnet.num_layers = (3, 4, 9) 48 | config.resnet.width_factor = 1 49 | 50 | config.classifier = 'seg' 51 | config.pretrained_path = '../model/vit_checkpoint/imagenet21k/R50+ViT-B_16.npz' 52 | config.decoder_channels = (256, 128, 64, 16) 53 | config.skip_channels = [512, 256, 64, 16] 54 | config.n_classes = 2 55 | config.n_skip = 3 56 | config.activation = 'softmax' 57 | 58 | return config 59 | 60 | 61 | def get_b32_config(): 62 | """Returns the ViT-B/32 configuration.""" 63 | config = get_b16_config() 64 | config.patches.size = (32, 32) 65 | config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-B_32.npz' 66 | return config 67 | 68 | 69 | def get_l16_config(): 70 | """Returns the ViT-L/16 configuration.""" 71 | config = 
ml_collections.ConfigDict() 72 | config.patches = ml_collections.ConfigDict({'size': (16, 16)}) 73 | config.hidden_size = 1024 74 | config.transformer = ml_collections.ConfigDict() 75 | config.transformer.mlp_dim = 4096 76 | config.transformer.num_heads = 16 77 | config.transformer.num_layers = 24 78 | config.transformer.attention_dropout_rate = 0.0 79 | config.transformer.dropout_rate = 0.1 80 | config.representation_size = None 81 | 82 | # custom 83 | config.classifier = 'seg' 84 | config.resnet_pretrained_path = None 85 | config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-L_16.npz' 86 | config.decoder_channels = (256, 128, 64, 16) 87 | config.n_classes = 2 88 | config.activation = 'softmax' 89 | return config 90 | 91 | 92 | def get_r50_l16_config(): 93 | """Returns the Resnet50 + ViT-L/16 configuration. customized """ 94 | config = get_l16_config() 95 | config.patches.grid = (16, 16) 96 | config.resnet = ml_collections.ConfigDict() 97 | config.resnet.num_layers = (3, 4, 9) 98 | config.resnet.width_factor = 1 99 | 100 | config.classifier = 'seg' 101 | config.resnet_pretrained_path = '../model/vit_checkpoint/imagenet21k/R50+ViT-B_16.npz' 102 | config.decoder_channels = (256, 128, 64, 16) 103 | config.skip_channels = [512, 256, 64, 16] 104 | config.n_classes = 2 105 | config.activation = 'softmax' 106 | return config 107 | 108 | 109 | def get_l32_config(): 110 | """Returns the ViT-L/32 configuration.""" 111 | config = get_l16_config() 112 | config.patches.size = (32, 32) 113 | return config 114 | 115 | 116 | def get_h14_config(): 117 | """Returns the ViT-L/16 configuration.""" 118 | config = ml_collections.ConfigDict() 119 | config.patches = ml_collections.ConfigDict({'size': (14, 14)}) 120 | config.hidden_size = 1280 121 | config.transformer = ml_collections.ConfigDict() 122 | config.transformer.mlp_dim = 5120 123 | config.transformer.num_heads = 16 124 | config.transformer.num_layers = 32 125 | config.transformer.attention_dropout_rate = 0.0 126 | config.transformer.dropout_rate = 0.1 127 | config.classifier = 'token' 128 | config.representation_size = None 129 | 130 | return config 131 | -------------------------------------------------------------------------------- /CSANet/networks/vit_seg_modeling.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | from __future__ import absolute_import 3 | from __future__ import division 4 | from __future__ import print_function 5 | 6 | import copy 7 | import logging 8 | import math 9 | import torch.nn.functional as F 10 | from os.path import join as pjoin 11 | import torch 12 | import torch.nn as nn 13 | import numpy as np 14 | from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm 15 | from torch.nn.modules.utils import _pair 16 | from scipy import ndimage 17 | from . 
import vit_seg_configs as configs 18 | from .vit_seg_modeling_resnet_skip import ResNetV2 19 | 20 | 21 | logger = logging.getLogger(__name__) 22 | 23 | 24 | ATTENTION_Q = "MultiHeadDotProductAttention_1/query" 25 | ATTENTION_K = "MultiHeadDotProductAttention_1/key" 26 | ATTENTION_V = "MultiHeadDotProductAttention_1/value" 27 | ATTENTION_OUT = "MultiHeadDotProductAttention_1/out" 28 | FC_0 = "MlpBlock_3/Dense_0" 29 | FC_1 = "MlpBlock_3/Dense_1" 30 | ATTENTION_NORM = "LayerNorm_0" 31 | MLP_NORM = "LayerNorm_2" 32 | 33 | 34 | def np2th(weights, conv=False): 35 | """Possibly convert HWIO to OIHW.""" 36 | if conv: 37 | weights = weights.transpose([3, 2, 0, 1]) 38 | return torch.from_numpy(weights) 39 | 40 | 41 | def swish(x): 42 | return x * torch.sigmoid(x) 43 | 44 | 45 | ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish} 46 | 47 | # Attention module definition 48 | class Attention(nn.Module): 49 | def __init__(self, config, vis): 50 | super(Attention, self).__init__() 51 | self.vis = vis 52 | self.num_attention_heads = config.transformer["num_heads"] 53 | self.attention_head_size = int(config.hidden_size / self.num_attention_heads) 54 | self.all_head_size = self.num_attention_heads * self.attention_head_size 55 | 56 | self.query = Linear(config.hidden_size, self.all_head_size) 57 | self.key = Linear(config.hidden_size, self.all_head_size) 58 | self.value = Linear(config.hidden_size, self.all_head_size) 59 | 60 | self.out = Linear(config.hidden_size, config.hidden_size) 61 | self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"]) 62 | self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"]) 63 | 64 | self.softmax = Softmax(dim=-1) 65 | 66 | def transpose_for_scores(self, x): 67 | new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) 68 | x = x.view(*new_x_shape) 69 | return x.permute(0, 2, 1, 3) 70 | 71 | def forward(self, hidden_states): 72 | mixed_query_layer = self.query(hidden_states) 73 | mixed_key_layer = self.key(hidden_states) 74 | mixed_value_layer = self.value(hidden_states) 75 | 76 | query_layer = self.transpose_for_scores(mixed_query_layer) 77 | key_layer = self.transpose_for_scores(mixed_key_layer) 78 | value_layer = self.transpose_for_scores(mixed_value_layer) 79 | 80 | attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) 81 | attention_scores = attention_scores / math.sqrt(self.attention_head_size) 82 | attention_probs = self.softmax(attention_scores) 83 | weights = attention_probs if self.vis else None 84 | attention_probs = self.attn_dropout(attention_probs) 85 | 86 | context_layer = torch.matmul(attention_probs, value_layer) 87 | context_layer = context_layer.permute(0, 2, 1, 3).contiguous() 88 | new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) 89 | context_layer = context_layer.view(*new_context_layer_shape) 90 | attention_output = self.out(context_layer) 91 | attention_output = self.proj_dropout(attention_output) 92 | return attention_output, weights 93 | 94 | # Multi-Layer Perceptron (MLP) module definitio 95 | class Mlp(nn.Module): 96 | def __init__(self, config): 97 | super(Mlp, self).__init__() 98 | self.fc1 = Linear(config.hidden_size, config.transformer["mlp_dim"]) 99 | self.fc2 = Linear(config.transformer["mlp_dim"], config.hidden_size) 100 | self.act_fn = ACT2FN["gelu"] 101 | self.dropout = Dropout(config.transformer["dropout_rate"]) 102 | 103 | self._init_weights() 104 | 105 | def _init_weights(self): 106 | 
nn.init.xavier_uniform_(self.fc1.weight) 107 | nn.init.xavier_uniform_(self.fc2.weight) 108 | nn.init.normal_(self.fc1.bias, std=1e-6) 109 | nn.init.normal_(self.fc2.bias, std=1e-6) 110 | 111 | def forward(self, x): 112 | x = self.fc1(x) 113 | x = self.act_fn(x) 114 | x = self.dropout(x) 115 | x = self.fc2(x) 116 | x = self.dropout(x) 117 | return x 118 | 119 | # Non-Local Block for multi-cross attention 120 | class NLBlockND_multicross_block(nn.Module): 121 | """ 122 | Non-Local Block for multi-cross attention. 123 | 124 | Args: 125 | in_channels (int): Number of input channels. 126 | inter_channels (int, optional): Number of intermediate channels. Defaults to None. 127 | 128 | Attributes: 129 | in_channels (int): Number of input channels. 130 | inter_channels (int): Number of intermediate channels. 131 | g (nn.Conv2d): Convolutional layer for the 'g' branch. 132 | final (nn.Conv2d): Final convolutional layer. 133 | W_z (nn.Sequential): Sequential block containing a convolutional layer followed by batch normalization for weight 'z'. 134 | theta (nn.Conv2d): Convolutional layer for the 'theta' branch. 135 | phi (nn.Conv2d): Convolutional layer for the 'phi' branch. 136 | 137 | Methods: 138 | forward(x_thisBranch, x_otherBranch): Forward pass of the non-local block. 139 | 140 | """ 141 | def __init__(self, in_channels, inter_channels=None): 142 | super(NLBlockND_multicross_block, self).__init__() 143 | self.in_channels = in_channels 144 | self.inter_channels = inter_channels 145 | 146 | if self.inter_channels is None: 147 | self.inter_channels = in_channels // 2 148 | if self.inter_channels == 0: 149 | self.inter_channels = 1 150 | 151 | conv_nd = nn.Conv2d 152 | max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2)) 153 | bn = nn.BatchNorm2d 154 | 155 | self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1) 156 | self.final = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1) 157 | self.W_z = nn.Sequential( 158 | conv_nd(in_channels=self.inter_channels, out_channels=self.inter_channels, kernel_size=1), 159 | bn(self.inter_channels) 160 | ) 161 | 162 | nn.init.constant_(self.W_z[1].weight, 0) 163 | nn.init.constant_(self.W_z[1].bias, 0) 164 | 165 | self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1) 166 | self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1) 167 | 168 | def forward(self, x_thisBranch, x_otherBranch): 169 | batch_size = x_thisBranch.size(0) 170 | g_x = self.g(x_thisBranch).view(batch_size, self.inter_channels, -1) 171 | g_x = g_x.permute(0, 2, 1) 172 | 173 | theta_x = self.theta(x_thisBranch).view(batch_size, self.inter_channels, -1) 174 | phi_x = self.phi(x_otherBranch).view(batch_size, self.inter_channels, -1) 175 | phi_x = phi_x.permute(0, 2, 1) 176 | 177 | f = torch.matmul(phi_x, theta_x) 178 | f_div_C = F.softmax(f, dim=-1) 179 | 180 | y = torch.matmul(f_div_C, g_x) 181 | y = y.permute(0, 2, 1).contiguous() 182 | y = y.view(batch_size, self.inter_channels, *x_thisBranch.size()[2:]) 183 | 184 | z = self.W_z(y) 185 | return z 186 | 187 | # Multi-Cross Attention Block 188 | class NLBlockND_multicross(nn.Module): 189 | 190 | def __init__(self, in_channels, inter_channels=None): 191 | super(NLBlockND_multicross, self).__init__() 192 | self.in_channels = in_channels 193 | self.inter_channels = inter_channels 194 | 195 | if self.inter_channels is None: 196 | self.inter_channels = in_channels // 2 197 | if 
self.inter_channels == 0: 198 | self.inter_channels = 1 199 | self.cross_attention = NLBlockND_multicross_block(in_channels=1024, inter_channels=64) 200 | def forward(self, x_thisBranch, x_otherBranch): 201 | outputs = [] 202 | for i in range(16): 203 | cross_attention = NLBlockND_multicross_block(in_channels=1024, inter_channels=64) 204 | cross_attention = cross_attention.to('cuda') 205 | output = cross_attention(x_thisBranch,x_otherBranch) 206 | 207 | outputs.append(output) 208 | final_output = torch.cat(outputs, dim=1) 209 | #final_output = final_output + x_thisBranch #Changed 210 | return final_output 211 | 212 | 213 | class DoubleConv(nn.Module): 214 | def __init__(self, in_channels, out_channels, mid_channels=None): 215 | super().__init__() 216 | if not mid_channels: 217 | mid_channels = out_channels 218 | self.double_conv = nn.Sequential( 219 | nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False), 220 | nn.BatchNorm2d(mid_channels), 221 | nn.ReLU(inplace=True), 222 | nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False), 223 | nn.BatchNorm2d(out_channels), 224 | nn.ReLU(inplace=True) 225 | ) 226 | 227 | def forward(self, x): 228 | return self.double_conv(x) 229 | 230 | 231 | 232 | class DownCross(nn.Module): 233 | 234 | 235 | def __init__(self, in_channels, out_channels): 236 | super().__init__() 237 | self.maxpool_conv = nn.Sequential( 238 | DoubleConv(in_channels, out_channels) 239 | ) 240 | 241 | def forward(self, x): 242 | return self.maxpool_conv(x) 243 | 244 | 245 | # Embeddings module for constructing embeddings from patches and position embeddings 246 | class Embeddings(nn.Module): 247 | """Construct the embeddings from patch, position embeddings. 248 | """ 249 | def __init__(self, config, img_size, in_channels=3): 250 | super(Embeddings, self).__init__() 251 | self.hybrid = None 252 | self.hybrid_prev = None 253 | self.hybrid_next = None 254 | self.config = config 255 | img_size = _pair(img_size) 256 | 257 | self.cross_attention_multi_1 = NLBlockND_multicross(in_channels=1024, inter_channels=512) 258 | self.cross_attention_multi_2 = NLBlockND_multicross(in_channels=1024, inter_channels=512) 259 | self.cross_attention_multi_3 = NLBlockND_multicross(in_channels=1024, inter_channels=512) 260 | self.downcross_three = (DownCross(3072, 1024)) 261 | 262 | 263 | if config.patches.get("grid") is not None: # ResNet 264 | grid_size = config.patches["grid"] 265 | patch_size = (img_size[0] // 16 // grid_size[0], img_size[1] // 16 // grid_size[1]) 266 | patch_size_real = (patch_size[0] * 16, patch_size[1] * 16) 267 | n_patches = (img_size[0] // patch_size_real[0]) * (img_size[1] // patch_size_real[1]) 268 | self.hybrid = True 269 | else: 270 | patch_size = _pair(config.patches["size"]) 271 | n_patches = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1]) 272 | self.hybrid = False 273 | self.hybrid_next = False 274 | self.hybrid_prev = False 275 | if self.hybrid: 276 | self.hybrid_model = ResNetV2(block_units=config.resnet.num_layers, width_factor=config.resnet.width_factor) 277 | self.hybrid_model_prev = ResNetV2(block_units=config.resnet.num_layers, width_factor=config.resnet.width_factor) 278 | self.hybrid_model_next = ResNetV2(block_units=config.resnet.num_layers, width_factor=config.resnet.width_factor) 279 | in_channels = self.hybrid_model.width * 16 280 | self.patch_embeddings = Conv2d(in_channels=in_channels, 281 | out_channels=config.hidden_size, 282 | kernel_size=patch_size, 283 | stride=patch_size) 284 | 
self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, config.hidden_size)) 285 | 286 | self.dropout = Dropout(config.transformer["dropout_rate"]) 287 | 288 | 289 | def forward(self, x_prev,x,x_next): 290 | if self.hybrid: 291 | 292 | x, features = self.hybrid_model(x) 293 | x_prev, features1 = self.hybrid_model(x_prev) 294 | x_next, features2 = self.hybrid_model(x_next) 295 | else: 296 | features = None 297 | 298 | 299 | xt1 = self.cross_attention_multi_1(x,x_next) 300 | xt2 = self.cross_attention_multi_2(x,x_prev) 301 | xt3 = self.cross_attention_multi_3(x,x) 302 | 303 | xt = torch.cat([xt1,xt3,xt2], dim=1) 304 | x = self.downcross_three(xt) 305 | 306 | x = self.patch_embeddings(x) # (B, hidden. n_patches^(1/2), n_patches^(1/2)) 307 | x = x.flatten(2) 308 | x = x.transpose(-1, -2) # (B, n_patches, hidden) 309 | 310 | embeddings = x + self.position_embeddings 311 | embeddings = self.dropout(embeddings) 312 | return embeddings, features 313 | 314 | 315 | # Transformer Block 316 | class Block(nn.Module): 317 | def __init__(self, config, vis): 318 | super(Block, self).__init__() 319 | self.hidden_size = config.hidden_size 320 | self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6) 321 | self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6) 322 | self.ffn = Mlp(config) 323 | self.attn = Attention(config, vis) 324 | 325 | def forward(self, x): 326 | h = x 327 | x = self.attention_norm(x) 328 | x, weights = self.attn(x) 329 | x = x + h 330 | 331 | h = x 332 | x = self.ffn_norm(x) 333 | x = self.ffn(x) 334 | x = x + h 335 | return x, weights 336 | 337 | def load_from(self, weights, n_block): 338 | ROOT = f"Transformer/encoderblock_{n_block}" 339 | with torch.no_grad(): 340 | query_weight = np2th(weights[pjoin(ROOT, ATTENTION_Q, "kernel")]).view(self.hidden_size, self.hidden_size).t() 341 | key_weight = np2th(weights[pjoin(ROOT, ATTENTION_K, "kernel")]).view(self.hidden_size, self.hidden_size).t() 342 | value_weight = np2th(weights[pjoin(ROOT, ATTENTION_V, "kernel")]).view(self.hidden_size, self.hidden_size).t() 343 | out_weight = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "kernel")]).view(self.hidden_size, self.hidden_size).t() 344 | 345 | query_bias = np2th(weights[pjoin(ROOT, ATTENTION_Q, "bias")]).view(-1) 346 | key_bias = np2th(weights[pjoin(ROOT, ATTENTION_K, "bias")]).view(-1) 347 | value_bias = np2th(weights[pjoin(ROOT, ATTENTION_V, "bias")]).view(-1) 348 | out_bias = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "bias")]).view(-1) 349 | 350 | self.attn.query.weight.copy_(query_weight) 351 | self.attn.key.weight.copy_(key_weight) 352 | self.attn.value.weight.copy_(value_weight) 353 | self.attn.out.weight.copy_(out_weight) 354 | self.attn.query.bias.copy_(query_bias) 355 | self.attn.key.bias.copy_(key_bias) 356 | self.attn.value.bias.copy_(value_bias) 357 | self.attn.out.bias.copy_(out_bias) 358 | 359 | mlp_weight_0 = np2th(weights[pjoin(ROOT, FC_0, "kernel")]).t() 360 | mlp_weight_1 = np2th(weights[pjoin(ROOT, FC_1, "kernel")]).t() 361 | mlp_bias_0 = np2th(weights[pjoin(ROOT, FC_0, "bias")]).t() 362 | mlp_bias_1 = np2th(weights[pjoin(ROOT, FC_1, "bias")]).t() 363 | 364 | self.ffn.fc1.weight.copy_(mlp_weight_0) 365 | self.ffn.fc2.weight.copy_(mlp_weight_1) 366 | self.ffn.fc1.bias.copy_(mlp_bias_0) 367 | self.ffn.fc2.bias.copy_(mlp_bias_1) 368 | 369 | self.attention_norm.weight.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "scale")])) 370 | self.attention_norm.bias.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "bias")])) 371 | 
self.ffn_norm.weight.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "scale")])) 372 | self.ffn_norm.bias.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "bias")])) 373 | 374 | 375 | # Transformer Encoder 376 | class Encoder(nn.Module): 377 | def __init__(self, config, vis): 378 | super(Encoder, self).__init__() 379 | self.vis = vis 380 | self.layer = nn.ModuleList() 381 | self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6) 382 | for _ in range(config.transformer["num_layers"]): 383 | layer = Block(config, vis) 384 | self.layer.append(copy.deepcopy(layer)) 385 | 386 | def forward(self, hidden_states): 387 | attn_weights = [] 388 | for layer_block in self.layer: 389 | hidden_states, weights = layer_block(hidden_states) 390 | if self.vis: 391 | attn_weights.append(weights) 392 | encoded = self.encoder_norm(hidden_states) 393 | return encoded, attn_weights 394 | 395 | # Transformer architecture 396 | class Transformer(nn.Module): 397 | def __init__(self, config, img_size, vis): 398 | super(Transformer, self).__init__() 399 | self.embeddings = Embeddings(config, img_size=img_size) 400 | self.encoder = Encoder(config, vis) 401 | 402 | def forward(self, x_prev,x,x_next): 403 | embedding_output, features = self.embeddings(x_prev,x,x_next) 404 | encoded, attn_weights = self.encoder(embedding_output) 405 | return encoded, attn_weights, features 406 | 407 | # Conv2dReLU module 408 | class Conv2dReLU(nn.Sequential): 409 | def __init__( 410 | self, 411 | in_channels, 412 | out_channels, 413 | kernel_size, 414 | padding=0, 415 | stride=1, 416 | use_batchnorm=True, 417 | ): 418 | conv = nn.Conv2d( 419 | in_channels, 420 | out_channels, 421 | kernel_size, 422 | stride=stride, 423 | padding=padding, 424 | bias=not (use_batchnorm), 425 | ) 426 | relu = nn.ReLU(inplace=True) 427 | 428 | bn = nn.BatchNorm2d(out_channels) 429 | 430 | super(Conv2dReLU, self).__init__(conv, bn, relu) 431 | 432 | # Decoder block for the segmentation head 433 | class DecoderBlock(nn.Module): 434 | def __init__( 435 | self, 436 | in_channels, 437 | out_channels, 438 | skip_channels=0, 439 | use_batchnorm=True, 440 | ): 441 | super().__init__() 442 | self.conv1 = Conv2dReLU( 443 | in_channels + skip_channels, 444 | out_channels, 445 | kernel_size=3, 446 | padding=1, 447 | use_batchnorm=use_batchnorm, 448 | ) 449 | self.conv2 = Conv2dReLU( 450 | out_channels, 451 | out_channels, 452 | kernel_size=3, 453 | padding=1, 454 | use_batchnorm=use_batchnorm, 455 | ) 456 | self.up = nn.UpsamplingBilinear2d(scale_factor=2) 457 | 458 | def forward(self, x, skip=None): 459 | x = self.up(x) 460 | if skip is not None: 461 | x = torch.cat([x, skip], dim=1) 462 | x = self.conv1(x) 463 | x = self.conv2(x) 464 | return x 465 | 466 | # Segmentation head module 467 | class SegmentationHead(nn.Sequential): 468 | 469 | def __init__(self, in_channels, out_channels, kernel_size=3, upsampling=1): 470 | conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2) 471 | upsampling = nn.UpsamplingBilinear2d(scale_factor=upsampling) if upsampling > 1 else nn.Identity() 472 | super().__init__(conv2d, upsampling) 473 | 474 | # DecoderCup module 475 | class DecoderCup(nn.Module): 476 | def __init__(self, config): 477 | super().__init__() 478 | self.config = config 479 | head_channels = 512 480 | self.conv_more = Conv2dReLU( 481 | config.hidden_size, 482 | head_channels, 483 | kernel_size=3, 484 | padding=1, 485 | use_batchnorm=True, 486 | ) 487 | decoder_channels = config.decoder_channels 488 | in_channels = [head_channels] + 
list(decoder_channels[:-1]) 489 | out_channels = decoder_channels 490 | 491 | if self.config.n_skip != 0: 492 | skip_channels = self.config.skip_channels 493 | for i in range(4-self.config.n_skip): # re-select the skip channels according to n_skip 494 | skip_channels[3-i]=0 495 | 496 | else: 497 | skip_channels=[0,0,0,0] 498 | 499 | blocks = [ 500 | DecoderBlock(in_ch, out_ch, sk_ch) for in_ch, out_ch, sk_ch in zip(in_channels, out_channels, skip_channels) 501 | ] 502 | self.blocks = nn.ModuleList(blocks) 503 | 504 | def forward(self, hidden_states, features=None): 505 | B, n_patch, hidden = hidden_states.size() # reshape from (B, n_patch, hidden) to (B, h, w, hidden) 506 | h, w = int(np.sqrt(n_patch)), int(np.sqrt(n_patch)) 507 | x = hidden_states.permute(0, 2, 1) 508 | x = x.contiguous().view(B, hidden, h, w) 509 | x = self.conv_more(x) 510 | for i, decoder_block in enumerate(self.blocks): 511 | if features is not None: 512 | skip = features[i] if (i < self.config.n_skip) else None 513 | else: 514 | skip = None 515 | x = decoder_block(x, skip=skip) 516 | return x 517 | 518 | 519 | 520 | # Vision Transformer model 521 | class VisionTransformer(nn.Module): 522 | def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False): 523 | super(VisionTransformer, self).__init__() 524 | self.num_classes = num_classes 525 | self.zero_head = zero_head 526 | self.classifier = config.classifier 527 | self.transformer = Transformer(config, img_size, vis) 528 | self.decoder = DecoderCup(config) 529 | self.segmentation_head = SegmentationHead( 530 | in_channels=config['decoder_channels'][-1], 531 | out_channels=config['n_classes'], 532 | kernel_size=3, 533 | ) 534 | self.config = config 535 | 536 | def forward(self, x_prev,x,x_next): 537 | if x.size()[1] == 1: 538 | x = x.repeat(1,3,1,1) 539 | x_prev = x_prev.repeat(1,3,1,1) 540 | x_next = x_next.repeat(1,3,1,1) 541 | x, attn_weights, features = self.transformer(x_prev,x,x_next) # (B, n_patch, hidden) 542 | x = self.decoder(x, features) 543 | logits = self.segmentation_head(x) 544 | return logits 545 | 546 | def load_from(self, weights): 547 | with torch.no_grad(): 548 | 549 | res_weight = weights 550 | self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True)) 551 | self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"])) 552 | 553 | self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"])) 554 | self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"])) 555 | 556 | posemb = np2th(weights["Transformer/posembed_input/pos_embedding"]) 557 | 558 | posemb_new = self.transformer.embeddings.position_embeddings 559 | if posemb.size() == posemb_new.size(): 560 | self.transformer.embeddings.position_embeddings.copy_(posemb) 561 | elif posemb.size()[1]-1 == posemb_new.size()[1]: 562 | posemb = posemb[:, 1:] 563 | self.transformer.embeddings.position_embeddings.copy_(posemb) 564 | else: 565 | logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size())) 566 | ntok_new = posemb_new.size(1) 567 | if self.classifier == "seg": 568 | _, posemb_grid = posemb[:, :1], posemb[0, 1:] 569 | gs_old = int(np.sqrt(len(posemb_grid))) 570 | gs_new = int(np.sqrt(ntok_new)) 571 | print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new)) 572 | posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1) 573 | zoom = (gs_new / gs_old, gs_new / gs_old, 1) 574 | 
posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1) # th2np 575 | posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1) 576 | posemb = posemb_grid 577 | self.transformer.embeddings.position_embeddings.copy_(np2th(posemb)) 578 | 579 | # Encoder whole 580 | for bname, block in self.transformer.encoder.named_children(): 581 | for uname, unit in block.named_children(): 582 | unit.load_from(weights, n_block=uname) 583 | 584 | if self.transformer.embeddings.hybrid: 585 | self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(res_weight["conv_root/kernel"], conv=True)) 586 | gn_weight = np2th(res_weight["gn_root/scale"]).view(-1) 587 | gn_bias = np2th(res_weight["gn_root/bias"]).view(-1) 588 | self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight) 589 | self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias) 590 | 591 | for bname, block in self.transformer.embeddings.hybrid_model.body.named_children(): 592 | for uname, unit in block.named_children(): 593 | unit.load_from(res_weight, n_block=bname, n_unit=uname) 594 | 595 | 596 | # Configuration dictionary for different Vision Transformer variants 597 | CONFIGS = { 598 | 'ViT-B_16': configs.get_b16_config(), 599 | 'ViT-B_32': configs.get_b32_config(), 600 | 'ViT-L_16': configs.get_l16_config(), 601 | 'ViT-L_32': configs.get_l32_config(), 602 | 'ViT-H_14': configs.get_h14_config(), 603 | 'R50-ViT-B_16': configs.get_r50_b16_config(), 604 | 'R50-ViT-L_16': configs.get_r50_l16_config(), 605 | 'testing': configs.get_testing(), 606 | } 607 | 608 | 609 | -------------------------------------------------------------------------------- /CSANet/networks/vit_seg_modeling_resnet_skip.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | from os.path import join as pjoin 4 | from collections import OrderedDict 5 | 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | 10 | 11 | def np2th(weights, conv=False): 12 | """Possibly convert HWIO to OIHW.""" 13 | if conv: 14 | weights = weights.transpose([3, 2, 0, 1]) 15 | return torch.from_numpy(weights) 16 | 17 | 18 | class StdConv2d(nn.Conv2d): 19 | 20 | def forward(self, x): 21 | w = self.weight 22 | v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) 23 | w = (w - m) / torch.sqrt(v + 1e-5) 24 | return F.conv2d(x, w, self.bias, self.stride, self.padding, 25 | self.dilation, self.groups) 26 | 27 | 28 | def conv3x3(cin, cout, stride=1, groups=1, bias=False): 29 | return StdConv2d(cin, cout, kernel_size=3, stride=stride, 30 | padding=1, bias=bias, groups=groups) 31 | 32 | 33 | def conv1x1(cin, cout, stride=1, bias=False): 34 | return StdConv2d(cin, cout, kernel_size=1, stride=stride, 35 | padding=0, bias=bias) 36 | 37 | 38 | class PreActBottleneck(nn.Module): 39 | """Pre-activation (v2) bottleneck block. 40 | """ 41 | 42 | def __init__(self, cin, cout=None, cmid=None, stride=1): 43 | super().__init__() 44 | cout = cout or cin 45 | cmid = cmid or cout//4 46 | 47 | self.gn1 = nn.GroupNorm(32, cmid, eps=1e-6) 48 | self.conv1 = conv1x1(cin, cmid, bias=False) 49 | self.gn2 = nn.GroupNorm(32, cmid, eps=1e-6) 50 | self.conv2 = conv3x3(cmid, cmid, stride, bias=False) # Original code has it on conv1!! 51 | self.gn3 = nn.GroupNorm(32, cout, eps=1e-6) 52 | self.conv3 = conv1x1(cmid, cout, bias=False) 53 | self.relu = nn.ReLU(inplace=True) 54 | 55 | if (stride != 1 or cin != cout): 56 | # Projection also with pre-activation according to paper. 
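            # A strided 1x1 convolution plus GroupNorm projects the shortcut so that its spatial
            # size and channel count match the main branch whenever stride != 1 or cin != cout.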
57 | self.downsample = conv1x1(cin, cout, stride, bias=False) 58 | self.gn_proj = nn.GroupNorm(cout, cout) 59 | 60 | def forward(self, x): 61 | 62 | # Residual branch 63 | residual = x 64 | if hasattr(self, 'downsample'): 65 | residual = self.downsample(x) 66 | residual = self.gn_proj(residual) 67 | 68 | # Unit's branch 69 | y = self.relu(self.gn1(self.conv1(x))) 70 | y = self.relu(self.gn2(self.conv2(y))) 71 | y = self.gn3(self.conv3(y)) 72 | 73 | y = self.relu(residual + y) 74 | return y 75 | 76 | def load_from(self, weights, n_block, n_unit): 77 | conv1_weight = np2th(weights[pjoin(n_block, n_unit, "conv1/kernel")], conv=True) 78 | conv2_weight = np2th(weights[pjoin(n_block, n_unit, "conv2/kernel")], conv=True) 79 | conv3_weight = np2th(weights[pjoin(n_block, n_unit, "conv3/kernel")], conv=True) 80 | 81 | gn1_weight = np2th(weights[pjoin(n_block, n_unit, "gn1/scale")]) 82 | gn1_bias = np2th(weights[pjoin(n_block, n_unit, "gn1/bias")]) 83 | 84 | gn2_weight = np2th(weights[pjoin(n_block, n_unit, "gn2/scale")]) 85 | gn2_bias = np2th(weights[pjoin(n_block, n_unit, "gn2/bias")]) 86 | 87 | gn3_weight = np2th(weights[pjoin(n_block, n_unit, "gn3/scale")]) 88 | gn3_bias = np2th(weights[pjoin(n_block, n_unit, "gn3/bias")]) 89 | 90 | self.conv1.weight.copy_(conv1_weight) 91 | self.conv2.weight.copy_(conv2_weight) 92 | self.conv3.weight.copy_(conv3_weight) 93 | 94 | self.gn1.weight.copy_(gn1_weight.view(-1)) 95 | self.gn1.bias.copy_(gn1_bias.view(-1)) 96 | 97 | self.gn2.weight.copy_(gn2_weight.view(-1)) 98 | self.gn2.bias.copy_(gn2_bias.view(-1)) 99 | 100 | self.gn3.weight.copy_(gn3_weight.view(-1)) 101 | self.gn3.bias.copy_(gn3_bias.view(-1)) 102 | 103 | if hasattr(self, 'downsample'): 104 | proj_conv_weight = np2th(weights[pjoin(n_block, n_unit, "conv_proj/kernel")], conv=True) 105 | proj_gn_weight = np2th(weights[pjoin(n_block, n_unit, "gn_proj/scale")]) 106 | proj_gn_bias = np2th(weights[pjoin(n_block, n_unit, "gn_proj/bias")]) 107 | 108 | self.downsample.weight.copy_(proj_conv_weight) 109 | self.gn_proj.weight.copy_(proj_gn_weight.view(-1)) 110 | self.gn_proj.bias.copy_(proj_gn_bias.view(-1)) 111 | 112 | class ResNetV2(nn.Module): 113 | """Implementation of Pre-activation (v2) ResNet mode.""" 114 | 115 | def __init__(self, block_units, width_factor): 116 | super().__init__() 117 | width = int(64 * width_factor) 118 | self.width = width 119 | 120 | self.root = nn.Sequential(OrderedDict([ 121 | ('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)), 122 | ('gn', nn.GroupNorm(32, width, eps=1e-6)), 123 | ('relu', nn.ReLU(inplace=True)), 124 | # ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)) 125 | ])) 126 | 127 | self.body = nn.Sequential(OrderedDict([ 128 | ('block1', nn.Sequential(OrderedDict( 129 | [('unit1', PreActBottleneck(cin=width, cout=width*4, cmid=width))] + 130 | [(f'unit{i:d}', PreActBottleneck(cin=width*4, cout=width*4, cmid=width)) for i in range(2, block_units[0] + 1)], 131 | ))), 132 | ('block2', nn.Sequential(OrderedDict( 133 | [('unit1', PreActBottleneck(cin=width*4, cout=width*8, cmid=width*2, stride=2))] + 134 | [(f'unit{i:d}', PreActBottleneck(cin=width*8, cout=width*8, cmid=width*2)) for i in range(2, block_units[1] + 1)], 135 | ))), 136 | ('block3', nn.Sequential(OrderedDict( 137 | [('unit1', PreActBottleneck(cin=width*8, cout=width*16, cmid=width*4, stride=2))] + 138 | [(f'unit{i:d}', PreActBottleneck(cin=width*16, cout=width*16, cmid=width*4)) for i in range(2, block_units[2] + 1)], 139 | ))), 140 | ])) 141 | 142 | def forward(self, x): 
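        # Runs the stem and the first two body blocks while collecting intermediate feature maps
        # (zero-padded to the expected size when the spatial dimensions fall short), then applies
        # the last block; returns the final output together with the collected features in reverse
        # order, which DecoderCup later consumes as skip connections.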
143 | features = [] 144 | b, c, in_size, _ = x.size() 145 | x = self.root(x) 146 | features.append(x) 147 | x = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)(x) 148 | for i in range(len(self.body)-1): 149 | x = self.body[i](x) 150 | right_size = int(in_size / 4 / (i+1)) 151 | if x.size()[2] != right_size: 152 | pad = right_size - x.size()[2] 153 | assert pad < 3 and pad > 0, "x {} should {}".format(x.size(), right_size) 154 | feat = torch.zeros((b, x.size()[1], right_size, right_size), device=x.device) 155 | feat[:, :, 0:x.size()[2], 0:x.size()[3]] = x[:] 156 | else: 157 | feat = x 158 | features.append(feat) 159 | x = self.body[-1](x) 160 | return x, features[::-1] 161 | -------------------------------------------------------------------------------- /CSANet/test.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | import random 5 | import numpy as np 6 | import torch 7 | import torch.backends.cudnn as cudnn 8 | import sys 9 | import time 10 | import torch.nn as nn 11 | import torch.optim as optim 12 | from networks.vit_seg_modeling import VisionTransformer as ViT_seg 13 | from networks.vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg 14 | from datasets.dataset_CSANet import CSANet_dataset 15 | from tensorboardX import SummaryWriter 16 | from torch.utils.data import DataLoader 17 | from tqdm import tqdm 18 | from torchvision import transforms 19 | from utils import test_single_volume 20 | from datasets.dataset_CSANet import CSANet_dataset, RandomGenerator 21 | 22 | 23 | """ 24 | This script configures and initializes training for the CSANet segmentation model using Vision Transformers. It handles command-line arguments for various training parameters, sets up deterministic options for reproducibility, and initializes the model with specified configurations. 25 | 26 | Parameters: 27 | - volume_path: Directory for validation volume data. 28 | - dataset: Name of the dataset or experiment. 29 | - num_classes: Number of output classes for segmentation. 30 | - list_dir: Directory containing lists of data samples. 31 | - max_iterations: Maximum number of iterations to train. 32 | - max_epochs: Maximum number of epochs to train. 33 | - batch_size: Number of samples per batch. 34 | - seed: Seed for random number generators for reproducibility. 35 | - n_gpu: Number of GPUs to use. 36 | - img_size: Size of the input images. 37 | - base_lr: Base learning rate for the optimizer. 38 | - deterministic: Flag to set training as deterministic. 39 | - n_skip: Number of skip connections in the model. 40 | - vit_name: Name of the Vision Transformer model configuration. 41 | - vit_patches_size: Size of patches for the ViT model. 42 | 43 | The script also loads and validates the model from a saved state if available and performs inference to evaluate the model on a test dataset. 
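Example invocation (using the default arguments defined below; adjust the data and model paths to your setup):
    python test.py --dataset CSANet --vit_name R50-ViT-B_16 --max_epochs 40 --batch_size 16 --base_lr 0.001 --img_size 224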
44 | """ 45 | 46 | # Setup command-line argument parsing 47 | parser = argparse.ArgumentParser() 48 | parser.add_argument('--volume_path', type=str, 49 | default='../data', help='root dir for validation volume data') 50 | parser.add_argument('--dataset', type=str, 51 | default='CSANet', help='experiment_name') 52 | parser.add_argument('--num_classes', type=int, 53 | default=5, help='output channel of network') 54 | parser.add_argument('--list_dir', type=str, 55 | default='./lists', help='list dir') 56 | parser.add_argument('--max_iterations', type=int, 57 | default=300000, help='maximum epoch number to train') 58 | parser.add_argument('--max_epochs', type=int, 59 | default=40, help='maximum epoch number to train') 60 | parser.add_argument('--batch_size', type=int, 61 | default=16, help='batch_size per gpu') 62 | parser.add_argument('--seed', type=int, 63 | default=1234, help='random seed') 64 | parser.add_argument('--n_gpu', type=int, default=1, help='total gpu') 65 | parser.add_argument('--img_size', type=int, 66 | default=224, help='input patch size of network input') 67 | parser.add_argument('--base_lr', type=float, default=0.001, 68 | help='segmentation network learning rate') 69 | parser.add_argument('--deterministic', type=int, default=1, 70 | help='whether use deterministic training') 71 | parser.add_argument('--n_skip', type=int, 72 | default=3, help='using number of skip-connect, default is num') 73 | parser.add_argument('--vit_name', type=str, 74 | default='R50-ViT-B_16', help='select one vit model') 75 | parser.add_argument('--vit_patches_size', type=int, 76 | default=16, help='vit_patches_size, default is 16') 77 | 78 | args = parser.parse_args() 79 | 80 | 81 | 82 | 83 | def vol_inference(args, model, test_save_path=None, validation=False): 84 | """ 85 | Performs inference on a test dataset, computes performance metrics such as Dice coefficients and distances, 86 | and can operate in validation mode or test mode based on a flag. 87 | 88 | Parameters: 89 | - args (Namespace): Contains all the necessary settings such as dataset paths, number of classes, image size, etc. 90 | - model (torch.nn.Module): The trained model to be evaluated. 91 | - test_save_path (str, optional): Path where test outputs (such as images) can be saved. 92 | - validation (bool, optional): If True, function returns average Dice coefficient for validation purposes. 93 | If False, returns a string message indicating test completion. 94 | 95 | Returns: 96 | - float: If validation is True, returns the average Dice coefficient. 97 | - str: If validation is False, returns a completion message "Testing Finished!" 98 | 99 | The function logs the number of test iterations, processes each test sample to compute Dice coefficients and distances, 100 | and aggregates these metrics across the dataset for reporting or validation. 
101 | """ 102 | # Load the test dataset 103 | db_test = args.Dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir) 104 | testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1) 105 | num = len(testloader) 106 | logging.info("{} test iterations per epoch".format(len(testloader))) 107 | model.eval() 108 | metric_list = 0.0 109 | 110 | # Initialize metrics storage 111 | total_dice_coeff1, total_dice_coeff2, total_dice_coeff3, total_dice_coeff4 = 0, 0, 0, 0 112 | total_dist1, total_dist2, total_dist3, total_dist4 = 0, 0, 0, 0 113 | 114 | # Process each batch in the test loader 115 | for i_batch, sampled_batch in tqdm(enumerate(testloader)): 116 | # Retrieve image and label from the batch 117 | image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'] 118 | image_next, image_prev = sampled_batch['next_image'], sampled_batch['prev_image'] 119 | 120 | dice1, dice2, dice3, dice4, dist1, dist2,dist3, dist4 = test_single_volume(image_next, image, image_prev, label, model, classes=args.num_classes, patch_size=[args.img_size, args.img_size], 121 | test_save_path=test_save_path, case=case_name) 122 | # Output the metrics for monitoring 123 | print("dice1 = ",dice1, " dice2 = ", dice2, "dice3= ",dice3,"dice4= ", dice4) 124 | total_dice_coeff1 = total_dice_coeff1 + dice1 125 | total_dice_coeff2 = total_dice_coeff2 + dice2 126 | total_dice_coeff3 = total_dice_coeff3 + dice3 127 | total_dice_coeff4 = total_dice_coeff4 + dice4 128 | total_dist1 = total_dist1 + dist1 129 | total_dist2 = total_dist2 + dist2 130 | total_dist3 = total_dist3 + dist3 131 | total_dist4 = total_dist4 + dist4 132 | 133 | # Calculate average metrics for all cases 134 | print(f"dice1={total_dice_coeff1/num}, dice2={total_dice_coeff2/num}, dice3={total_dice_coeff3/num}, dice4={total_dice_coeff4/num}, hd1={total_dist1/num}, hd2={total_dist2/num}, hd3={total_dist3/num}, hd4={total_dist4/num}") 135 | avg_dice = (total_dice_coeff1 + total_dice_coeff2 + total_dice_coeff3 + total_dice_coeff4) / (4*num) 136 | print("avg_dice = ",avg_dice) 137 | # Return the appropriate result based on the validation flag 138 | if validation: 139 | return avg_dice 140 | else: 141 | return "Testing Finished!" 
142 | 143 | 144 | 145 | 146 | if __name__ == "__main__": 147 | # Setup GPU/CPU seeds for reproducibility if deterministic mode is enabled 148 | if not args.deterministic: 149 | cudnn.benchmark = True 150 | cudnn.deterministic = False 151 | else: 152 | cudnn.benchmark = False 153 | cudnn.deterministic = True 154 | random.seed(args.seed) 155 | np.random.seed(args.seed) 156 | torch.manual_seed(args.seed) 157 | torch.cuda.manual_seed(args.seed) 158 | 159 | dataset_name = args.dataset 160 | # Load dataset configuration based on the provided dataset name 161 | dataset_config = { 162 | 'CSANet': { 163 | 'Dataset': CSANet_dataset, 164 | 'root_path': '../data/train_npz', 165 | 'volume_path': '../data/', 166 | 'list_dir': './lists', 167 | 'num_classes': 5, 168 | 'z_spacing': 1, 169 | }, 170 | } 171 | 172 | 173 | if args.batch_size != 24 and args.batch_size % 6 == 0: 174 | args.base_lr *= args.batch_size / 24 175 | 176 | args.num_classes = dataset_config[dataset_name]['num_classes'] 177 | args.list_dir = dataset_config[dataset_name]['list_dir'] 178 | args.is_pretrain = True 179 | args.Dataset = dataset_config[dataset_name]['Dataset'] 180 | args.exp = 'CSANet_' + str(args.img_size) 181 | 182 | 183 | 184 | snapshot_path = "../model/{}/{}".format(args.exp, 'TU') 185 | snapshot_path += '_' + args.vit_name 186 | snapshot_path = snapshot_path + '_skip' + str(args.n_skip) 187 | snapshot_path = snapshot_path + '_vitpatch' + str(args.vit_patches_size) if args.vit_patches_size!=16 else snapshot_path 188 | snapshot_path = snapshot_path+'_'+str(args.max_iterations)[0:2]+'k' if args.max_iterations != 30000 else snapshot_path 189 | snapshot_path = snapshot_path + '_epo' +str(args.max_epochs) if args.max_epochs != 30 else snapshot_path 190 | snapshot_path = snapshot_path+'_bs'+str(args.batch_size) 191 | snapshot_path = snapshot_path + '_lr' + str(args.base_lr) if args.base_lr != 0.01 else snapshot_path 192 | 193 | # Initialize and load the ViT model from the specified configuration and saved state 194 | config_vit = CONFIGS_ViT_seg[args.vit_name] 195 | config_vit.n_classes = args.num_classes 196 | config_vit.n_skip = args.n_skip 197 | config_vit.patches.size = (args.vit_patches_size, args.vit_patches_size) 198 | if args.vit_name.find('R50') !=-1: 199 | config_vit.patches.grid = (int(args.img_size/args.vit_patches_size), int(args.img_size/args.vit_patches_size)) 200 | net = ViT_seg(config_vit, img_size=args.img_size, num_classes=config_vit.n_classes).cuda() 201 | 202 | snapshot = os.path.join(snapshot_path, 'best_model.pth') 203 | if not os.path.exists(snapshot): snapshot = snapshot.replace('best_model', 'epoch_'+str(args.max_epochs-1)) 204 | net.load_state_dict(torch.load(snapshot)) 205 | snapshot_name = snapshot_path.split('/')[-1] 206 | log_folder = './test_log/test_log_' + args.exp 207 | 208 | os.makedirs(log_folder, exist_ok=True) 209 | # Setup logging and initiate volume inference 210 | logging.basicConfig(filename=log_folder + '/'+snapshot_name+".txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') 211 | logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) 212 | logging.info(str(args)) 213 | logging.info(snapshot_name) 214 | 215 | vol_inference(args, net, validation=False) 216 | 217 | -------------------------------------------------------------------------------- /CSANet/train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | import random 5 | import numpy as np 6 
| import torch 7 | import torch.backends.cudnn as cudnn 8 | from networks.vit_seg_modeling import VisionTransformer as ViT_seg 9 | from networks.vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg 10 | from trainer import trainer_CSANet 11 | from datasets.dataset_CSANet import CSANet_dataset 12 | 13 | 14 | 15 | """ 16 | This script initializes and runs training for the CSANet segmentation model using Vision Transformer (ViT) architecture. 17 | It configures the training environment, sets up the data loading for a medical imaging dataset, and initializes the model 18 | with predefined or specified hyperparameters. The script is designed to be run with command-line arguments that allow 19 | customization of various parameters including data paths, model specifics, and training settings. 20 | 21 | Command-Line Arguments: 22 | - root_path: Directory containing training data. 23 | - dataset: Identifier for the dataset used, affecting certain preset configurations. 24 | - list_dir: Directory containing lists of training data specifics. 25 | - num_classes: Number of classes for segmentation. 26 | - volume_path: Path to validation data for model evaluation. 27 | - max_iterations: Total number of iterations the training should run. 28 | - max_epochs: Maximum number of epochs for which the model trains. 29 | - batch_size: Number of samples in each batch. 30 | - n_gpu: Number of GPUs available for training. 31 | - deterministic: Flag to ensure deterministic results, useful for reproducibility. 32 | - base_lr: Base learning rate for the optimizer. 33 | - img_size: Dimensions of the input images for the model. 34 | - seed: Random seed for initialization to ensure reproducibility. 35 | - n_skip: Number of skip connections in the ViT model. 36 | - vit_name: Name of the Vision Transformer configuration to be used. 37 | - vit_patches_size: Size of patches used in the ViT model. 38 | 39 | The script supports customization of the training process through these parameters and uses a pre-defined configuration 40 | for setting up the model, dataset, and training operations based on the provided dataset name. 
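Example invocation (illustrative; the values shown are simply the defaults defined below):
    python train.py --dataset CSANet --batch_size 16 --max_epochs 40 --base_lr 0.001 --img_size 224 --vit_name R50-ViT-B_16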
41 | """ 42 | 43 | # Setup command-line interface 44 | parser = argparse.ArgumentParser() 45 | parser.add_argument('--root_path', type=str, 46 | default='../data/train_npz', help='root dir for data') 47 | parser.add_argument('--dataset', type=str, 48 | default='CSANet', help='experiment_name') 49 | parser.add_argument('--list_dir', type=str, 50 | default='./lists', help='list dir') 51 | parser.add_argument('--num_classes', type=int, 52 | default=5, help='output channel of network') # class change ----------------- 53 | parser.add_argument('--volume_path', type=str, 54 | default='../data', help='root dir for validation volume data') 55 | parser.add_argument('--max_iterations', type=int, 56 | default=300000, help='maximum epoch number to train') 57 | parser.add_argument('--max_epochs', type=int, 58 | default=40, help='maximum epoch number to train') 59 | parser.add_argument('--batch_size', type=int, 60 | default=16, help='batch_size per gpu') 61 | parser.add_argument('--n_gpu', type=int, default=1, help='total gpu') 62 | parser.add_argument('--deterministic', type=int, default=1, 63 | help='whether use deterministic training') 64 | parser.add_argument('--base_lr', type=float, default=0.001, 65 | help='segmentation network learning rate') 66 | parser.add_argument('--img_size', type=int, 67 | default=224, help='input patch size of network input') 68 | parser.add_argument('--seed', type=int, 69 | default=1234, help='random seed') 70 | parser.add_argument('--n_skip', type=int, 71 | default=3, help='using number of skip-connect, default is num') 72 | parser.add_argument('--vit_name', type=str, 73 | default='R50-ViT-B_16', help='select one vit model') 74 | parser.add_argument('--vit_patches_size', type=int, 75 | default=16, help='vit_patches_size, default is 16') 76 | 77 | args = parser.parse_args() 78 | 79 | 80 | if __name__ == "__main__": 81 | # Configure deterministic behavior for reproducibility if specified 82 | if not args.deterministic: 83 | cudnn.benchmark = True 84 | cudnn.deterministic = False 85 | else: 86 | cudnn.benchmark = False 87 | cudnn.deterministic = True 88 | 89 | random.seed(args.seed) 90 | np.random.seed(args.seed) 91 | torch.manual_seed(args.seed) 92 | torch.cuda.manual_seed(args.seed) 93 | dataset_name = args.dataset 94 | dataset_config = { 95 | 'CSANet': { 96 | 'Dataset': CSANet_dataset, 97 | 'root_path': '../data/train_npz', 98 | 'volume_path': '../data', 99 | 'list_dir': './lists', 100 | 'num_classes': 5, 101 | 'z_spacing': 1, 102 | }, 103 | } 104 | if args.batch_size != 24 and args.batch_size % 6 == 0: 105 | args.base_lr *= args.batch_size / 24 106 | args.num_classes = dataset_config[dataset_name]['num_classes'] 107 | 108 | args.root_path = dataset_config[dataset_name]['root_path'] 109 | args.list_dir = dataset_config[dataset_name]['list_dir'] 110 | args.is_pretrain = True 111 | args.Dataset = dataset_config[dataset_name]['Dataset'] 112 | args.exp = 'CSANet_'+ str(args.img_size) 113 | 114 | # Build snapshot path based on the configuration and command-line arguments 115 | snapshot_path = "../model/{}/{}".format(args.exp, 'TU') 116 | snapshot_path += '_' + args.vit_name 117 | snapshot_path = snapshot_path + '_skip' + str(args.n_skip) 118 | snapshot_path = snapshot_path + '_vitpatch' + str(args.vit_patches_size) if args.vit_patches_size!=16 else snapshot_path 119 | snapshot_path = snapshot_path+'_'+str(args.max_iterations)[0:2]+'k' if args.max_iterations != 30000 else snapshot_path 120 | snapshot_path = snapshot_path + '_epo' +str(args.max_epochs) if args.max_epochs != 30 
else snapshot_path 121 | snapshot_path = snapshot_path+'_bs'+str(args.batch_size) 122 | snapshot_path = snapshot_path + '_lr' + str(args.base_lr) if args.base_lr != 0.01 else snapshot_path 123 | 124 | print("snapshot path = ", snapshot_path) 125 | if not os.path.exists(snapshot_path): 126 | os.makedirs(snapshot_path) 127 | 128 | # Load Vision Transformer with the specific configuration 129 | config_vit = CONFIGS_ViT_seg[args.vit_name] 130 | config_vit.n_classes = args.num_classes 131 | config_vit.n_skip = args.n_skip 132 | if args.vit_name.find('R50') != -1: 133 | config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size)) 134 | net = ViT_seg(config_vit, img_size=args.img_size, num_classes=config_vit.n_classes).cuda() 135 | # Load initial weights if pretrained path is provided 136 | net.load_from(weights=np.load(config_vit.pretrained_path)) 137 | # Start training using the specified trainer for the dataset 138 | trainer = {'CSANet': trainer_CSANet} 139 | trainer[dataset_name](args, net, snapshot_path) 140 | -------------------------------------------------------------------------------- /CSANet/trainer.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | import random 5 | import sys 6 | import time 7 | import numpy as np 8 | import torch 9 | import torch.nn as nn 10 | import torch.optim as optim 11 | from tensorboardX import SummaryWriter 12 | from torch.nn.modules.loss import CrossEntropyLoss 13 | from torch.utils.data import DataLoader 14 | from tqdm import tqdm 15 | from loss_function import DiceLoss 16 | from torchvision import transforms 17 | from PIL import Image 18 | import matplotlib.pyplot as plt 19 | from utils import test_single_volume 20 | from visualization import save_visualization 21 | import cv2 22 | import torch.backends.cudnn as cudnn 23 | from datasets.dataset_CSANet import CSANet_dataset, RandomGenerator 24 | from test import vol_inference 25 | 26 | 27 | 28 | 29 | 30 | 31 | def trainer_CSANet(args, model, snapshot_path): 32 | 33 | """ 34 | Trains the CSANet model with the specified parameters and dataset, performing evaluations and saving the model state based on performance metrics. 35 | 36 | Parameters: 37 | - args (Namespace): Configuration containing all settings for the training process, such as dataset paths, learning rates, batch sizes, and more. 38 | - model (torch.nn.Module): The neural network model to be trained. 39 | - snapshot_path (str): Directory path where training snapshots (model states and logs) will be saved. 40 | 41 | Returns: 42 | - str: A message indicating that training has finished. 43 | 44 | The function initializes training setup, logs configurations, and enters a training loop where it continually feeds data through the model, computes losses, updates the model's weights, and logs the results. It also evaluates the model periodically using the `vol_inference` function and saves the model state when performance improves. The summary of training progress is saved using TensorBoard. 
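    Optimization recipe (as implemented below): the loss is 0.5 * CrossEntropy + 0.5 * Dice, minimized with SGD (momentum 0.9, weight decay 1e-4) under a polynomial learning-rate decay, lr = base_lr * (1 - iter_num / max_iterations) ** 0.9. Starting after epoch 10, validation via vol_inference runs every 5 epochs, and the model is saved to best_model.pth in snapshot_path whenever the average validation Dice improves.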
45 | """ 46 | 47 | # Configure logging 48 | logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, 49 | format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') 50 | logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) 51 | logging.info(str(args)) 52 | 53 | # Set training parameters from args 54 | base_lr = args.base_lr 55 | num_classes = args.num_classes 56 | batch_size = args.batch_size * args.n_gpu 57 | 58 | # Initialize dataset and dataloader 59 | db_train = CSANet_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train_image", 60 | transform=transforms.Compose( 61 | [RandomGenerator(output_size=[args.img_size, args.img_size])])) 62 | print("The length of train set is: {}".format(len(db_train))) 63 | 64 | def worker_init_fn(worker_id): 65 | # Seed each worker for reproducibility 66 | random.seed(args.seed + worker_id) 67 | 68 | trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, 69 | worker_init_fn=worker_init_fn) 70 | # Use DataParallel for multi-GPU training 71 | if args.n_gpu > 1: 72 | model = nn.DataParallel(model) 73 | model.train() 74 | 75 | # Define loss functions and optimizer 76 | ce_loss = CrossEntropyLoss() 77 | dice_loss = DiceLoss(num_classes) 78 | optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001) 79 | 80 | # Initialize TensorBoard writer 81 | writer = SummaryWriter(snapshot_path + '/log') 82 | iter_num = 0 83 | max_epoch = args.max_epochs 84 | max_iterations = args.max_epochs * len(trainloader) 85 | logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations)) 86 | best_performance = 0.0 87 | folder_path = "./training_result" 88 | if not os.path.exists(folder_path): 89 | os.makedirs(folder_path) 90 | 91 | #vol_inference(args, model, validation=False) 92 | # Training loop 93 | iterator = tqdm(range(max_epoch), ncols=70) 94 | for epoch_num in iterator: 95 | model.train() 96 | for i_batch, sampled_batch in enumerate(trainloader): 97 | image_batch, label_batch = sampled_batch['image'], sampled_batch['label'] 98 | image_next, image_prev = sampled_batch['next_image'], sampled_batch['prev_image'] 99 | 100 | # Ensure all tensors are on the same device 101 | image_batch, label_batch = image_batch.cuda(), label_batch.cuda() 102 | image_next_batch, image_prev_batch = image_next.cuda(), image_prev.cuda() 103 | 104 | # Forward pass 105 | outputs = model(image_prev_batch, image_batch, image_next_batch) 106 | 107 | # Calculate loss 108 | loss_ce = ce_loss(outputs, label_batch[:].long()) 109 | loss_dice = dice_loss(outputs, label_batch, softmax=True) 110 | loss = 0.5 * loss_ce + 0.5 * loss_dice 111 | 112 | # Backpropagation 113 | optimizer.zero_grad() 114 | loss.backward() 115 | optimizer.step() 116 | 117 | # Visualization and logging 118 | save_visualization(outputs, label_batch, epoch_num, i_batch) 119 | 120 | # Learning rate adjustment 121 | lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 122 | for param_group in optimizer.param_groups: 123 | param_group['lr'] = lr_ 124 | 125 | iter_num = iter_num + 1 126 | writer.add_scalar('info/lr', lr_, iter_num) 127 | writer.add_scalar('info/total_loss', loss, iter_num) 128 | writer.add_scalar('info/loss_ce', loss_ce, iter_num) 129 | 130 | logging.info('iteration %d : loss : %f, loss_ce: %f' % (iter_num, loss.item(), loss_ce.item())) 131 | 132 | # End-of-epoch validation and checkpointing 133 | if epoch_num > 10 and (epoch_num % 5 == 0 or epoch_num == 
39): 134 | avg_dice = vol_inference(args, model, validation=True) 135 | if avg_dice > best_performance: 136 | best_performance = avg_dice 137 | save_mode_path = os.path.join(snapshot_path, 'best_model.pth') 138 | torch.save(model.state_dict(), save_mode_path) 139 | logging.info(f"Saved new best model to {save_mode_path}") 140 | 141 | vol_inference(args, model, validation=False) 142 | writer.close() 143 | return "Training Finished!" 144 | -------------------------------------------------------------------------------- /CSANet/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from medpy import metric 4 | from scipy.ndimage import zoom 5 | import torch.nn as nn 6 | import SimpleITK as sitk 7 | import os 8 | import nibabel as nib 9 | from skimage.measure import label, regionprops 10 | import scipy.ndimage as ndi 11 | import math 12 | 13 | 14 | def computing_COM_distance(mask_array, pred_array, US_indicies, spacing): 15 | 16 | """ 17 | Computes the average physical distance between the centers of mass (COM) of corresponding predicted and ground truth masks. 18 | 19 | Parameters: 20 | - mask_array (np.array): Array of ground truth masks. 21 | - pred_array (np.array): Array of predicted masks. 22 | - US_indicies (list of int): Indices of the mask slices to be processed. 23 | - spacing (tuple of float): Physical spacing between pixels in the masks. 24 | 25 | Returns: 26 | - float: Mean physical distance between centers of mass across specified mask slices. 27 | """ 28 | 29 | dist = [] 30 | for num, US_num in enumerate(US_indicies): 31 | predicted_mask = pred_array[US_num].astype('uint8') 32 | ground_truth_mask = mask_array[US_num].astype('uint8') 33 | cy_hist, cx_hist = ndi.center_of_mass(predicted_mask) 34 | cy_us, cx_us = ndi.center_of_mass(ground_truth_mask) 35 | temp = math.dist([cx_hist, cy_hist], [cx_us, cy_us]) 36 | phy_temp = temp * spacing[0] 37 | dist.append(phy_temp) 38 | distances = np.array(dist) 39 | 40 | if distances.size > 0: 41 | percentile_95 = np.percentile(distances, 95) 42 | mean_of_95th_percentile = np.mean(distances[distances <= percentile_95]) 43 | else: 44 | percentile_95 = 0 45 | mean_of_95th_percentile = 0 46 | return mean_of_95th_percentile 47 | 48 | 49 | 50 | 51 | def Dice_cal(image1, image2): 52 | 53 | """ 54 | Calculates Dice coefficients for multiple class labels between two images. 55 | 56 | Parameters: 57 | - image1 (SimpleITK.Image): First image for comparison. 58 | - image2 (SimpleITK.Image): Second image for comparison. 59 | 60 | Returns: 61 | - tuple: Dice coefficients for each class label (1, 2, 3, 4). 
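    Note: for each label value, the Dice coefficient reported by SimpleITK's LabelOverlapMeasuresImageFilter is 2 * |A ∩ B| / (|A| + |B|), where A and B are the binary masks of that label in image1 and image2.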
62 | """ 63 | 64 | class_labels = [1 , 2, 3, 4] 65 | for num_labels in class_labels: 66 | # Create binary masks for each class 67 | mask1 = sitk.Cast(image1 == num_labels, sitk.sitkInt32) 68 | mask2 = sitk.Cast(image2 == num_labels, sitk.sitkInt32) 69 | 70 | overlap_filter = sitk.LabelOverlapMeasuresImageFilter() 71 | overlap_filter.Execute(mask1, mask2) 72 | 73 | if num_labels == 1: 74 | dice_coeff_1 = overlap_filter.GetDiceCoefficient() 75 | elif num_labels == 2: 76 | dice_coeff_2 = overlap_filter.GetDiceCoefficient() 77 | elif num_labels == 3: 78 | dice_coeff_3 = overlap_filter.GetDiceCoefficient() 79 | elif num_labels == 4: 80 | dice_coeff_4 = overlap_filter.GetDiceCoefficient() 81 | 82 | return dice_coeff_1, dice_coeff_2, dice_coeff_3, dice_coeff_4 83 | 84 | def compute_class_hausdorff(labels, outputs, class_index, spacing): 85 | 86 | """ 87 | Computes the Hausdorff distance for a specific class based on its segmentation masks. 88 | 89 | Parameters: 90 | - labels (np.array): Array of ground truth labels for all classes. 91 | - outputs (np.array): Array of predicted labels for all classes. 92 | - class_index (int): Index of the class for which to compute the distance. 93 | - spacing (tuple of float): Physical spacing of the images. 94 | 95 | Returns: 96 | - float: Computed Hausdorff distance for the specified class. 97 | """ 98 | 99 | US_indicies = [] 100 | new_labels = labels[:,:,:,class_index] 101 | new_outputs = outputs[:,:,:,class_index] 102 | 103 | for z in range(new_labels.shape[0]): 104 | if np.sum(new_labels[z]) > 0 and np.sum(new_outputs[z]) > 0: 105 | US_indicies.append(z) 106 | 107 | hausdorff_dist = computing_COM_distance(new_labels, new_outputs, US_indicies, spacing) 108 | return hausdorff_dist 109 | 110 | 111 | def test_single_volume(image_next, image, image_prev, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None): 112 | 113 | """ 114 | Tests a single volume for segmentation using a deep learning model, and evaluates segmentation accuracy using Dice and Hausdorff distances. 115 | 116 | Parameters: 117 | - image_next, image_prev, image (np.array): Current, previous, and next slices of the image volume. 118 | - label (np.array): Ground truth labels for the current image slice. 119 | - net (torch.nn.Module): Neural network model used for segmentation. 120 | - classes (int): Number of segmentation classes. 121 | - patch_size (list of int): Size of the patches processed by the network. 122 | - test_save_path (str, optional): Path to save the segmentation results. 123 | - case (str, optional): Identifier for the case being tested. 124 | 125 | Returns: 126 | - tuple: Dice coefficients and Hausdorff distances for each class. 
127 | """ 128 | # Convert tensors to numpy arrays for processing 129 | image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy() 130 | image_next, image_prev = image_next.squeeze(0).cpu().detach().numpy(), image_prev.squeeze(0).cpu().detach().numpy() 131 | 132 | # Initialize prediction array 133 | if len(image.shape) == 3: 134 | prediction = np.zeros_like(label) 135 | for ind in range(image.shape[0]): 136 | # Resize slices if necessary to match the network's expected input size 137 | slice = image[ind, :, :] 138 | slice_prev = image_prev[ind, :, :] 139 | slice_next = image_next[ind, :, :] 140 | 141 | x, y = slice.shape[0], slice.shape[1] 142 | if x != patch_size[0] or y != patch_size[1]: 143 | slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3) # previous using 0 144 | slice_prev = zoom(slice_prev, (patch_size[0] / x, patch_size[1] / y), order=3) 145 | slice_next = zoom(slice_next, (patch_size[0] / x, patch_size[1] / y), order=3) 146 | 147 | # Convert slices to tensors and run through the network 148 | input_curr = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().cuda() 149 | input_prev = torch.from_numpy(slice_prev).unsqueeze(0).unsqueeze(0).float().cuda() 150 | input_next = torch.from_numpy(slice_next).unsqueeze(0).unsqueeze(0).float().cuda() 151 | 152 | net.eval() 153 | 154 | with torch.no_grad(): 155 | outputs = net(input_prev, input_curr , input_next) 156 | out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0) 157 | out = out.cpu().detach().numpy() 158 | if x != patch_size[0] or y != patch_size[1]: 159 | pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0) 160 | else: 161 | pred = out 162 | prediction[ind] = pred 163 | else: 164 | # Handle single slice case 165 | input = torch.from_numpy(image).unsqueeze( 166 | 0).unsqueeze(0).float().cuda() 167 | net.eval() 168 | with torch.no_grad(): 169 | out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0) 170 | prediction = out.cpu().detach().numpy() 171 | 172 | 173 | 174 | 175 | # Prepare data for analysis and visualization 176 | Result_path = "./Result" 177 | if not os.path.exists(Result_path): 178 | os.makedirs(Result_path) 179 | num_case = case[0] 180 | test_vol_path = "../data/testVol" + "/" + num_case + ".nii.gz" 181 | vol_image = sitk.ReadImage(test_vol_path) 182 | 183 | # Create SimpleITK images from numpy arrays for evaluation 184 | pred_image = sitk.GetImageFromArray(prediction) 185 | pred_image.SetSpacing(vol_image.GetSpacing()) 186 | pred_image.SetDirection(vol_image.GetDirection()) 187 | pred_image.SetOrigin(vol_image.GetOrigin()) 188 | pred_path = './Result/' + num_case +'_segmentation.nii.gz' 189 | sitk.WriteImage(pred_image, pred_path) 190 | 191 | label_image = sitk.GetImageFromArray(label) 192 | label_image.SetSpacing(vol_image.GetSpacing()) 193 | label_image.SetDirection(vol_image.GetDirection()) 194 | label_image.SetOrigin(vol_image.GetOrigin()) 195 | label_path = './Result/' + num_case +'_label_segmentation.nii.gz' 196 | sitk.WriteImage(label_image, label_path) 197 | 198 | # Load ground truth mask for evaluation 199 | mask_path = "../data/testMask/"+num_case+".nii.gz" 200 | mask_img = sitk.ReadImage(mask_path) 201 | 202 | image1 = pred_image 203 | image2 = mask_img 204 | dice_coeff_1, dice_coeff_2, dice_coeff_3, dice_coeff_4 = 0.0, 0.0, 0.0, 0.0 205 | 206 | # Dice Coefficient Calculation 207 | dice_coeff_1, dice_coeff_2, dice_coeff_3, dice_coeff_4= Dice_cal(image1, image2) 208 | 209 | 210 | labels = 
np.eye(classes)[label] 211 | outputs = np.eye(classes)[prediction] 212 | spacing = vol_image.GetSpacing() 213 | 214 | # Hausdroff Distance Calculation 215 | hausdorff_dist_1 = compute_class_hausdorff(labels, outputs, 1, spacing) 216 | hausdorff_dist_2 = compute_class_hausdorff(labels, outputs, 2, spacing) 217 | hausdorff_dist_3 = compute_class_hausdorff(labels, outputs, 3, spacing) 218 | hausdorff_dist_4 = compute_class_hausdorff(labels, outputs, 4, spacing) 219 | 220 | return dice_coeff_1, dice_coeff_2, dice_coeff_3, dice_coeff_4, hausdorff_dist_1,hausdorff_dist_2, hausdorff_dist_3, hausdorff_dist_4 221 | -------------------------------------------------------------------------------- /CSANet/visualization.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | 5 | 6 | def save_visualization(outputs, label_batch, epoch_num, i_batch): 7 | """ 8 | Processes the outputs and label batch, and saves visualization of the predictions and labels. 9 | 10 | Parameters: 11 | - outputs (torch.Tensor): The output predictions from the model. 12 | - label_batch (torch.Tensor): The batch of ground truth labels. 13 | - epoch_num (int): Current epoch number. 14 | - i_batch (int): Current batch index. 15 | 16 | Saves images to disk showing the predicted segmentation and actual labels. 17 | """ 18 | outputs = torch.softmax(outputs, dim=1) 19 | outputs = torch.argmax(outputs, dim=1).squeeze(dim=1) 20 | rand_slice_out = outputs[0,:,:] 21 | rand_slice_out = rand_slice_out.cpu().detach().numpy() 22 | plt.imshow(rand_slice_out) 23 | plt.colorbar() 24 | path1 = f"./training_result/{epoch_num}_{i_batch}pred_image.png" 25 | plt.savefig(path1) 26 | plt.close() 27 | 28 | rand_label_slice = label_batch[0,:,:] 29 | rand_label_slice = rand_label_slice.cpu().detach().numpy() 30 | plt.imshow(rand_label_slice) 31 | plt.colorbar() 32 | path2 = f"./training_result/{epoch_num}_{i_batch}label_image.png" 33 | plt.savefig(path2) 34 | plt.close() -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 mirth AI lab at UF 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CSA-Net: A Flexible 2.5D Medical Image Segmentation Approach with In-Slice and Cross-Slice Attention 2 | 3 |

4 | [CSA-Net architecture overview image] 5 |
6 | Figure 1: Visual representation of the CSA-Net architecture. 7 |
8 | 9 | Official PyTorch implementation of: 10 | 11 | [A Flexible 2.5D Medical Image Segmentation Approach with In-Slice and Cross-Slice Attention](https://doi.org/10.1016/j.compbiomed.2024.109173) 12 | 13 | The code is for research purposes only. If you have any questions regarding how to use this code, feel free to contact Amarjeet Kumar (amarjeetkumar@ufl.edu). 14 | 15 | ## Requirements 16 | Python==3.9.16 17 | ```bash 18 | pip install -r requirements.txt 19 | ``` 20 | 21 | ## Usage 22 | 23 | ### 1. Download Google pre-trained ViT models 24 | * [Get the pretrained Vision Transformer model (R50-ViT-B_16) using this link](https://console.cloud.google.com/storage/browser/vit_models/imagenet21k?pageState=(%22StorageObjectListTable%22:(%22f%22:%22%255B%255D%22))&prefix=&forceOnObjectsSortingFiltering=false). 25 | * Save the downloaded model in the folder "model/vit_checkpoint/imagenet21k/". 26 | ```bash 27 | ../model/vit_checkpoint/imagenet21k/{MODEL_NAME}.npz 28 | ``` 29 | 30 | ### 2. Prepare data 31 | 32 | Here we use the ProstateX dataset as an example. This dataset consists of T2-weighted prostate MRI, along with expert annotations of four prostate regions: transition zone, peripheral zone, urethra, and anterior fibromuscular stroma. 33 | 34 | 1. Access the ProstateX dataset: 35 | Sign up on the [official ProstateX website](https://www.cancerimagingarchive.net/collection/prostatex/) and download the dataset. Split it into training and testing sets organized as four folders (trainVol, trainMask, testVol, testMask) and put these folders under the data directory. 36 | * Run the preprocessing script, which generates a train_npz folder of 2D slices under "data/", the data list files under "lists/", and a train.csv file for overview. 37 | ``` 38 | python preprocessing.py 39 | ``` 40 | Alternatively, you can download our preprocessed dataset using this [link](https://drive.google.com/drive/folders/1qAkX34E_5kP-2pKDI0RChqWKfTNl1FVQ?usp=sharing). After downloading, copy the "lists" directory from "utils/" to "CSANet/"; it stores the text files containing the names of all samples for each task. 41 | 42 | The directory structure of the whole project is as follows: 43 | 44 | ```bash 45 | . 46 | ├── CSANet 47 | ├── model 48 | │ └── vit_checkpoint 49 | │ └── imagenet21k 50 | │ └── R50+ViT-B_16.npz 51 | │ 52 | └── data 53 | ├── trainVol 54 | ├── trainMask 55 | ├── testVol 56 | ├── testMask 57 | └── train_npz 58 | ``` 59 | 60 | ### 3. Train/Test 61 | * Go to the folder "CSANet/"; it is ready for you to train and test the model. 62 | ``` 63 | python train.py 64 | python test.py 65 | ``` 66 | You can see the test outputs in the "Result/" folder. 67 | 68 | ## Reference 69 | * [Google ViT](https://github.com/google-research/vision_transformer) 70 | * [ViT-pytorch](https://github.com/jeonsworld/ViT-pytorch) 71 | * [segmentation_models.pytorch](https://github.com/qubvel/segmentation_models.pytorch) 72 | 73 | This project incorporates concepts and implementations based on the following research papers and their corresponding code repositories: 74 | - "TransUNet: Transformers Make Strong Encoders for Medical Image Segmentation": [Paper](https://arxiv.org/pdf/2102.04306) | [GitHub Repository](https://github.com/Beckschen/TransUNet) 75 | - "Non-local Neural Networks": [Paper](https://arxiv.org/abs/1711.07971) | [GitHub Repository](https://github.com/facebookresearch/video-nonlocal-net) 76 | 77 | 78 | ## Citations 79 | Kindly cite our paper as follows if you use our code.
80 | ```bibtex 81 | @misc{kumar2024CSANet, 82 | title={A Flexible 2.5D Medical Image Segmentation Approach with In-Slice and Cross-Slice Attention}, 83 | author={Amarjeet Kumar and Hongxu Jiang and Muhammad Imran and Cyndi Valdes and Gabriela Leon and Dahyun Kang and Parvathi Nataraj and Yuyin Zhou and Michael D. Weiss and Wei Shao}, 84 | journal={Computers in Biology and Medicine}, 85 | volume={182}, 86 | pages={109173}, 87 | year={2024}, 88 | publisher={Elsevier} 89 | } 90 | ``` 91 | -------------------------------------------------------------------------------- /data/README.md: -------------------------------------------------------------------------------- 1 | ### Folder Should be structured like below format. 2 | ![image](https://github.com/mirthAI/CSA-Net/assets/26433669/e0f04241-f051-49d7-a2bb-b50125359498) 3 | 4 | -------------------------------------------------------------------------------- /model/vit_checkpoint/imagenet21k/README.md: -------------------------------------------------------------------------------- 1 | . 2 | -------------------------------------------------------------------------------- /preprocessing.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import SimpleITK as sitk 4 | import pandas as pd 5 | import csv 6 | import re 7 | import cv2 8 | def ensure_directory_exists(path): 9 | """ 10 | Ensures that a directory exists; if not, creates it. 11 | """ 12 | if not os.path.exists(path): 13 | os.makedirs(path) 14 | print(f"Created directory: {path}") 15 | else: 16 | print(f"Directory already exists: {path}") 17 | 18 | def ensure_file_exists(file_path): 19 | """ 20 | Ensures that a file exists; if not, creates an empty file. 21 | """ 22 | if not os.path.exists(file_path): 23 | with open(file_path, 'w') as file: 24 | file.write("") 25 | print(f"Created file: {file_path}") 26 | else: 27 | print(f"File already exists: {file_path}") 28 | 29 | # Directories and paths setup 30 | vol_dir = "data/trainVol" 31 | mask_dir = "data/trainMask" 32 | path_to_scan = os.path.join(os.getcwd(), vol_dir) 33 | mask_path_to_scan = os.path.join(os.getcwd(), mask_dir) 34 | 35 | save_image_path = "data/train_npz/trainingImages" 36 | save_mask_path = "data/train_npz/trainingMasks" 37 | ensure_directory_exists(save_image_path) 38 | ensure_directory_exists(save_mask_path) 39 | 40 | # Process volume files 41 | for file in os.listdir(path_to_scan): 42 | if not file.startswith('.'): 43 | vol_path = os.path.join(path_to_scan, file) 44 | image = sitk.ReadImage(vol_path) 45 | img = sitk.GetArrayFromImage(image) 46 | number = file.split('.')[0] 47 | for i in range(img.shape[0]): 48 | img_array = img[i, :, :].astype(np.uint8) 49 | clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) 50 | img_array = clahe.apply(img_array) 51 | p1 = np.percentile(img_array, 1) 52 | p99 = np.percentile(img_array, 99) 53 | 54 | normalized_img = (img_array - p1) / (p99 - p1) 55 | normalized_img = np.clip(normalized_img, 0, 1) 56 | slice_name = f"{number}_vol_slice_{i}.npy" 57 | slice_path = os.path.join(save_image_path, slice_name) 58 | np.save(slice_path, normalized_img) 59 | 60 | # Process mask files 61 | for file in os.listdir(mask_path_to_scan): 62 | if not file.startswith('._'): 63 | mask_path = os.path.join(mask_path_to_scan, file) 64 | mask = sitk.ReadImage(mask_path) 65 | mask_img = sitk.GetArrayFromImage(mask) 66 | number = file.split('.')[0] 67 | for i in range(mask_img.shape[0]): 68 | slice_name = 
f"{number}_mask_slice_{i}.npy" 69 | slice_path = os.path.join(save_mask_path, slice_name) 70 | np.save(slice_path, mask_img[i,:,:]) 71 | 72 | # Create CSV file for training data 73 | if not os.path.exists("CSANet/lists"): 74 | os.makedirs("CSANet/lists") 75 | csv_filename = "CSANet/lists/train.csv" 76 | with open(csv_filename, mode='w', newline='') as csv_file: 77 | csv_writer = csv.writer(csv_file) 78 | csv_writer.writerow(['image', 'mask']) 79 | for file in os.listdir(path_to_scan): 80 | if not file.startswith('._'): 81 | number = file.split('.')[0] 82 | for i in range(img.shape[0]): 83 | seg_file_name = f"{number}_mask_slice_{i}.npy" 84 | vol_file_name = f"{number}_vol_slice_{i}.npy" 85 | csv_writer.writerow([vol_file_name, seg_file_name]) 86 | 87 | # Ensure essential files exist 88 | image_list_path = "CSANet/lists/train_image.txt" 89 | mask_list_path = "CSANet/lists/train_mask.txt" 90 | vol_list_path = "CSANet/lists/test_vol.txt" 91 | ensure_file_exists(image_list_path) 92 | ensure_file_exists(mask_list_path) 93 | ensure_file_exists(vol_list_path) 94 | 95 | # Generate file lists for image and mask 96 | def generate_file_list(data, key, file_path): 97 | num = data[key].values.size 98 | names = [data[key].values[i].split('.')[0] for i in range(num)] 99 | with open(file_path, 'w') as f: 100 | for name in names: 101 | f.write(f"{name}\n") 102 | 103 | # Creating file lists 104 | data = pd.read_csv(csv_filename) 105 | generate_file_list(data, 'image', image_list_path) 106 | generate_file_list(data, 'mask', mask_list_path) 107 | 108 | # List files for test volumes 109 | folder_path = 'data/testVol' 110 | files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))] 111 | with open(vol_list_path, 'w') as file: 112 | for file_name in files: 113 | if not file_name.startswith('._'): 114 | file.write(file_name + '\n') 115 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy=1.23.4 2 | pandas=1.5.2 3 | SimpleITK=2.2.1 4 | torch=1.10.1 5 | scipy 6 | matplotlib=3.6.2 7 | opencv-python 8 | nibabel=3.2.1 9 | scikit-image=0.19.3 10 | torchvision=0.11.2 11 | cuda-toolkit=11.3.1 12 | cudnn=8.9.2.26 13 | pillow=9.2.0 14 | medpy 15 | sklearn=1.2.2 16 | tensorboardX 17 | SimpleITK 18 | python=3.9.16 19 | tensorboard 20 | 21 | 22 | -------------------------------------------------------------------------------- /utils/lists/test_vol.txt: -------------------------------------------------------------------------------- 1 | 309.nii.gz 2 | 238.nii.gz 3 | 209.nii.gz 4 | 201.nii.gz 5 | 251.nii.gz 6 | 227.nii.gz 7 | 199.nii.gz 8 | 241.nii.gz 9 | 217.nii.gz 10 | 323.nii.gz 11 | 296.nii.gz 12 | 334.nii.gz 13 | 254.nii.gz 14 | 320.nii.gz 15 | 249.nii.gz 16 | 275.nii.gz 17 | 283.nii.gz 18 | 288.nii.gz 19 | 331.nii.gz 20 | 297.nii.gz 21 | 304.nii.gz 22 | 340.nii.gz 23 | 234.nii.gz 24 | 219.nii.gz 25 | 332.nii.gz 26 | 293.nii.gz 27 | 239.nii.gz 28 | -------------------------------------------------------------------------------- /utils/lists/train_image.txt: -------------------------------------------------------------------------------- 1 | 130_vol_slice_0 2 | 130_vol_slice_1 3 | 130_vol_slice_2 4 | 130_vol_slice_3 5 | 130_vol_slice_4 6 | 130_vol_slice_5 7 | 130_vol_slice_6 8 | 130_vol_slice_7 9 | 130_vol_slice_8 10 | 130_vol_slice_9 11 | 130_vol_slice_10 12 | 130_vol_slice_11 13 | 130_vol_slice_12 14 | 130_vol_slice_13 15 | 130_vol_slice_14 16 | 
130_vol_slice_15 17 | 130_vol_slice_16 18 | 130_vol_slice_17 19 | 130_vol_slice_18 20 | 130_vol_slice_19 21 | 130_vol_slice_20 22 | 130_vol_slice_21 23 | 130_vol_slice_22 24 | 125_vol_slice_0 25 | 125_vol_slice_1 26 | 125_vol_slice_2 27 | 125_vol_slice_3 28 | 125_vol_slice_4 29 | 125_vol_slice_5 30 | 125_vol_slice_6 31 | 125_vol_slice_7 32 | 125_vol_slice_8 33 | 125_vol_slice_9 34 | 125_vol_slice_10 35 | 125_vol_slice_11 36 | 125_vol_slice_12 37 | 125_vol_slice_13 38 | 125_vol_slice_14 39 | 125_vol_slice_15 40 | 125_vol_slice_16 41 | 125_vol_slice_17 42 | 125_vol_slice_18 43 | 125_vol_slice_19 44 | 125_vol_slice_20 45 | 12_vol_slice_0 46 | 12_vol_slice_1 47 | 12_vol_slice_2 48 | 12_vol_slice_3 49 | 12_vol_slice_4 50 | 12_vol_slice_5 51 | 12_vol_slice_6 52 | 12_vol_slice_7 53 | 12_vol_slice_8 54 | 12_vol_slice_9 55 | 12_vol_slice_10 56 | 12_vol_slice_11 57 | 12_vol_slice_12 58 | 12_vol_slice_13 59 | 12_vol_slice_14 60 | 12_vol_slice_15 61 | 12_vol_slice_16 62 | 12_vol_slice_17 63 | 12_vol_slice_18 64 | 65_vol_slice_0 65 | 65_vol_slice_1 66 | 65_vol_slice_2 67 | 65_vol_slice_3 68 | 65_vol_slice_4 69 | 65_vol_slice_5 70 | 65_vol_slice_6 71 | 65_vol_slice_7 72 | 65_vol_slice_8 73 | 65_vol_slice_9 74 | 65_vol_slice_10 75 | 65_vol_slice_11 76 | 65_vol_slice_12 77 | 65_vol_slice_13 78 | 65_vol_slice_14 79 | 65_vol_slice_15 80 | 65_vol_slice_16 81 | 65_vol_slice_17 82 | 65_vol_slice_18 83 | 136_vol_slice_0 84 | 136_vol_slice_1 85 | 136_vol_slice_2 86 | 136_vol_slice_3 87 | 136_vol_slice_4 88 | 136_vol_slice_5 89 | 136_vol_slice_6 90 | 136_vol_slice_7 91 | 136_vol_slice_8 92 | 136_vol_slice_9 93 | 136_vol_slice_10 94 | 136_vol_slice_11 95 | 136_vol_slice_12 96 | 136_vol_slice_13 97 | 136_vol_slice_14 98 | 136_vol_slice_15 99 | 136_vol_slice_16 100 | 136_vol_slice_17 101 | 136_vol_slice_18 102 | 136_vol_slice_19 103 | 136_vol_slice_20 104 | 117_vol_slice_0 105 | 117_vol_slice_1 106 | 117_vol_slice_2 107 | 117_vol_slice_3 108 | 117_vol_slice_4 109 | 117_vol_slice_5 110 | 117_vol_slice_6 111 | 117_vol_slice_7 112 | 117_vol_slice_8 113 | 117_vol_slice_9 114 | 117_vol_slice_10 115 | 117_vol_slice_11 116 | 117_vol_slice_12 117 | 117_vol_slice_13 118 | 117_vol_slice_14 119 | 117_vol_slice_15 120 | 117_vol_slice_16 121 | 117_vol_slice_17 122 | 117_vol_slice_18 123 | 117_vol_slice_19 124 | 117_vol_slice_20 125 | 117_vol_slice_21 126 | 117_vol_slice_22 127 | 134_vol_slice_0 128 | 134_vol_slice_1 129 | 134_vol_slice_2 130 | 134_vol_slice_3 131 | 134_vol_slice_4 132 | 134_vol_slice_5 133 | 134_vol_slice_6 134 | 134_vol_slice_7 135 | 134_vol_slice_8 136 | 134_vol_slice_9 137 | 134_vol_slice_10 138 | 134_vol_slice_11 139 | 134_vol_slice_12 140 | 134_vol_slice_13 141 | 134_vol_slice_14 142 | 134_vol_slice_15 143 | 134_vol_slice_16 144 | 134_vol_slice_17 145 | 134_vol_slice_18 146 | 134_vol_slice_19 147 | 134_vol_slice_20 148 | 193_vol_slice_0 149 | 193_vol_slice_1 150 | 193_vol_slice_2 151 | 193_vol_slice_3 152 | 193_vol_slice_4 153 | 193_vol_slice_5 154 | 193_vol_slice_6 155 | 193_vol_slice_7 156 | 193_vol_slice_8 157 | 193_vol_slice_9 158 | 193_vol_slice_10 159 | 193_vol_slice_11 160 | 193_vol_slice_12 161 | 193_vol_slice_13 162 | 193_vol_slice_14 163 | 193_vol_slice_15 164 | 193_vol_slice_16 165 | 193_vol_slice_17 166 | 193_vol_slice_18 167 | 9_vol_slice_0 168 | 9_vol_slice_1 169 | 9_vol_slice_2 170 | 9_vol_slice_3 171 | 9_vol_slice_4 172 | 9_vol_slice_5 173 | 9_vol_slice_6 174 | 9_vol_slice_7 175 | 9_vol_slice_8 176 | 9_vol_slice_9 177 | 9_vol_slice_10 178 | 9_vol_slice_11 179 | 9_vol_slice_12 180 | 
9_vol_slice_13 181 | 9_vol_slice_14 182 | 9_vol_slice_15 183 | 9_vol_slice_16 184 | 9_vol_slice_17 185 | 9_vol_slice_18 186 | 9_vol_slice_19 187 | 9_vol_slice_20 188 | 107_vol_slice_0 189 | 107_vol_slice_1 190 | 107_vol_slice_2 191 | 107_vol_slice_3 192 | 107_vol_slice_4 193 | 107_vol_slice_5 194 | 107_vol_slice_6 195 | 107_vol_slice_7 196 | 107_vol_slice_8 197 | 107_vol_slice_9 198 | 107_vol_slice_10 199 | 107_vol_slice_11 200 | 107_vol_slice_12 201 | 107_vol_slice_13 202 | 107_vol_slice_14 203 | 107_vol_slice_15 204 | 107_vol_slice_16 205 | 107_vol_slice_17 206 | 107_vol_slice_18 207 | 107_vol_slice_19 208 | 107_vol_slice_20 209 | 107_vol_slice_21 210 | 107_vol_slice_22 211 | 54_vol_slice_0 212 | 54_vol_slice_1 213 | 54_vol_slice_2 214 | 54_vol_slice_3 215 | 54_vol_slice_4 216 | 54_vol_slice_5 217 | 54_vol_slice_6 218 | 54_vol_slice_7 219 | 54_vol_slice_8 220 | 54_vol_slice_9 221 | 54_vol_slice_10 222 | 54_vol_slice_11 223 | 54_vol_slice_12 224 | 54_vol_slice_13 225 | 54_vol_slice_14 226 | 54_vol_slice_15 227 | 54_vol_slice_16 228 | 54_vol_slice_17 229 | 54_vol_slice_18 230 | 54_vol_slice_19 231 | 54_vol_slice_20 232 | 39_vol_slice_0 233 | 39_vol_slice_1 234 | 39_vol_slice_2 235 | 39_vol_slice_3 236 | 39_vol_slice_4 237 | 39_vol_slice_5 238 | 39_vol_slice_6 239 | 39_vol_slice_7 240 | 39_vol_slice_8 241 | 39_vol_slice_9 242 | 39_vol_slice_10 243 | 39_vol_slice_11 244 | 39_vol_slice_12 245 | 39_vol_slice_13 246 | 39_vol_slice_14 247 | 39_vol_slice_15 248 | 39_vol_slice_16 249 | 39_vol_slice_17 250 | 39_vol_slice_18 251 | 39_vol_slice_19 252 | 39_vol_slice_20 253 | 39_vol_slice_21 254 | 39_vol_slice_22 255 | 39_vol_slice_23 256 | 44_vol_slice_0 257 | 44_vol_slice_1 258 | 44_vol_slice_2 259 | 44_vol_slice_3 260 | 44_vol_slice_4 261 | 44_vol_slice_5 262 | 44_vol_slice_6 263 | 44_vol_slice_7 264 | 44_vol_slice_8 265 | 44_vol_slice_9 266 | 44_vol_slice_10 267 | 44_vol_slice_11 268 | 44_vol_slice_12 269 | 44_vol_slice_13 270 | 44_vol_slice_14 271 | 44_vol_slice_15 272 | 44_vol_slice_16 273 | 44_vol_slice_17 274 | 44_vol_slice_18 275 | 70_vol_slice_0 276 | 70_vol_slice_1 277 | 70_vol_slice_2 278 | 70_vol_slice_3 279 | 70_vol_slice_4 280 | 70_vol_slice_5 281 | 70_vol_slice_6 282 | 70_vol_slice_7 283 | 70_vol_slice_8 284 | 70_vol_slice_9 285 | 70_vol_slice_10 286 | 70_vol_slice_11 287 | 70_vol_slice_12 288 | 70_vol_slice_13 289 | 70_vol_slice_14 290 | 70_vol_slice_15 291 | 70_vol_slice_16 292 | 70_vol_slice_17 293 | 70_vol_slice_18 294 | 70_vol_slice_19 295 | 70_vol_slice_20 296 | 72_vol_slice_0 297 | 72_vol_slice_1 298 | 72_vol_slice_2 299 | 72_vol_slice_3 300 | 72_vol_slice_4 301 | 72_vol_slice_5 302 | 72_vol_slice_6 303 | 72_vol_slice_7 304 | 72_vol_slice_8 305 | 72_vol_slice_9 306 | 72_vol_slice_10 307 | 72_vol_slice_11 308 | 72_vol_slice_12 309 | 72_vol_slice_13 310 | 72_vol_slice_14 311 | 72_vol_slice_15 312 | 72_vol_slice_16 313 | 72_vol_slice_17 314 | 72_vol_slice_18 315 | 168_vol_slice_0 316 | 168_vol_slice_1 317 | 168_vol_slice_2 318 | 168_vol_slice_3 319 | 168_vol_slice_4 320 | 168_vol_slice_5 321 | 168_vol_slice_6 322 | 168_vol_slice_7 323 | 168_vol_slice_8 324 | 168_vol_slice_9 325 | 168_vol_slice_10 326 | 168_vol_slice_11 327 | 168_vol_slice_12 328 | 168_vol_slice_13 329 | 168_vol_slice_14 330 | 168_vol_slice_15 331 | 168_vol_slice_16 332 | 168_vol_slice_17 333 | 168_vol_slice_18 334 | 56_vol_slice_0 335 | 56_vol_slice_1 336 | 56_vol_slice_2 337 | 56_vol_slice_3 338 | 56_vol_slice_4 339 | 56_vol_slice_5 340 | 56_vol_slice_6 341 | 56_vol_slice_7 342 | 56_vol_slice_8 343 | 
56_vol_slice_9 344 | 56_vol_slice_10 345 | 56_vol_slice_11 346 | 56_vol_slice_12 347 | 56_vol_slice_13 348 | 56_vol_slice_14 349 | 56_vol_slice_15 350 | 56_vol_slice_16 351 | 56_vol_slice_17 352 | 56_vol_slice_18 353 | 56_vol_slice_19 354 | 56_vol_slice_20 355 | 142_vol_slice_0 356 | 142_vol_slice_1 357 | 142_vol_slice_2 358 | 142_vol_slice_3 359 | 142_vol_slice_4 360 | 142_vol_slice_5 361 | 142_vol_slice_6 362 | 142_vol_slice_7 363 | 142_vol_slice_8 364 | 142_vol_slice_9 365 | 142_vol_slice_10 366 | 142_vol_slice_11 367 | 142_vol_slice_12 368 | 142_vol_slice_13 369 | 142_vol_slice_14 370 | 142_vol_slice_15 371 | 142_vol_slice_16 372 | 142_vol_slice_17 373 | 142_vol_slice_18 374 | 142_vol_slice_19 375 | 142_vol_slice_20 376 | 94_vol_slice_0 377 | 94_vol_slice_1 378 | 94_vol_slice_2 379 | 94_vol_slice_3 380 | 94_vol_slice_4 381 | 94_vol_slice_5 382 | 94_vol_slice_6 383 | 94_vol_slice_7 384 | 94_vol_slice_8 385 | 94_vol_slice_9 386 | 94_vol_slice_10 387 | 94_vol_slice_11 388 | 94_vol_slice_12 389 | 94_vol_slice_13 390 | 94_vol_slice_14 391 | 94_vol_slice_15 392 | 94_vol_slice_16 393 | 94_vol_slice_17 394 | 94_vol_slice_18 395 | 7_vol_slice_0 396 | 7_vol_slice_1 397 | 7_vol_slice_2 398 | 7_vol_slice_3 399 | 7_vol_slice_4 400 | 7_vol_slice_5 401 | 7_vol_slice_6 402 | 7_vol_slice_7 403 | 7_vol_slice_8 404 | 7_vol_slice_9 405 | 7_vol_slice_10 406 | 7_vol_slice_11 407 | 7_vol_slice_12 408 | 7_vol_slice_13 409 | 7_vol_slice_14 410 | 7_vol_slice_15 411 | 7_vol_slice_16 412 | 7_vol_slice_17 413 | 7_vol_slice_18 414 | 7_vol_slice_19 415 | 7_vol_slice_20 416 | 7_vol_slice_21 417 | 7_vol_slice_22 418 | 144_vol_slice_0 419 | 144_vol_slice_1 420 | 144_vol_slice_2 421 | 144_vol_slice_3 422 | 144_vol_slice_4 423 | 144_vol_slice_5 424 | 144_vol_slice_6 425 | 144_vol_slice_7 426 | 144_vol_slice_8 427 | 144_vol_slice_9 428 | 144_vol_slice_10 429 | 144_vol_slice_11 430 | 144_vol_slice_12 431 | 144_vol_slice_13 432 | 144_vol_slice_14 433 | 144_vol_slice_15 434 | 144_vol_slice_16 435 | 144_vol_slice_17 436 | 144_vol_slice_18 437 | 144_vol_slice_19 438 | 144_vol_slice_20 439 | 118_vol_slice_0 440 | 118_vol_slice_1 441 | 118_vol_slice_2 442 | 118_vol_slice_3 443 | 118_vol_slice_4 444 | 118_vol_slice_5 445 | 118_vol_slice_6 446 | 118_vol_slice_7 447 | 118_vol_slice_8 448 | 118_vol_slice_9 449 | 118_vol_slice_10 450 | 118_vol_slice_11 451 | 118_vol_slice_12 452 | 118_vol_slice_13 453 | 118_vol_slice_14 454 | 118_vol_slice_15 455 | 118_vol_slice_16 456 | 118_vol_slice_17 457 | 118_vol_slice_18 458 | 118_vol_slice_19 459 | 118_vol_slice_20 460 | 118_vol_slice_21 461 | 118_vol_slice_22 462 | 183_vol_slice_0 463 | 183_vol_slice_1 464 | 183_vol_slice_2 465 | 183_vol_slice_3 466 | 183_vol_slice_4 467 | 183_vol_slice_5 468 | 183_vol_slice_6 469 | 183_vol_slice_7 470 | 183_vol_slice_8 471 | 183_vol_slice_9 472 | 183_vol_slice_10 473 | 183_vol_slice_11 474 | 183_vol_slice_12 475 | 183_vol_slice_13 476 | 183_vol_slice_14 477 | 183_vol_slice_15 478 | 183_vol_slice_16 479 | 183_vol_slice_17 480 | 183_vol_slice_18 481 | 183_vol_slice_19 482 | 183_vol_slice_20 483 | 121_vol_slice_0 484 | 121_vol_slice_1 485 | 121_vol_slice_2 486 | 121_vol_slice_3 487 | 121_vol_slice_4 488 | 121_vol_slice_5 489 | 121_vol_slice_6 490 | 121_vol_slice_7 491 | 121_vol_slice_8 492 | 121_vol_slice_9 493 | 121_vol_slice_10 494 | 121_vol_slice_11 495 | 121_vol_slice_12 496 | 121_vol_slice_13 497 | 121_vol_slice_14 498 | 121_vol_slice_15 499 | 121_vol_slice_16 500 | 121_vol_slice_17 501 | 121_vol_slice_18 502 | 121_vol_slice_19 503 | 121_vol_slice_20 504 | 
26_vol_slice_0 505 | 26_vol_slice_1 506 | 26_vol_slice_2 507 | 26_vol_slice_3 508 | 26_vol_slice_4 509 | 26_vol_slice_5 510 | 26_vol_slice_6 511 | 26_vol_slice_7 512 | 26_vol_slice_8 513 | 26_vol_slice_9 514 | 26_vol_slice_10 515 | 26_vol_slice_11 516 | 26_vol_slice_12 517 | 26_vol_slice_13 518 | 26_vol_slice_14 519 | 26_vol_slice_15 520 | 26_vol_slice_16 521 | 26_vol_slice_17 522 | 26_vol_slice_18 523 | 26_vol_slice_19 524 | 26_vol_slice_20 525 | 60_vol_slice_0 526 | 60_vol_slice_1 527 | 60_vol_slice_2 528 | 60_vol_slice_3 529 | 60_vol_slice_4 530 | 60_vol_slice_5 531 | 60_vol_slice_6 532 | 60_vol_slice_7 533 | 60_vol_slice_8 534 | 60_vol_slice_9 535 | 60_vol_slice_10 536 | 60_vol_slice_11 537 | 60_vol_slice_12 538 | 60_vol_slice_13 539 | 60_vol_slice_14 540 | 60_vol_slice_15 541 | 60_vol_slice_16 542 | 60_vol_slice_17 543 | 60_vol_slice_18 544 | 60_vol_slice_19 545 | 60_vol_slice_20 546 | 60_vol_slice_21 547 | 60_vol_slice_22 548 | 180_vol_slice_0 549 | 180_vol_slice_1 550 | 180_vol_slice_2 551 | 180_vol_slice_3 552 | 180_vol_slice_4 553 | 180_vol_slice_5 554 | 180_vol_slice_6 555 | 180_vol_slice_7 556 | 180_vol_slice_8 557 | 180_vol_slice_9 558 | 180_vol_slice_10 559 | 180_vol_slice_11 560 | 180_vol_slice_12 561 | 180_vol_slice_13 562 | 180_vol_slice_14 563 | 180_vol_slice_15 564 | 180_vol_slice_16 565 | 180_vol_slice_17 566 | 180_vol_slice_18 567 | 154_vol_slice_0 568 | 154_vol_slice_1 569 | 154_vol_slice_2 570 | 154_vol_slice_3 571 | 154_vol_slice_4 572 | 154_vol_slice_5 573 | 154_vol_slice_6 574 | 154_vol_slice_7 575 | 154_vol_slice_8 576 | 154_vol_slice_9 577 | 154_vol_slice_10 578 | 154_vol_slice_11 579 | 154_vol_slice_12 580 | 154_vol_slice_13 581 | 154_vol_slice_14 582 | 154_vol_slice_15 583 | 154_vol_slice_16 584 | 154_vol_slice_17 585 | 154_vol_slice_18 586 | 154_vol_slice_19 587 | 154_vol_slice_20 588 | 90_vol_slice_0 589 | 90_vol_slice_1 590 | 90_vol_slice_2 591 | 90_vol_slice_3 592 | 90_vol_slice_4 593 | 90_vol_slice_5 594 | 90_vol_slice_6 595 | 90_vol_slice_7 596 | 90_vol_slice_8 597 | 90_vol_slice_9 598 | 90_vol_slice_10 599 | 90_vol_slice_11 600 | 90_vol_slice_12 601 | 90_vol_slice_13 602 | 90_vol_slice_14 603 | 90_vol_slice_15 604 | 90_vol_slice_16 605 | 90_vol_slice_17 606 | 90_vol_slice_18 607 | 166_vol_slice_0 608 | 166_vol_slice_1 609 | 166_vol_slice_2 610 | 166_vol_slice_3 611 | 166_vol_slice_4 612 | 166_vol_slice_5 613 | 166_vol_slice_6 614 | 166_vol_slice_7 615 | 166_vol_slice_8 616 | 166_vol_slice_9 617 | 166_vol_slice_10 618 | 166_vol_slice_11 619 | 166_vol_slice_12 620 | 166_vol_slice_13 621 | 166_vol_slice_14 622 | 166_vol_slice_15 623 | 166_vol_slice_16 624 | 166_vol_slice_17 625 | 166_vol_slice_18 626 | 102_vol_slice_0 627 | 102_vol_slice_1 628 | 102_vol_slice_2 629 | 102_vol_slice_3 630 | 102_vol_slice_4 631 | 102_vol_slice_5 632 | 102_vol_slice_6 633 | 102_vol_slice_7 634 | 102_vol_slice_8 635 | 102_vol_slice_9 636 | 102_vol_slice_10 637 | 102_vol_slice_11 638 | 102_vol_slice_12 639 | 102_vol_slice_13 640 | 102_vol_slice_14 641 | 102_vol_slice_15 642 | 102_vol_slice_16 643 | 102_vol_slice_17 644 | 102_vol_slice_18 645 | 102_vol_slice_19 646 | 102_vol_slice_20 647 | 83_vol_slice_0 648 | 83_vol_slice_1 649 | 83_vol_slice_2 650 | 83_vol_slice_3 651 | 83_vol_slice_4 652 | 83_vol_slice_5 653 | 83_vol_slice_6 654 | 83_vol_slice_7 655 | 83_vol_slice_8 656 | 83_vol_slice_9 657 | 83_vol_slice_10 658 | 83_vol_slice_11 659 | 83_vol_slice_12 660 | 83_vol_slice_13 661 | 83_vol_slice_14 662 | 83_vol_slice_15 663 | 83_vol_slice_16 664 | 83_vol_slice_17 665 | 
83_vol_slice_18 666 | 120_vol_slice_0 667 | 120_vol_slice_1 668 | 120_vol_slice_2 669 | 120_vol_slice_3 670 | 120_vol_slice_4 671 | 120_vol_slice_5 672 | 120_vol_slice_6 673 | 120_vol_slice_7 674 | 120_vol_slice_8 675 | 120_vol_slice_9 676 | 120_vol_slice_10 677 | 120_vol_slice_11 678 | 120_vol_slice_12 679 | 120_vol_slice_13 680 | 120_vol_slice_14 681 | 120_vol_slice_15 682 | 120_vol_slice_16 683 | 120_vol_slice_17 684 | 120_vol_slice_18 685 | 120_vol_slice_19 686 | 120_vol_slice_20 687 | 120_vol_slice_21 688 | 120_vol_slice_22 689 | 120_vol_slice_23 690 | 120_vol_slice_24 691 | 84_vol_slice_0 692 | 84_vol_slice_1 693 | 84_vol_slice_2 694 | 84_vol_slice_3 695 | 84_vol_slice_4 696 | 84_vol_slice_5 697 | 84_vol_slice_6 698 | 84_vol_slice_7 699 | 84_vol_slice_8 700 | 84_vol_slice_9 701 | 84_vol_slice_10 702 | 84_vol_slice_11 703 | 84_vol_slice_12 704 | 84_vol_slice_13 705 | 84_vol_slice_14 706 | 84_vol_slice_15 707 | 84_vol_slice_16 708 | 84_vol_slice_17 709 | 84_vol_slice_18 710 | 89_vol_slice_0 711 | 89_vol_slice_1 712 | 89_vol_slice_2 713 | 89_vol_slice_3 714 | 89_vol_slice_4 715 | 89_vol_slice_5 716 | 89_vol_slice_6 717 | 89_vol_slice_7 718 | 89_vol_slice_8 719 | 89_vol_slice_9 720 | 89_vol_slice_10 721 | 89_vol_slice_11 722 | 89_vol_slice_12 723 | 89_vol_slice_13 724 | 89_vol_slice_14 725 | 89_vol_slice_15 726 | 89_vol_slice_16 727 | 89_vol_slice_17 728 | 89_vol_slice_18 729 | 89_vol_slice_19 730 | 89_vol_slice_20 731 | 89_vol_slice_21 732 | 89_vol_slice_22 733 | 69_vol_slice_0 734 | 69_vol_slice_1 735 | 69_vol_slice_2 736 | 69_vol_slice_3 737 | 69_vol_slice_4 738 | 69_vol_slice_5 739 | 69_vol_slice_6 740 | 69_vol_slice_7 741 | 69_vol_slice_8 742 | 69_vol_slice_9 743 | 69_vol_slice_10 744 | 69_vol_slice_11 745 | 69_vol_slice_12 746 | 69_vol_slice_13 747 | 69_vol_slice_14 748 | 69_vol_slice_15 749 | 69_vol_slice_16 750 | 69_vol_slice_17 751 | 69_vol_slice_18 752 | 162_vol_slice_0 753 | 162_vol_slice_1 754 | 162_vol_slice_2 755 | 162_vol_slice_3 756 | 162_vol_slice_4 757 | 162_vol_slice_5 758 | 162_vol_slice_6 759 | 162_vol_slice_7 760 | 162_vol_slice_8 761 | 162_vol_slice_9 762 | 162_vol_slice_10 763 | 162_vol_slice_11 764 | 162_vol_slice_12 765 | 162_vol_slice_13 766 | 162_vol_slice_14 767 | 162_vol_slice_15 768 | 162_vol_slice_16 769 | 162_vol_slice_17 770 | 162_vol_slice_18 771 | 66_vol_slice_0 772 | 66_vol_slice_1 773 | 66_vol_slice_2 774 | 66_vol_slice_3 775 | 66_vol_slice_4 776 | 66_vol_slice_5 777 | 66_vol_slice_6 778 | 66_vol_slice_7 779 | 66_vol_slice_8 780 | 66_vol_slice_9 781 | 66_vol_slice_10 782 | 66_vol_slice_11 783 | 66_vol_slice_12 784 | 66_vol_slice_13 785 | 66_vol_slice_14 786 | 66_vol_slice_15 787 | 66_vol_slice_16 788 | 66_vol_slice_17 789 | 66_vol_slice_18 790 | 66_vol_slice_19 791 | 66_vol_slice_20 792 | 170_vol_slice_0 793 | 170_vol_slice_1 794 | 170_vol_slice_2 795 | 170_vol_slice_3 796 | 170_vol_slice_4 797 | 170_vol_slice_5 798 | 170_vol_slice_6 799 | 170_vol_slice_7 800 | 170_vol_slice_8 801 | 170_vol_slice_9 802 | 170_vol_slice_10 803 | 170_vol_slice_11 804 | 170_vol_slice_12 805 | 170_vol_slice_13 806 | 170_vol_slice_14 807 | 170_vol_slice_15 808 | 170_vol_slice_16 809 | 170_vol_slice_17 810 | 170_vol_slice_18 811 | 76_vol_slice_0 812 | 76_vol_slice_1 813 | 76_vol_slice_2 814 | 76_vol_slice_3 815 | 76_vol_slice_4 816 | 76_vol_slice_5 817 | 76_vol_slice_6 818 | 76_vol_slice_7 819 | 76_vol_slice_8 820 | 76_vol_slice_9 821 | 76_vol_slice_10 822 | 76_vol_slice_11 823 | 76_vol_slice_12 824 | 76_vol_slice_13 825 | 76_vol_slice_14 826 | 76_vol_slice_15 827 | 
76_vol_slice_16 828 | 76_vol_slice_17 829 | 76_vol_slice_18 830 | 188_vol_slice_0 831 | 188_vol_slice_1 832 | 188_vol_slice_2 833 | 188_vol_slice_3 834 | 188_vol_slice_4 835 | 188_vol_slice_5 836 | 188_vol_slice_6 837 | 188_vol_slice_7 838 | 188_vol_slice_8 839 | 188_vol_slice_9 840 | 188_vol_slice_10 841 | 188_vol_slice_11 842 | 188_vol_slice_12 843 | 188_vol_slice_13 844 | 188_vol_slice_14 845 | 188_vol_slice_15 846 | 188_vol_slice_16 847 | 188_vol_slice_17 848 | 188_vol_slice_18 849 | 188_vol_slice_19 850 | 188_vol_slice_20 851 | 182_vol_slice_0 852 | 182_vol_slice_1 853 | 182_vol_slice_2 854 | 182_vol_slice_3 855 | 182_vol_slice_4 856 | 182_vol_slice_5 857 | 182_vol_slice_6 858 | 182_vol_slice_7 859 | 182_vol_slice_8 860 | 182_vol_slice_9 861 | 182_vol_slice_10 862 | 182_vol_slice_11 863 | 182_vol_slice_12 864 | 182_vol_slice_13 865 | 182_vol_slice_14 866 | 182_vol_slice_15 867 | 182_vol_slice_16 868 | 182_vol_slice_17 869 | 182_vol_slice_18 870 | 182_vol_slice_19 871 | 182_vol_slice_20 872 | 111_vol_slice_0 873 | 111_vol_slice_1 874 | 111_vol_slice_2 875 | 111_vol_slice_3 876 | 111_vol_slice_4 877 | 111_vol_slice_5 878 | 111_vol_slice_6 879 | 111_vol_slice_7 880 | 111_vol_slice_8 881 | 111_vol_slice_9 882 | 111_vol_slice_10 883 | 111_vol_slice_11 884 | 111_vol_slice_12 885 | 111_vol_slice_13 886 | 111_vol_slice_14 887 | 111_vol_slice_15 888 | 111_vol_slice_16 889 | 111_vol_slice_17 890 | 111_vol_slice_18 891 | 176_vol_slice_0 892 | 176_vol_slice_1 893 | 176_vol_slice_2 894 | 176_vol_slice_3 895 | 176_vol_slice_4 896 | 176_vol_slice_5 897 | 176_vol_slice_6 898 | 176_vol_slice_7 899 | 176_vol_slice_8 900 | 176_vol_slice_9 901 | 176_vol_slice_10 902 | 176_vol_slice_11 903 | 176_vol_slice_12 904 | 176_vol_slice_13 905 | 176_vol_slice_14 906 | 176_vol_slice_15 907 | 176_vol_slice_16 908 | 176_vol_slice_17 909 | 176_vol_slice_18 910 | 176_vol_slice_19 911 | 176_vol_slice_20 912 | 33_vol_slice_0 913 | 33_vol_slice_1 914 | 33_vol_slice_2 915 | 33_vol_slice_3 916 | 33_vol_slice_4 917 | 33_vol_slice_5 918 | 33_vol_slice_6 919 | 33_vol_slice_7 920 | 33_vol_slice_8 921 | 33_vol_slice_9 922 | 33_vol_slice_10 923 | 33_vol_slice_11 924 | 33_vol_slice_12 925 | 33_vol_slice_13 926 | 33_vol_slice_14 927 | 33_vol_slice_15 928 | 33_vol_slice_16 929 | 33_vol_slice_17 930 | 33_vol_slice_18 931 | 161_vol_slice_0 932 | 161_vol_slice_1 933 | 161_vol_slice_2 934 | 161_vol_slice_3 935 | 161_vol_slice_4 936 | 161_vol_slice_5 937 | 161_vol_slice_6 938 | 161_vol_slice_7 939 | 161_vol_slice_8 940 | 161_vol_slice_9 941 | 161_vol_slice_10 942 | 161_vol_slice_11 943 | 161_vol_slice_12 944 | 161_vol_slice_13 945 | 161_vol_slice_14 946 | 161_vol_slice_15 947 | 161_vol_slice_16 948 | 161_vol_slice_17 949 | 161_vol_slice_18 950 | 161_vol_slice_19 951 | 161_vol_slice_20 952 | 161_vol_slice_21 953 | 161_vol_slice_22 954 | 141_vol_slice_0 955 | 141_vol_slice_1 956 | 141_vol_slice_2 957 | 141_vol_slice_3 958 | 141_vol_slice_4 959 | 141_vol_slice_5 960 | 141_vol_slice_6 961 | 141_vol_slice_7 962 | 141_vol_slice_8 963 | 141_vol_slice_9 964 | 141_vol_slice_10 965 | 141_vol_slice_11 966 | 141_vol_slice_12 967 | 141_vol_slice_13 968 | 141_vol_slice_14 969 | 141_vol_slice_15 970 | 141_vol_slice_16 971 | 141_vol_slice_17 972 | 141_vol_slice_18 973 | 141_vol_slice_19 974 | 141_vol_slice_20 975 | 156_vol_slice_0 976 | 156_vol_slice_1 977 | 156_vol_slice_2 978 | 156_vol_slice_3 979 | 156_vol_slice_4 980 | 156_vol_slice_5 981 | 156_vol_slice_6 982 | 156_vol_slice_7 983 | 156_vol_slice_8 984 | 156_vol_slice_9 985 | 156_vol_slice_10 986 | 
156_vol_slice_11 987 | 156_vol_slice_12 988 | 156_vol_slice_13 989 | 156_vol_slice_14 990 | 156_vol_slice_15 991 | 156_vol_slice_16 992 | 156_vol_slice_17 993 | 156_vol_slice_18 994 | 156_vol_slice_19 995 | 156_vol_slice_20 996 | 46_vol_slice_0 997 | 46_vol_slice_1 998 | 46_vol_slice_2 999 | 46_vol_slice_3 1000 | 46_vol_slice_4 1001 | 46_vol_slice_5 1002 | 46_vol_slice_6 1003 | 46_vol_slice_7 1004 | 46_vol_slice_8 1005 | 46_vol_slice_9 1006 | 46_vol_slice_10 1007 | 46_vol_slice_11 1008 | 46_vol_slice_12 1009 | 46_vol_slice_13 1010 | 46_vol_slice_14 1011 | 46_vol_slice_15 1012 | 46_vol_slice_16 1013 | 46_vol_slice_17 1014 | 46_vol_slice_18 1015 | 80_vol_slice_0 1016 | 80_vol_slice_1 1017 | 80_vol_slice_2 1018 | 80_vol_slice_3 1019 | 80_vol_slice_4 1020 | 80_vol_slice_5 1021 | 80_vol_slice_6 1022 | 80_vol_slice_7 1023 | 80_vol_slice_8 1024 | 80_vol_slice_9 1025 | 80_vol_slice_10 1026 | 80_vol_slice_11 1027 | 80_vol_slice_12 1028 | 80_vol_slice_13 1029 | 80_vol_slice_14 1030 | 80_vol_slice_15 1031 | 80_vol_slice_16 1032 | 80_vol_slice_17 1033 | 80_vol_slice_18 1034 | 80_vol_slice_19 1035 | 80_vol_slice_20 1036 | 80_vol_slice_21 1037 | 80_vol_slice_22 1038 | 96_vol_slice_0 1039 | 96_vol_slice_1 1040 | 96_vol_slice_2 1041 | 96_vol_slice_3 1042 | 96_vol_slice_4 1043 | 96_vol_slice_5 1044 | 96_vol_slice_6 1045 | 96_vol_slice_7 1046 | 96_vol_slice_8 1047 | 96_vol_slice_9 1048 | 96_vol_slice_10 1049 | 96_vol_slice_11 1050 | 96_vol_slice_12 1051 | 96_vol_slice_13 1052 | 96_vol_slice_14 1053 | 96_vol_slice_15 1054 | 96_vol_slice_16 1055 | 96_vol_slice_17 1056 | 96_vol_slice_18 1057 | 96_vol_slice_19 1058 | 96_vol_slice_20 1059 | 104_vol_slice_0 1060 | 104_vol_slice_1 1061 | 104_vol_slice_2 1062 | 104_vol_slice_3 1063 | 104_vol_slice_4 1064 | 104_vol_slice_5 1065 | 104_vol_slice_6 1066 | 104_vol_slice_7 1067 | 104_vol_slice_8 1068 | 104_vol_slice_9 1069 | 104_vol_slice_10 1070 | 104_vol_slice_11 1071 | 104_vol_slice_12 1072 | 104_vol_slice_13 1073 | 104_vol_slice_14 1074 | 104_vol_slice_15 1075 | 104_vol_slice_16 1076 | 104_vol_slice_17 1077 | 104_vol_slice_18 1078 | 104_vol_slice_19 1079 | 104_vol_slice_20 1080 | 190_vol_slice_0 1081 | 190_vol_slice_1 1082 | 190_vol_slice_2 1083 | 190_vol_slice_3 1084 | 190_vol_slice_4 1085 | 190_vol_slice_5 1086 | 190_vol_slice_6 1087 | 190_vol_slice_7 1088 | 190_vol_slice_8 1089 | 190_vol_slice_9 1090 | 190_vol_slice_10 1091 | 190_vol_slice_11 1092 | 190_vol_slice_12 1093 | 190_vol_slice_13 1094 | 190_vol_slice_14 1095 | 190_vol_slice_15 1096 | 190_vol_slice_16 1097 | 190_vol_slice_17 1098 | 190_vol_slice_18 1099 | 110_vol_slice_0 1100 | 110_vol_slice_1 1101 | 110_vol_slice_2 1102 | 110_vol_slice_3 1103 | 110_vol_slice_4 1104 | 110_vol_slice_5 1105 | 110_vol_slice_6 1106 | 110_vol_slice_7 1107 | 110_vol_slice_8 1108 | 110_vol_slice_9 1109 | 110_vol_slice_10 1110 | 110_vol_slice_11 1111 | 110_vol_slice_12 1112 | 110_vol_slice_13 1113 | 110_vol_slice_14 1114 | 110_vol_slice_15 1115 | 110_vol_slice_16 1116 | 110_vol_slice_17 1117 | 110_vol_slice_18 1118 | 29_vol_slice_0 1119 | 29_vol_slice_1 1120 | 29_vol_slice_2 1121 | 29_vol_slice_3 1122 | 29_vol_slice_4 1123 | 29_vol_slice_5 1124 | 29_vol_slice_6 1125 | 29_vol_slice_7 1126 | 29_vol_slice_8 1127 | 29_vol_slice_9 1128 | 29_vol_slice_10 1129 | 29_vol_slice_11 1130 | 29_vol_slice_12 1131 | 29_vol_slice_13 1132 | 29_vol_slice_14 1133 | 29_vol_slice_15 1134 | 29_vol_slice_16 1135 | 29_vol_slice_17 1136 | 29_vol_slice_18 1137 | 29_vol_slice_19 1138 | 29_vol_slice_20 1139 | 196_vol_slice_0 1140 | 196_vol_slice_1 1141 | 
196_vol_slice_2 1142 | 196_vol_slice_3 1143 | 196_vol_slice_4 1144 | 196_vol_slice_5 1145 | 196_vol_slice_6 1146 | 196_vol_slice_7 1147 | 196_vol_slice_8 1148 | 196_vol_slice_9 1149 | 196_vol_slice_10 1150 | 196_vol_slice_11 1151 | 196_vol_slice_12 1152 | 196_vol_slice_13 1153 | 196_vol_slice_14 1154 | 196_vol_slice_15 1155 | 196_vol_slice_16 1156 | 196_vol_slice_17 1157 | 196_vol_slice_18 1158 | 196_vol_slice_19 1159 | 196_vol_slice_20 1160 | 184_vol_slice_0 1161 | 184_vol_slice_1 1162 | 184_vol_slice_2 1163 | 184_vol_slice_3 1164 | 184_vol_slice_4 1165 | 184_vol_slice_5 1166 | 184_vol_slice_6 1167 | 184_vol_slice_7 1168 | 184_vol_slice_8 1169 | 184_vol_slice_9 1170 | 184_vol_slice_10 1171 | 184_vol_slice_11 1172 | 184_vol_slice_12 1173 | 184_vol_slice_13 1174 | 184_vol_slice_14 1175 | 184_vol_slice_15 1176 | 184_vol_slice_16 1177 | 184_vol_slice_17 1178 | 184_vol_slice_18 1179 | 31_vol_slice_0 1180 | 31_vol_slice_1 1181 | 31_vol_slice_2 1182 | 31_vol_slice_3 1183 | 31_vol_slice_4 1184 | 31_vol_slice_5 1185 | 31_vol_slice_6 1186 | 31_vol_slice_7 1187 | 31_vol_slice_8 1188 | 31_vol_slice_9 1189 | 31_vol_slice_10 1190 | 31_vol_slice_11 1191 | 31_vol_slice_12 1192 | 31_vol_slice_13 1193 | 31_vol_slice_14 1194 | 31_vol_slice_15 1195 | 31_vol_slice_16 1196 | 31_vol_slice_17 1197 | 31_vol_slice_18 1198 | 20_vol_slice_0 1199 | 20_vol_slice_1 1200 | 20_vol_slice_2 1201 | 20_vol_slice_3 1202 | 20_vol_slice_4 1203 | 20_vol_slice_5 1204 | 20_vol_slice_6 1205 | 20_vol_slice_7 1206 | 20_vol_slice_8 1207 | 20_vol_slice_9 1208 | 20_vol_slice_10 1209 | 20_vol_slice_11 1210 | 20_vol_slice_12 1211 | 20_vol_slice_13 1212 | 20_vol_slice_14 1213 | 20_vol_slice_15 1214 | 20_vol_slice_16 1215 | 20_vol_slice_17 1216 | 20_vol_slice_18 1217 | 20_vol_slice_19 1218 | 20_vol_slice_20 1219 | 146_vol_slice_0 1220 | 146_vol_slice_1 1221 | 146_vol_slice_2 1222 | 146_vol_slice_3 1223 | 146_vol_slice_4 1224 | 146_vol_slice_5 1225 | 146_vol_slice_6 1226 | 146_vol_slice_7 1227 | 146_vol_slice_8 1228 | 146_vol_slice_9 1229 | 146_vol_slice_10 1230 | 146_vol_slice_11 1231 | 146_vol_slice_12 1232 | 146_vol_slice_13 1233 | 146_vol_slice_14 1234 | 146_vol_slice_15 1235 | 146_vol_slice_16 1236 | 146_vol_slice_17 1237 | 146_vol_slice_18 1238 | 146_vol_slice_19 1239 | 146_vol_slice_20 1240 | 150_vol_slice_0 1241 | 150_vol_slice_1 1242 | 150_vol_slice_2 1243 | 150_vol_slice_3 1244 | 150_vol_slice_4 1245 | 150_vol_slice_5 1246 | 150_vol_slice_6 1247 | 150_vol_slice_7 1248 | 150_vol_slice_8 1249 | 150_vol_slice_9 1250 | 150_vol_slice_10 1251 | 150_vol_slice_11 1252 | 150_vol_slice_12 1253 | 150_vol_slice_13 1254 | 150_vol_slice_14 1255 | 150_vol_slice_15 1256 | 150_vol_slice_16 1257 | 150_vol_slice_17 1258 | 150_vol_slice_18 1259 | 177_vol_slice_0 1260 | 177_vol_slice_1 1261 | 177_vol_slice_2 1262 | 177_vol_slice_3 1263 | 177_vol_slice_4 1264 | 177_vol_slice_5 1265 | 177_vol_slice_6 1266 | 177_vol_slice_7 1267 | 177_vol_slice_8 1268 | 177_vol_slice_9 1269 | 177_vol_slice_10 1270 | 177_vol_slice_11 1271 | 177_vol_slice_12 1272 | 177_vol_slice_13 1273 | 177_vol_slice_14 1274 | 177_vol_slice_15 1275 | 177_vol_slice_16 1276 | 177_vol_slice_17 1277 | 177_vol_slice_18 1278 | 59_vol_slice_0 1279 | 59_vol_slice_1 1280 | 59_vol_slice_2 1281 | 59_vol_slice_3 1282 | 59_vol_slice_4 1283 | 59_vol_slice_5 1284 | 59_vol_slice_6 1285 | 59_vol_slice_7 1286 | 59_vol_slice_8 1287 | 59_vol_slice_9 1288 | 59_vol_slice_10 1289 | 59_vol_slice_11 1290 | 59_vol_slice_12 1291 | 59_vol_slice_13 1292 | 59_vol_slice_14 1293 | 59_vol_slice_15 1294 | 59_vol_slice_16 
1295 | 59_vol_slice_17 1296 | 59_vol_slice_18 1297 | 112_vol_slice_0 1298 | 112_vol_slice_1 1299 | 112_vol_slice_2 1300 | 112_vol_slice_3 1301 | 112_vol_slice_4 1302 | 112_vol_slice_5 1303 | 112_vol_slice_6 1304 | 112_vol_slice_7 1305 | 112_vol_slice_8 1306 | 112_vol_slice_9 1307 | 112_vol_slice_10 1308 | 112_vol_slice_11 1309 | 112_vol_slice_12 1310 | 112_vol_slice_13 1311 | 112_vol_slice_14 1312 | 112_vol_slice_15 1313 | 112_vol_slice_16 1314 | 112_vol_slice_17 1315 | 112_vol_slice_18 1316 | 112_vol_slice_19 1317 | 112_vol_slice_20 1318 | 15_vol_slice_0 1319 | 15_vol_slice_1 1320 | 15_vol_slice_2 1321 | 15_vol_slice_3 1322 | 15_vol_slice_4 1323 | 15_vol_slice_5 1324 | 15_vol_slice_6 1325 | 15_vol_slice_7 1326 | 15_vol_slice_8 1327 | 15_vol_slice_9 1328 | 15_vol_slice_10 1329 | 15_vol_slice_11 1330 | 15_vol_slice_12 1331 | 15_vol_slice_13 1332 | 15_vol_slice_14 1333 | 15_vol_slice_15 1334 | 15_vol_slice_16 1335 | 15_vol_slice_17 1336 | 15_vol_slice_18 1337 | 4_vol_slice_0 1338 | 4_vol_slice_1 1339 | 4_vol_slice_2 1340 | 4_vol_slice_3 1341 | 4_vol_slice_4 1342 | 4_vol_slice_5 1343 | 4_vol_slice_6 1344 | 4_vol_slice_7 1345 | 4_vol_slice_8 1346 | 4_vol_slice_9 1347 | 4_vol_slice_10 1348 | 4_vol_slice_11 1349 | 4_vol_slice_12 1350 | 4_vol_slice_13 1351 | 4_vol_slice_14 1352 | 4_vol_slice_15 1353 | 4_vol_slice_16 1354 | 4_vol_slice_17 1355 | 4_vol_slice_18 1356 | 198_vol_slice_0 1357 | 198_vol_slice_1 1358 | 198_vol_slice_2 1359 | 198_vol_slice_3 1360 | 198_vol_slice_4 1361 | 198_vol_slice_5 1362 | 198_vol_slice_6 1363 | 198_vol_slice_7 1364 | 198_vol_slice_8 1365 | 198_vol_slice_9 1366 | 198_vol_slice_10 1367 | 198_vol_slice_11 1368 | 198_vol_slice_12 1369 | 198_vol_slice_13 1370 | 198_vol_slice_14 1371 | 198_vol_slice_15 1372 | 198_vol_slice_16 1373 | 198_vol_slice_17 1374 | 198_vol_slice_18 1375 | 129_vol_slice_0 1376 | 129_vol_slice_1 1377 | 129_vol_slice_2 1378 | 129_vol_slice_3 1379 | 129_vol_slice_4 1380 | 129_vol_slice_5 1381 | 129_vol_slice_6 1382 | 129_vol_slice_7 1383 | 129_vol_slice_8 1384 | 129_vol_slice_9 1385 | 129_vol_slice_10 1386 | 129_vol_slice_11 1387 | 129_vol_slice_12 1388 | 129_vol_slice_13 1389 | 129_vol_slice_14 1390 | 129_vol_slice_15 1391 | 129_vol_slice_16 1392 | 129_vol_slice_17 1393 | 129_vol_slice_18 1394 | 129_vol_slice_18 1395 | 129_vol_slice_16 1396 | 198_vol_slice_3 1397 | 15_vol_slice_6 1398 | 150_vol_slice_8 1399 | 46_vol_slice_16 1400 | 120_vol_slice_14 1401 | 130_vol_slice_5 1402 | 130_vol_slice_6 1403 | 130_vol_slice_7 1404 | 130_vol_slice_8 1405 | 130_vol_slice_9 1406 | 130_vol_slice_10 1407 | 130_vol_slice_11 1408 | 130_vol_slice_12 1409 | 130_vol_slice_13 1410 | 130_vol_slice_14 1411 | 130_vol_slice_15 1412 | 130_vol_slice_16 1413 | 125_vol_slice_5 1414 | 125_vol_slice_6 1415 | 125_vol_slice_7 1416 | 125_vol_slice_8 1417 | 125_vol_slice_9 1418 | 125_vol_slice_10 1419 | 125_vol_slice_11 1420 | 125_vol_slice_12 1421 | 125_vol_slice_13 1422 | 125_vol_slice_14 1423 | 125_vol_slice_15 1424 | 125_vol_slice_16 1425 | 12_vol_slice_4 1426 | 12_vol_slice_5 1427 | 12_vol_slice_6 1428 | 12_vol_slice_7 1429 | 12_vol_slice_8 1430 | 12_vol_slice_9 1431 | 12_vol_slice_10 1432 | 12_vol_slice_11 1433 | 12_vol_slice_12 1434 | 65_vol_slice_8 1435 | 65_vol_slice_9 1436 | 65_vol_slice_10 1437 | 65_vol_slice_11 1438 | 65_vol_slice_12 1439 | 65_vol_slice_13 1440 | 65_vol_slice_14 1441 | 65_vol_slice_15 1442 | 136_vol_slice_5 1443 | 136_vol_slice_6 1444 | 136_vol_slice_7 1445 | 136_vol_slice_8 1446 | 136_vol_slice_9 1447 | 136_vol_slice_10 1448 | 136_vol_slice_11 1449 | 
136_vol_slice_12 1450 | 136_vol_slice_13 1451 | 136_vol_slice_14 1452 | 136_vol_slice_15 1453 | 117_vol_slice_8 1454 | 117_vol_slice_9 1455 | 117_vol_slice_10 1456 | 117_vol_slice_11 1457 | 117_vol_slice_12 1458 | 117_vol_slice_13 1459 | 117_vol_slice_14 1460 | 117_vol_slice_15 1461 | 134_vol_slice_4 1462 | 134_vol_slice_5 1463 | 134_vol_slice_6 1464 | 134_vol_slice_7 1465 | 134_vol_slice_8 1466 | 134_vol_slice_9 1467 | 134_vol_slice_10 1468 | 134_vol_slice_11 1469 | 134_vol_slice_12 1470 | 134_vol_slice_13 1471 | 134_vol_slice_14 1472 | 193_vol_slice_4 1473 | 193_vol_slice_5 1474 | 193_vol_slice_6 1475 | 193_vol_slice_7 1476 | 193_vol_slice_8 1477 | 193_vol_slice_9 1478 | 193_vol_slice_10 1479 | 193_vol_slice_11 1480 | 193_vol_slice_12 1481 | 193_vol_slice_13 1482 | 193_vol_slice_14 1483 | 9_vol_slice_2 1484 | 9_vol_slice_3 1485 | 9_vol_slice_4 1486 | 9_vol_slice_5 1487 | 9_vol_slice_6 1488 | 9_vol_slice_7 1489 | 9_vol_slice_8 1490 | 9_vol_slice_9 1491 | 9_vol_slice_10 1492 | 9_vol_slice_11 1493 | 9_vol_slice_12 1494 | 9_vol_slice_13 1495 | 9_vol_slice_14 1496 | 107_vol_slice_0 1497 | 107_vol_slice_1 1498 | 107_vol_slice_2 1499 | 107_vol_slice_3 1500 | 107_vol_slice_4 1501 | 107_vol_slice_5 1502 | 107_vol_slice_6 1503 | 107_vol_slice_7 1504 | 107_vol_slice_8 1505 | 107_vol_slice_9 1506 | 107_vol_slice_10 1507 | 107_vol_slice_11 1508 | 107_vol_slice_12 1509 | 107_vol_slice_13 1510 | 107_vol_slice_14 1511 | 107_vol_slice_15 1512 | 54_vol_slice_5 1513 | 54_vol_slice_6 1514 | 54_vol_slice_7 1515 | 54_vol_slice_8 1516 | 54_vol_slice_9 1517 | 54_vol_slice_10 1518 | 54_vol_slice_11 1519 | 54_vol_slice_12 1520 | 54_vol_slice_13 1521 | 54_vol_slice_14 1522 | 54_vol_slice_15 1523 | 54_vol_slice_16 1524 | 39_vol_slice_4 1525 | 39_vol_slice_5 1526 | 39_vol_slice_6 1527 | 39_vol_slice_7 1528 | 39_vol_slice_8 1529 | 39_vol_slice_9 1530 | 39_vol_slice_10 1531 | 39_vol_slice_11 1532 | 39_vol_slice_12 1533 | 39_vol_slice_13 1534 | 39_vol_slice_14 1535 | 39_vol_slice_15 1536 | 39_vol_slice_16 1537 | 39_vol_slice_17 1538 | 44_vol_slice_5 1539 | 44_vol_slice_6 1540 | 44_vol_slice_7 1541 | 44_vol_slice_8 1542 | 44_vol_slice_9 1543 | 44_vol_slice_10 1544 | 44_vol_slice_11 1545 | 44_vol_slice_12 1546 | 44_vol_slice_13 1547 | 44_vol_slice_14 1548 | 44_vol_slice_15 1549 | 44_vol_slice_16 1550 | 70_vol_slice_2 1551 | 70_vol_slice_3 1552 | 70_vol_slice_4 1553 | 70_vol_slice_5 1554 | 70_vol_slice_6 1555 | 70_vol_slice_7 1556 | 70_vol_slice_8 1557 | 70_vol_slice_9 1558 | 70_vol_slice_10 1559 | 70_vol_slice_11 1560 | 70_vol_slice_12 1561 | 70_vol_slice_13 1562 | 70_vol_slice_14 1563 | 70_vol_slice_15 1564 | 70_vol_slice_16 1565 | 72_vol_slice_2 1566 | 72_vol_slice_3 1567 | 72_vol_slice_4 1568 | 72_vol_slice_5 1569 | 72_vol_slice_6 1570 | 72_vol_slice_7 1571 | 72_vol_slice_8 1572 | 72_vol_slice_9 1573 | 72_vol_slice_10 1574 | 72_vol_slice_11 1575 | 72_vol_slice_12 1576 | 72_vol_slice_13 1577 | 72_vol_slice_14 1578 | 168_vol_slice_4 1579 | 168_vol_slice_5 1580 | 168_vol_slice_6 1581 | 168_vol_slice_7 1582 | 168_vol_slice_8 1583 | 168_vol_slice_9 1584 | 168_vol_slice_10 1585 | 168_vol_slice_11 1586 | 168_vol_slice_12 1587 | 168_vol_slice_13 1588 | 168_vol_slice_14 1589 | 168_vol_slice_15 1590 | 56_vol_slice_3 1591 | 56_vol_slice_4 1592 | 56_vol_slice_5 1593 | 56_vol_slice_6 1594 | 56_vol_slice_7 1595 | 56_vol_slice_8 1596 | 56_vol_slice_9 1597 | 56_vol_slice_10 1598 | 56_vol_slice_11 1599 | 56_vol_slice_12 1600 | 56_vol_slice_13 1601 | 56_vol_slice_14 1602 | 56_vol_slice_15 1603 | 56_vol_slice_16 1604 | 142_vol_slice_4 
1605 | 142_vol_slice_5 1606 | 142_vol_slice_6 1607 | 142_vol_slice_7 1608 | 142_vol_slice_8 1609 | 142_vol_slice_9 1610 | 142_vol_slice_10 1611 | 142_vol_slice_11 1612 | 142_vol_slice_12 1613 | 142_vol_slice_13 1614 | 142_vol_slice_14 1615 | 142_vol_slice_15 1616 | 142_vol_slice_16 1617 | 94_vol_slice_5 1618 | 94_vol_slice_6 1619 | 94_vol_slice_7 1620 | 94_vol_slice_8 1621 | 94_vol_slice_9 1622 | 94_vol_slice_10 1623 | 94_vol_slice_11 1624 | 94_vol_slice_12 1625 | 94_vol_slice_13 1626 | 94_vol_slice_14 1627 | 94_vol_slice_15 1628 | 7_vol_slice_5 1629 | 7_vol_slice_6 1630 | 7_vol_slice_7 1631 | 7_vol_slice_8 1632 | 7_vol_slice_9 1633 | 7_vol_slice_10 1634 | 7_vol_slice_11 1635 | 7_vol_slice_12 1636 | 7_vol_slice_13 1637 | 7_vol_slice_14 1638 | 7_vol_slice_15 1639 | 7_vol_slice_16 1640 | 7_vol_slice_17 1641 | 144_vol_slice_5 1642 | 144_vol_slice_6 1643 | 144_vol_slice_7 1644 | 144_vol_slice_8 1645 | 144_vol_slice_9 1646 | 144_vol_slice_10 1647 | 144_vol_slice_11 1648 | 144_vol_slice_12 1649 | 144_vol_slice_13 1650 | 144_vol_slice_14 1651 | 144_vol_slice_15 1652 | 118_vol_slice_5 1653 | 118_vol_slice_6 1654 | 118_vol_slice_7 1655 | 118_vol_slice_8 1656 | 118_vol_slice_9 1657 | 118_vol_slice_10 1658 | 118_vol_slice_11 1659 | 118_vol_slice_12 1660 | 118_vol_slice_13 1661 | 118_vol_slice_14 1662 | 118_vol_slice_15 1663 | 118_vol_slice_16 1664 | 183_vol_slice_2 1665 | 183_vol_slice_3 1666 | 183_vol_slice_4 1667 | 183_vol_slice_5 1668 | 183_vol_slice_6 1669 | 183_vol_slice_7 1670 | 183_vol_slice_8 1671 | 183_vol_slice_9 1672 | 183_vol_slice_10 1673 | 183_vol_slice_11 1674 | 121_vol_slice_2 1675 | 121_vol_slice_3 1676 | 121_vol_slice_4 1677 | 121_vol_slice_5 1678 | 121_vol_slice_6 1679 | 121_vol_slice_7 1680 | 121_vol_slice_8 1681 | 121_vol_slice_9 1682 | 121_vol_slice_10 1683 | 121_vol_slice_11 1684 | 121_vol_slice_12 1685 | 121_vol_slice_13 1686 | 121_vol_slice_14 1687 | 26_vol_slice_2 1688 | 26_vol_slice_3 1689 | 26_vol_slice_4 1690 | 26_vol_slice_5 1691 | 26_vol_slice_6 1692 | 26_vol_slice_7 1693 | 26_vol_slice_8 1694 | 26_vol_slice_9 1695 | 26_vol_slice_10 1696 | 26_vol_slice_11 1697 | 26_vol_slice_12 1698 | 26_vol_slice_13 1699 | 60_vol_slice_0 1700 | 60_vol_slice_1 1701 | 60_vol_slice_2 1702 | 60_vol_slice_3 1703 | 60_vol_slice_4 1704 | 60_vol_slice_5 1705 | 60_vol_slice_6 1706 | 60_vol_slice_7 1707 | 60_vol_slice_8 1708 | 60_vol_slice_9 1709 | 60_vol_slice_10 1710 | 60_vol_slice_11 1711 | 60_vol_slice_12 1712 | 60_vol_slice_13 1713 | 60_vol_slice_14 1714 | 60_vol_slice_15 1715 | 60_vol_slice_16 1716 | 60_vol_slice_17 1717 | 60_vol_slice_18 1718 | 60_vol_slice_19 1719 | 180_vol_slice_5 1720 | 180_vol_slice_6 1721 | 180_vol_slice_7 1722 | 180_vol_slice_8 1723 | 180_vol_slice_9 1724 | 180_vol_slice_10 1725 | 180_vol_slice_11 1726 | 180_vol_slice_12 1727 | 180_vol_slice_13 1728 | 154_vol_slice_1 1729 | 154_vol_slice_2 1730 | 154_vol_slice_3 1731 | 154_vol_slice_4 1732 | 154_vol_slice_5 1733 | 154_vol_slice_6 1734 | 154_vol_slice_7 1735 | 154_vol_slice_8 1736 | 154_vol_slice_9 1737 | 154_vol_slice_10 1738 | 154_vol_slice_11 1739 | 154_vol_slice_12 1740 | 154_vol_slice_13 1741 | 154_vol_slice_14 1742 | 154_vol_slice_15 1743 | 90_vol_slice_2 1744 | 90_vol_slice_3 1745 | 90_vol_slice_4 1746 | 90_vol_slice_5 1747 | 90_vol_slice_6 1748 | 90_vol_slice_7 1749 | 90_vol_slice_8 1750 | 90_vol_slice_9 1751 | 90_vol_slice_10 1752 | 90_vol_slice_11 1753 | 90_vol_slice_12 1754 | 90_vol_slice_13 1755 | 166_vol_slice_0 1756 | 166_vol_slice_1 1757 | 166_vol_slice_2 1758 | 166_vol_slice_3 1759 | 166_vol_slice_4 
1760 | 166_vol_slice_5 1761 | 166_vol_slice_6 1762 | 166_vol_slice_7 1763 | 166_vol_slice_8 1764 | 166_vol_slice_9 1765 | 166_vol_slice_10 1766 | 166_vol_slice_11 1767 | 166_vol_slice_12 1768 | 102_vol_slice_3 1769 | 102_vol_slice_4 1770 | 102_vol_slice_5 1771 | 102_vol_slice_6 1772 | 102_vol_slice_7 1773 | 102_vol_slice_8 1774 | 102_vol_slice_9 1775 | 102_vol_slice_10 1776 | 102_vol_slice_11 1777 | 102_vol_slice_12 1778 | 102_vol_slice_13 1779 | 102_vol_slice_14 1780 | 83_vol_slice_7 1781 | 83_vol_slice_8 1782 | 83_vol_slice_9 1783 | 83_vol_slice_10 1784 | 83_vol_slice_11 1785 | 83_vol_slice_12 1786 | 83_vol_slice_13 1787 | 83_vol_slice_14 1788 | 83_vol_slice_15 1789 | 120_vol_slice_2 1790 | 120_vol_slice_3 1791 | 120_vol_slice_4 1792 | 120_vol_slice_5 1793 | 120_vol_slice_6 1794 | 120_vol_slice_7 1795 | 120_vol_slice_8 1796 | 120_vol_slice_9 1797 | 120_vol_slice_10 1798 | 120_vol_slice_11 1799 | 120_vol_slice_12 1800 | 120_vol_slice_13 1801 | 120_vol_slice_14 1802 | 120_vol_slice_15 1803 | 84_vol_slice_4 1804 | 84_vol_slice_5 1805 | 84_vol_slice_6 1806 | 84_vol_slice_7 1807 | 84_vol_slice_8 1808 | 84_vol_slice_9 1809 | 84_vol_slice_10 1810 | 84_vol_slice_11 1811 | 84_vol_slice_12 1812 | 84_vol_slice_13 1813 | 89_vol_slice_6 1814 | 89_vol_slice_7 1815 | 89_vol_slice_8 1816 | 89_vol_slice_9 1817 | 89_vol_slice_10 1818 | 89_vol_slice_11 1819 | 89_vol_slice_12 1820 | 89_vol_slice_13 1821 | 89_vol_slice_14 1822 | 89_vol_slice_15 1823 | 89_vol_slice_16 1824 | 89_vol_slice_17 1825 | 89_vol_slice_18 1826 | 89_vol_slice_19 1827 | 69_vol_slice_4 1828 | 69_vol_slice_5 1829 | 69_vol_slice_6 1830 | 69_vol_slice_7 1831 | 69_vol_slice_8 1832 | 69_vol_slice_9 1833 | 69_vol_slice_10 1834 | 69_vol_slice_11 1835 | 69_vol_slice_12 1836 | 69_vol_slice_13 1837 | 162_vol_slice_4 1838 | 162_vol_slice_5 1839 | 162_vol_slice_6 1840 | 162_vol_slice_7 1841 | 162_vol_slice_8 1842 | 162_vol_slice_9 1843 | 162_vol_slice_10 1844 | 162_vol_slice_11 1845 | 162_vol_slice_12 1846 | 162_vol_slice_13 1847 | 66_vol_slice_4 1848 | 66_vol_slice_5 1849 | 66_vol_slice_6 1850 | 66_vol_slice_7 1851 | 66_vol_slice_8 1852 | 66_vol_slice_9 1853 | 66_vol_slice_10 1854 | 66_vol_slice_11 1855 | 66_vol_slice_12 1856 | 66_vol_slice_13 1857 | 66_vol_slice_14 1858 | 170_vol_slice_3 1859 | 170_vol_slice_4 1860 | 170_vol_slice_5 1861 | 170_vol_slice_6 1862 | 170_vol_slice_7 1863 | 170_vol_slice_8 1864 | 170_vol_slice_9 1865 | 170_vol_slice_10 1866 | 170_vol_slice_11 1867 | 170_vol_slice_12 1868 | 170_vol_slice_13 1869 | 170_vol_slice_14 1870 | 170_vol_slice_15 1871 | 170_vol_slice_16 1872 | 170_vol_slice_17 1873 | 76_vol_slice_4 1874 | 76_vol_slice_5 1875 | 76_vol_slice_6 1876 | 76_vol_slice_7 1877 | 76_vol_slice_8 1878 | 76_vol_slice_9 1879 | 76_vol_slice_10 1880 | 76_vol_slice_11 1881 | 76_vol_slice_12 1882 | 76_vol_slice_13 1883 | 188_vol_slice_5 1884 | 188_vol_slice_6 1885 | 188_vol_slice_7 1886 | 188_vol_slice_8 1887 | 188_vol_slice_9 1888 | 188_vol_slice_10 1889 | 188_vol_slice_11 1890 | 188_vol_slice_12 1891 | 188_vol_slice_13 1892 | 188_vol_slice_14 1893 | 182_vol_slice_3 1894 | 182_vol_slice_4 1895 | 182_vol_slice_5 1896 | 182_vol_slice_6 1897 | 182_vol_slice_7 1898 | 182_vol_slice_8 1899 | 182_vol_slice_9 1900 | 182_vol_slice_10 1901 | 182_vol_slice_11 1902 | 182_vol_slice_12 1903 | 182_vol_slice_13 1904 | 111_vol_slice_4 1905 | 111_vol_slice_5 1906 | 111_vol_slice_6 1907 | 111_vol_slice_7 1908 | 111_vol_slice_8 1909 | 111_vol_slice_9 1910 | 111_vol_slice_10 1911 | 111_vol_slice_11 1912 | 111_vol_slice_12 1913 | 111_vol_slice_13 1914 
| 111_vol_slice_14 1915 | 176_vol_slice_6 1916 | 176_vol_slice_7 1917 | 176_vol_slice_8 1918 | 176_vol_slice_9 1919 | 176_vol_slice_10 1920 | 176_vol_slice_11 1921 | 176_vol_slice_12 1922 | 176_vol_slice_13 1923 | 176_vol_slice_14 1924 | 176_vol_slice_15 1925 | 176_vol_slice_16 1926 | 176_vol_slice_17 1927 | 33_vol_slice_3 1928 | 33_vol_slice_4 1929 | 33_vol_slice_5 1930 | 33_vol_slice_6 1931 | 33_vol_slice_7 1932 | 33_vol_slice_8 1933 | 33_vol_slice_9 1934 | 33_vol_slice_10 1935 | 33_vol_slice_11 1936 | 33_vol_slice_12 1937 | 33_vol_slice_13 1938 | 33_vol_slice_14 1939 | 33_vol_slice_15 1940 | 161_vol_slice_2 1941 | 161_vol_slice_3 1942 | 161_vol_slice_4 1943 | 161_vol_slice_5 1944 | 161_vol_slice_6 1945 | 161_vol_slice_7 1946 | 161_vol_slice_8 1947 | 161_vol_slice_9 1948 | 161_vol_slice_10 1949 | 161_vol_slice_11 1950 | 161_vol_slice_12 1951 | 161_vol_slice_13 1952 | 161_vol_slice_14 1953 | 161_vol_slice_15 1954 | 161_vol_slice_16 1955 | 161_vol_slice_17 1956 | 141_vol_slice_3 1957 | 141_vol_slice_4 1958 | 141_vol_slice_5 1959 | 141_vol_slice_6 1960 | 141_vol_slice_7 1961 | 141_vol_slice_8 1962 | 141_vol_slice_9 1963 | 141_vol_slice_10 1964 | 141_vol_slice_11 1965 | 141_vol_slice_12 1966 | 141_vol_slice_13 1967 | 141_vol_slice_14 1968 | 141_vol_slice_15 1969 | 141_vol_slice_16 1970 | 156_vol_slice_3 1971 | 156_vol_slice_4 1972 | 156_vol_slice_5 1973 | 156_vol_slice_6 1974 | 156_vol_slice_7 1975 | 156_vol_slice_8 1976 | 156_vol_slice_9 1977 | 156_vol_slice_10 1978 | 156_vol_slice_11 1979 | 156_vol_slice_12 1980 | 156_vol_slice_13 1981 | 156_vol_slice_14 1982 | 156_vol_slice_15 1983 | 156_vol_slice_16 1984 | 46_vol_slice_6 1985 | 46_vol_slice_7 1986 | 46_vol_slice_8 1987 | 46_vol_slice_9 1988 | 46_vol_slice_10 1989 | 46_vol_slice_11 1990 | 46_vol_slice_12 1991 | 46_vol_slice_13 1992 | 46_vol_slice_14 1993 | 46_vol_slice_15 1994 | 80_vol_slice_3 1995 | 80_vol_slice_4 1996 | 80_vol_slice_5 1997 | 80_vol_slice_6 1998 | 80_vol_slice_7 1999 | 80_vol_slice_8 2000 | 80_vol_slice_9 2001 | 80_vol_slice_10 2002 | 80_vol_slice_11 2003 | 80_vol_slice_12 2004 | 80_vol_slice_13 2005 | 80_vol_slice_14 2006 | 80_vol_slice_15 2007 | 80_vol_slice_16 2008 | 80_vol_slice_17 2009 | 80_vol_slice_18 2010 | 96_vol_slice_5 2011 | 96_vol_slice_6 2012 | 96_vol_slice_7 2013 | 96_vol_slice_8 2014 | 96_vol_slice_9 2015 | 96_vol_slice_10 2016 | 96_vol_slice_11 2017 | 96_vol_slice_12 2018 | 96_vol_slice_13 2019 | 96_vol_slice_14 2020 | 96_vol_slice_15 2021 | 104_vol_slice_4 2022 | 104_vol_slice_5 2023 | 104_vol_slice_6 2024 | 104_vol_slice_7 2025 | 104_vol_slice_8 2026 | 104_vol_slice_9 2027 | 104_vol_slice_10 2028 | 104_vol_slice_11 2029 | 104_vol_slice_12 2030 | 104_vol_slice_13 2031 | 104_vol_slice_14 2032 | 104_vol_slice_15 2033 | 104_vol_slice_16 2034 | 190_vol_slice_4 2035 | 190_vol_slice_5 2036 | 190_vol_slice_6 2037 | 190_vol_slice_7 2038 | 190_vol_slice_8 2039 | 190_vol_slice_9 2040 | 190_vol_slice_10 2041 | 190_vol_slice_11 2042 | 190_vol_slice_12 2043 | 190_vol_slice_13 2044 | 190_vol_slice_14 2045 | 190_vol_slice_15 2046 | 110_vol_slice_2 2047 | 110_vol_slice_3 2048 | 110_vol_slice_4 2049 | 110_vol_slice_5 2050 | 110_vol_slice_6 2051 | 110_vol_slice_7 2052 | 110_vol_slice_8 2053 | 110_vol_slice_9 2054 | 110_vol_slice_10 2055 | 110_vol_slice_11 2056 | 110_vol_slice_12 2057 | 110_vol_slice_13 2058 | 110_vol_slice_14 2059 | 29_vol_slice_3 2060 | 29_vol_slice_4 2061 | 29_vol_slice_5 2062 | 29_vol_slice_6 2063 | 29_vol_slice_7 2064 | 29_vol_slice_8 2065 | 29_vol_slice_9 2066 | 29_vol_slice_10 2067 | 
29_vol_slice_11 2068 | 29_vol_slice_12 2069 | 29_vol_slice_13 2070 | 29_vol_slice_14 2071 | 29_vol_slice_15 2072 | 196_vol_slice_4 2073 | 196_vol_slice_5 2074 | 196_vol_slice_6 2075 | 196_vol_slice_7 2076 | 196_vol_slice_8 2077 | 196_vol_slice_9 2078 | 196_vol_slice_10 2079 | 196_vol_slice_11 2080 | 196_vol_slice_12 2081 | 196_vol_slice_13 2082 | 196_vol_slice_14 2083 | 184_vol_slice_8 2084 | 184_vol_slice_9 2085 | 184_vol_slice_10 2086 | 184_vol_slice_11 2087 | 184_vol_slice_12 2088 | 184_vol_slice_13 2089 | 184_vol_slice_14 2090 | 184_vol_slice_15 2091 | 31_vol_slice_9 2092 | 31_vol_slice_10 2093 | 31_vol_slice_11 2094 | 31_vol_slice_12 2095 | 31_vol_slice_13 2096 | 31_vol_slice_14 2097 | 31_vol_slice_15 2098 | 31_vol_slice_16 2099 | 31_vol_slice_17 2100 | 31_vol_slice_18 2101 | 20_vol_slice_4 2102 | 20_vol_slice_5 2103 | 20_vol_slice_6 2104 | 20_vol_slice_7 2105 | 20_vol_slice_8 2106 | 20_vol_slice_9 2107 | 20_vol_slice_10 2108 | 20_vol_slice_11 2109 | 20_vol_slice_12 2110 | 20_vol_slice_13 2111 | 20_vol_slice_14 2112 | 146_vol_slice_0 2113 | 146_vol_slice_1 2114 | 146_vol_slice_2 2115 | 146_vol_slice_3 2116 | 146_vol_slice_4 2117 | 146_vol_slice_5 2118 | 146_vol_slice_6 2119 | 146_vol_slice_7 2120 | 146_vol_slice_8 2121 | 146_vol_slice_9 2122 | 146_vol_slice_10 2123 | 146_vol_slice_11 2124 | 146_vol_slice_12 2125 | 146_vol_slice_13 2126 | 146_vol_slice_14 2127 | 146_vol_slice_15 2128 | 150_vol_slice_3 2129 | 150_vol_slice_4 2130 | 150_vol_slice_5 2131 | 150_vol_slice_6 2132 | 150_vol_slice_7 2133 | 150_vol_slice_8 2134 | 150_vol_slice_9 2135 | 150_vol_slice_10 2136 | 150_vol_slice_11 2137 | 150_vol_slice_12 2138 | 177_vol_slice_3 2139 | 177_vol_slice_4 2140 | 177_vol_slice_5 2141 | 177_vol_slice_6 2142 | 177_vol_slice_7 2143 | 177_vol_slice_8 2144 | 177_vol_slice_9 2145 | 177_vol_slice_10 2146 | 177_vol_slice_11 2147 | 177_vol_slice_12 2148 | 59_vol_slice_5 2149 | 59_vol_slice_6 2150 | 59_vol_slice_7 2151 | 59_vol_slice_8 2152 | 59_vol_slice_9 2153 | 59_vol_slice_10 2154 | 59_vol_slice_11 2155 | 59_vol_slice_12 2156 | 59_vol_slice_13 2157 | 59_vol_slice_14 2158 | 59_vol_slice_15 2159 | 59_vol_slice_16 2160 | 112_vol_slice_3 2161 | 112_vol_slice_4 2162 | 112_vol_slice_5 2163 | 112_vol_slice_6 2164 | 112_vol_slice_7 2165 | 112_vol_slice_8 2166 | 112_vol_slice_9 2167 | 112_vol_slice_10 2168 | 112_vol_slice_11 2169 | 112_vol_slice_12 2170 | 112_vol_slice_13 2171 | 112_vol_slice_14 2172 | 112_vol_slice_15 2173 | 15_vol_slice_5 2174 | 15_vol_slice_6 2175 | 15_vol_slice_7 2176 | 15_vol_slice_8 2177 | 15_vol_slice_9 2178 | 15_vol_slice_10 2179 | 15_vol_slice_11 2180 | 15_vol_slice_12 2181 | 15_vol_slice_13 2182 | 15_vol_slice_14 2183 | 15_vol_slice_15 2184 | 4_vol_slice_3 2185 | 4_vol_slice_4 2186 | 4_vol_slice_5 2187 | 4_vol_slice_6 2188 | 4_vol_slice_7 2189 | 4_vol_slice_8 2190 | 4_vol_slice_9 2191 | 4_vol_slice_10 2192 | 4_vol_slice_11 2193 | 4_vol_slice_12 2194 | 4_vol_slice_13 2195 | 4_vol_slice_14 2196 | 4_vol_slice_15 2197 | 198_vol_slice_5 2198 | 198_vol_slice_6 2199 | 198_vol_slice_7 2200 | 198_vol_slice_8 2201 | 198_vol_slice_9 2202 | 198_vol_slice_10 2203 | 198_vol_slice_11 2204 | 198_vol_slice_12 2205 | 198_vol_slice_13 2206 | 198_vol_slice_14 2207 | 198_vol_slice_15 2208 | 198_vol_slice_16 2209 | 129_vol_slice_4 2210 | 129_vol_slice_5 2211 | 129_vol_slice_6 2212 | 129_vol_slice_7 2213 | 129_vol_slice_8 2214 | 129_vol_slice_9 2215 | 129_vol_slice_10 2216 | 129_vol_slice_11 2217 | 129_vol_slice_12 2218 | 129_vol_slice_13 2219 | 15_vol_slice_6 2220 | 150_vol_slice_8 2221 | 
120_vol_slice_14 2222 | 130_vol_slice_2 2223 | 130_vol_slice_3 2224 | 130_vol_slice_4 2225 | 130_vol_slice_5 2226 | 130_vol_slice_6 2227 | 130_vol_slice_7 2228 | 130_vol_slice_8 2229 | 130_vol_slice_9 2230 | 130_vol_slice_10 2231 | 130_vol_slice_11 2232 | 130_vol_slice_12 2233 | 130_vol_slice_13 2234 | 130_vol_slice_14 2235 | 130_vol_slice_15 2236 | 130_vol_slice_16 2237 | 130_vol_slice_17 2238 | 130_vol_slice_18 2239 | 130_vol_slice_19 2240 | 130_vol_slice_20 2241 | 125_vol_slice_4 2242 | 125_vol_slice_5 2243 | 125_vol_slice_6 2244 | 125_vol_slice_7 2245 | 125_vol_slice_8 2246 | 125_vol_slice_9 2247 | 125_vol_slice_10 2248 | 125_vol_slice_11 2249 | 125_vol_slice_12 2250 | 125_vol_slice_13 2251 | 125_vol_slice_14 2252 | 125_vol_slice_15 2253 | 125_vol_slice_16 2254 | 125_vol_slice_17 2255 | 125_vol_slice_18 2256 | 125_vol_slice_19 2257 | 125_vol_slice_20 2258 | 12_vol_slice_1 2259 | 12_vol_slice_2 2260 | 12_vol_slice_3 2261 | 12_vol_slice_4 2262 | 12_vol_slice_5 2263 | 12_vol_slice_6 2264 | 12_vol_slice_7 2265 | 12_vol_slice_8 2266 | 12_vol_slice_9 2267 | 12_vol_slice_10 2268 | 12_vol_slice_11 2269 | 12_vol_slice_12 2270 | 12_vol_slice_13 2271 | 12_vol_slice_14 2272 | 65_vol_slice_5 2273 | 65_vol_slice_6 2274 | 65_vol_slice_7 2275 | 65_vol_slice_8 2276 | 65_vol_slice_9 2277 | 65_vol_slice_10 2278 | 65_vol_slice_11 2279 | 65_vol_slice_12 2280 | 65_vol_slice_13 2281 | 65_vol_slice_14 2282 | 65_vol_slice_15 2283 | 65_vol_slice_16 2284 | 136_vol_slice_2 2285 | 136_vol_slice_3 2286 | 136_vol_slice_4 2287 | 136_vol_slice_5 2288 | 136_vol_slice_6 2289 | 136_vol_slice_7 2290 | 136_vol_slice_8 2291 | 136_vol_slice_9 2292 | 136_vol_slice_10 2293 | 136_vol_slice_11 2294 | 136_vol_slice_12 2295 | 136_vol_slice_13 2296 | 136_vol_slice_14 2297 | 136_vol_slice_15 2298 | 136_vol_slice_16 2299 | 136_vol_slice_17 2300 | 136_vol_slice_18 2301 | 117_vol_slice_6 2302 | 117_vol_slice_7 2303 | 117_vol_slice_8 2304 | 117_vol_slice_9 2305 | 117_vol_slice_10 2306 | 117_vol_slice_11 2307 | 117_vol_slice_12 2308 | 117_vol_slice_13 2309 | 117_vol_slice_14 2310 | 117_vol_slice_15 2311 | 117_vol_slice_16 2312 | 117_vol_slice_17 2313 | 134_vol_slice_3 2314 | 134_vol_slice_4 2315 | 134_vol_slice_5 2316 | 134_vol_slice_6 2317 | 134_vol_slice_7 2318 | 134_vol_slice_8 2319 | 134_vol_slice_9 2320 | 134_vol_slice_10 2321 | 134_vol_slice_11 2322 | 134_vol_slice_12 2323 | 134_vol_slice_13 2324 | 134_vol_slice_14 2325 | 134_vol_slice_15 2326 | 134_vol_slice_16 2327 | 134_vol_slice_17 2328 | 134_vol_slice_18 2329 | 134_vol_slice_19 2330 | 134_vol_slice_20 2331 | 193_vol_slice_4 2332 | 193_vol_slice_5 2333 | 193_vol_slice_6 2334 | 193_vol_slice_7 2335 | 193_vol_slice_8 2336 | 193_vol_slice_9 2337 | 193_vol_slice_10 2338 | 193_vol_slice_11 2339 | 193_vol_slice_12 2340 | 193_vol_slice_13 2341 | 193_vol_slice_14 2342 | 193_vol_slice_15 2343 | 193_vol_slice_16 2344 | 9_vol_slice_0 2345 | 9_vol_slice_1 2346 | 9_vol_slice_2 2347 | 9_vol_slice_3 2348 | 9_vol_slice_4 2349 | 9_vol_slice_5 2350 | 9_vol_slice_6 2351 | 9_vol_slice_7 2352 | 9_vol_slice_8 2353 | 9_vol_slice_9 2354 | 9_vol_slice_10 2355 | 9_vol_slice_11 2356 | 9_vol_slice_12 2357 | 9_vol_slice_13 2358 | 9_vol_slice_14 2359 | 9_vol_slice_15 2360 | 9_vol_slice_16 2361 | 9_vol_slice_17 2362 | 9_vol_slice_18 2363 | 9_vol_slice_19 2364 | 107_vol_slice_0 2365 | 107_vol_slice_1 2366 | 107_vol_slice_2 2367 | 107_vol_slice_3 2368 | 107_vol_slice_4 2369 | 107_vol_slice_5 2370 | 107_vol_slice_6 2371 | 107_vol_slice_7 2372 | 107_vol_slice_8 2373 | 107_vol_slice_9 2374 | 107_vol_slice_10 2375 
| 107_vol_slice_11 2376 | 107_vol_slice_12 2377 | 107_vol_slice_13 2378 | 107_vol_slice_14 2379 | 107_vol_slice_15 2380 | 107_vol_slice_16 2381 | 107_vol_slice_17 2382 | 107_vol_slice_18 2383 | 107_vol_slice_19 2384 | 107_vol_slice_20 2385 | 107_vol_slice_21 2386 | 54_vol_slice_3 2387 | 54_vol_slice_4 2388 | 54_vol_slice_5 2389 | 54_vol_slice_6 2390 | 54_vol_slice_7 2391 | 54_vol_slice_8 2392 | 54_vol_slice_9 2393 | 54_vol_slice_10 2394 | 54_vol_slice_11 2395 | 54_vol_slice_12 2396 | 54_vol_slice_13 2397 | 54_vol_slice_14 2398 | 54_vol_slice_15 2399 | 54_vol_slice_16 2400 | 54_vol_slice_17 2401 | 54_vol_slice_18 2402 | 54_vol_slice_19 2403 | 39_vol_slice_2 2404 | 39_vol_slice_3 2405 | 39_vol_slice_4 2406 | 39_vol_slice_5 2407 | 39_vol_slice_6 2408 | 39_vol_slice_7 2409 | 39_vol_slice_8 2410 | 39_vol_slice_9 2411 | 39_vol_slice_10 2412 | 39_vol_slice_11 2413 | 39_vol_slice_12 2414 | 39_vol_slice_13 2415 | 39_vol_slice_14 2416 | 39_vol_slice_15 2417 | 39_vol_slice_16 2418 | 39_vol_slice_17 2419 | 39_vol_slice_18 2420 | 39_vol_slice_19 2421 | 39_vol_slice_20 2422 | 39_vol_slice_21 2423 | 44_vol_slice_1 2424 | 44_vol_slice_2 2425 | 44_vol_slice_3 2426 | 44_vol_slice_4 2427 | 44_vol_slice_5 2428 | 44_vol_slice_6 2429 | 44_vol_slice_7 2430 | 44_vol_slice_8 2431 | 44_vol_slice_9 2432 | 44_vol_slice_10 2433 | 44_vol_slice_11 2434 | 44_vol_slice_12 2435 | 44_vol_slice_13 2436 | 44_vol_slice_14 2437 | 44_vol_slice_15 2438 | 44_vol_slice_16 2439 | 44_vol_slice_17 2440 | 44_vol_slice_18 2441 | 70_vol_slice_1 2442 | 70_vol_slice_2 2443 | 70_vol_slice_3 2444 | 70_vol_slice_4 2445 | 70_vol_slice_5 2446 | 70_vol_slice_6 2447 | 70_vol_slice_7 2448 | 70_vol_slice_8 2449 | 70_vol_slice_9 2450 | 70_vol_slice_10 2451 | 70_vol_slice_11 2452 | 70_vol_slice_12 2453 | 70_vol_slice_13 2454 | 70_vol_slice_14 2455 | 70_vol_slice_15 2456 | 70_vol_slice_16 2457 | 70_vol_slice_17 2458 | 70_vol_slice_18 2459 | 70_vol_slice_19 2460 | 72_vol_slice_0 2461 | 72_vol_slice_1 2462 | 72_vol_slice_2 2463 | 72_vol_slice_3 2464 | 72_vol_slice_4 2465 | 72_vol_slice_5 2466 | 72_vol_slice_6 2467 | 72_vol_slice_7 2468 | 72_vol_slice_8 2469 | 72_vol_slice_9 2470 | 72_vol_slice_10 2471 | 72_vol_slice_11 2472 | 72_vol_slice_12 2473 | 72_vol_slice_13 2474 | 72_vol_slice_14 2475 | 72_vol_slice_15 2476 | 72_vol_slice_16 2477 | 72_vol_slice_17 2478 | 168_vol_slice_3 2479 | 168_vol_slice_4 2480 | 168_vol_slice_5 2481 | 168_vol_slice_6 2482 | 168_vol_slice_7 2483 | 168_vol_slice_8 2484 | 168_vol_slice_9 2485 | 168_vol_slice_10 2486 | 168_vol_slice_11 2487 | 168_vol_slice_12 2488 | 168_vol_slice_13 2489 | 168_vol_slice_14 2490 | 168_vol_slice_15 2491 | 168_vol_slice_16 2492 | 56_vol_slice_2 2493 | 56_vol_slice_3 2494 | 56_vol_slice_4 2495 | 56_vol_slice_5 2496 | 56_vol_slice_6 2497 | 56_vol_slice_7 2498 | 56_vol_slice_8 2499 | 56_vol_slice_9 2500 | 56_vol_slice_10 2501 | 56_vol_slice_11 2502 | 56_vol_slice_12 2503 | 56_vol_slice_13 2504 | 56_vol_slice_14 2505 | 56_vol_slice_15 2506 | 56_vol_slice_16 2507 | 56_vol_slice_17 2508 | 56_vol_slice_18 2509 | 142_vol_slice_2 2510 | 142_vol_slice_3 2511 | 142_vol_slice_4 2512 | 142_vol_slice_5 2513 | 142_vol_slice_6 2514 | 142_vol_slice_7 2515 | 142_vol_slice_8 2516 | 142_vol_slice_9 2517 | 142_vol_slice_10 2518 | 142_vol_slice_11 2519 | 142_vol_slice_12 2520 | 142_vol_slice_13 2521 | 142_vol_slice_14 2522 | 142_vol_slice_15 2523 | 142_vol_slice_16 2524 | 142_vol_slice_17 2525 | 142_vol_slice_18 2526 | 142_vol_slice_19 2527 | 94_vol_slice_4 2528 | 94_vol_slice_5 2529 | 94_vol_slice_6 2530 | 
94_vol_slice_7 2531 | 94_vol_slice_8 2532 | 94_vol_slice_9 2533 | 94_vol_slice_10 2534 | 94_vol_slice_11 2535 | 94_vol_slice_12 2536 | 94_vol_slice_13 2537 | 94_vol_slice_14 2538 | 94_vol_slice_15 2539 | 7_vol_slice_4 2540 | 7_vol_slice_5 2541 | 7_vol_slice_6 2542 | 7_vol_slice_7 2543 | 7_vol_slice_8 2544 | 7_vol_slice_9 2545 | 7_vol_slice_10 2546 | 7_vol_slice_11 2547 | 7_vol_slice_12 2548 | 7_vol_slice_13 2549 | 7_vol_slice_14 2550 | 7_vol_slice_15 2551 | 7_vol_slice_16 2552 | 7_vol_slice_17 2553 | 7_vol_slice_18 2554 | 7_vol_slice_19 2555 | 7_vol_slice_20 2556 | 7_vol_slice_21 2557 | 144_vol_slice_3 2558 | 144_vol_slice_4 2559 | 144_vol_slice_5 2560 | 144_vol_slice_6 2561 | 144_vol_slice_7 2562 | 144_vol_slice_8 2563 | 144_vol_slice_9 2564 | 144_vol_slice_10 2565 | 144_vol_slice_11 2566 | 144_vol_slice_12 2567 | 144_vol_slice_13 2568 | 144_vol_slice_14 2569 | 144_vol_slice_15 2570 | 144_vol_slice_16 2571 | 144_vol_slice_17 2572 | 144_vol_slice_18 2573 | 144_vol_slice_19 2574 | 118_vol_slice_5 2575 | 118_vol_slice_6 2576 | 118_vol_slice_7 2577 | 118_vol_slice_8 2578 | 118_vol_slice_9 2579 | 118_vol_slice_10 2580 | 118_vol_slice_11 2581 | 118_vol_slice_12 2582 | 118_vol_slice_13 2583 | 118_vol_slice_14 2584 | 118_vol_slice_15 2585 | 118_vol_slice_16 2586 | 118_vol_slice_17 2587 | 118_vol_slice_18 2588 | 118_vol_slice_19 2589 | 183_vol_slice_0 2590 | 183_vol_slice_1 2591 | 183_vol_slice_2 2592 | 183_vol_slice_3 2593 | 183_vol_slice_4 2594 | 183_vol_slice_5 2595 | 183_vol_slice_6 2596 | 183_vol_slice_7 2597 | 183_vol_slice_8 2598 | 183_vol_slice_9 2599 | 183_vol_slice_10 2600 | 183_vol_slice_11 2601 | 183_vol_slice_12 2602 | 183_vol_slice_13 2603 | 183_vol_slice_14 2604 | 183_vol_slice_15 2605 | 183_vol_slice_16 2606 | 183_vol_slice_17 2607 | 121_vol_slice_0 2608 | 121_vol_slice_1 2609 | 121_vol_slice_2 2610 | 121_vol_slice_3 2611 | 121_vol_slice_4 2612 | 121_vol_slice_5 2613 | 121_vol_slice_6 2614 | 121_vol_slice_7 2615 | 121_vol_slice_8 2616 | 121_vol_slice_9 2617 | 121_vol_slice_10 2618 | 121_vol_slice_11 2619 | 121_vol_slice_12 2620 | 121_vol_slice_13 2621 | 121_vol_slice_14 2622 | 121_vol_slice_15 2623 | 121_vol_slice_16 2624 | 121_vol_slice_17 2625 | 121_vol_slice_18 2626 | 121_vol_slice_19 2627 | 26_vol_slice_2 2628 | 26_vol_slice_3 2629 | 26_vol_slice_4 2630 | 26_vol_slice_5 2631 | 26_vol_slice_6 2632 | 26_vol_slice_7 2633 | 26_vol_slice_8 2634 | 26_vol_slice_9 2635 | 26_vol_slice_10 2636 | 26_vol_slice_11 2637 | 26_vol_slice_12 2638 | 26_vol_slice_13 2639 | 60_vol_slice_0 2640 | 60_vol_slice_1 2641 | 60_vol_slice_2 2642 | 60_vol_slice_3 2643 | 60_vol_slice_4 2644 | 60_vol_slice_5 2645 | 60_vol_slice_6 2646 | 60_vol_slice_7 2647 | 60_vol_slice_8 2648 | 60_vol_slice_9 2649 | 60_vol_slice_10 2650 | 60_vol_slice_11 2651 | 60_vol_slice_12 2652 | 60_vol_slice_13 2653 | 60_vol_slice_14 2654 | 60_vol_slice_15 2655 | 60_vol_slice_16 2656 | 60_vol_slice_17 2657 | 60_vol_slice_18 2658 | 60_vol_slice_19 2659 | 60_vol_slice_20 2660 | 60_vol_slice_21 2661 | 180_vol_slice_3 2662 | 180_vol_slice_4 2663 | 180_vol_slice_5 2664 | 180_vol_slice_6 2665 | 180_vol_slice_7 2666 | 180_vol_slice_8 2667 | 180_vol_slice_9 2668 | 180_vol_slice_10 2669 | 180_vol_slice_11 2670 | 180_vol_slice_12 2671 | 180_vol_slice_13 2672 | 180_vol_slice_14 2673 | 180_vol_slice_15 2674 | 180_vol_slice_16 2675 | 180_vol_slice_17 2676 | 154_vol_slice_1 2677 | 154_vol_slice_2 2678 | 154_vol_slice_3 2679 | 154_vol_slice_4 2680 | 154_vol_slice_5 2681 | 154_vol_slice_6 2682 | 154_vol_slice_7 2683 | 154_vol_slice_8 2684 | 
154_vol_slice_9 2685 | 154_vol_slice_10 2686 | 154_vol_slice_11 2687 | 154_vol_slice_12 2688 | 154_vol_slice_13 2689 | 154_vol_slice_14 2690 | 154_vol_slice_15 2691 | 154_vol_slice_16 2692 | 154_vol_slice_17 2693 | 154_vol_slice_18 2694 | 90_vol_slice_2 2695 | 90_vol_slice_3 2696 | 90_vol_slice_4 2697 | 90_vol_slice_5 2698 | 90_vol_slice_6 2699 | 90_vol_slice_7 2700 | 90_vol_slice_8 2701 | 90_vol_slice_9 2702 | 90_vol_slice_10 2703 | 90_vol_slice_11 2704 | 90_vol_slice_12 2705 | 90_vol_slice_13 2706 | 90_vol_slice_14 2707 | 90_vol_slice_15 2708 | 90_vol_slice_16 2709 | 166_vol_slice_0 2710 | 166_vol_slice_1 2711 | 166_vol_slice_2 2712 | 166_vol_slice_3 2713 | 166_vol_slice_4 2714 | 166_vol_slice_5 2715 | 166_vol_slice_6 2716 | 166_vol_slice_7 2717 | 166_vol_slice_8 2718 | 166_vol_slice_9 2719 | 166_vol_slice_10 2720 | 166_vol_slice_11 2721 | 166_vol_slice_12 2722 | 166_vol_slice_13 2723 | 166_vol_slice_14 2724 | 166_vol_slice_15 2725 | 166_vol_slice_16 2726 | 102_vol_slice_2 2727 | 102_vol_slice_3 2728 | 102_vol_slice_4 2729 | 102_vol_slice_5 2730 | 102_vol_slice_6 2731 | 102_vol_slice_7 2732 | 102_vol_slice_8 2733 | 102_vol_slice_9 2734 | 102_vol_slice_10 2735 | 102_vol_slice_11 2736 | 102_vol_slice_12 2737 | 102_vol_slice_13 2738 | 102_vol_slice_14 2739 | 102_vol_slice_15 2740 | 102_vol_slice_16 2741 | 102_vol_slice_17 2742 | 102_vol_slice_18 2743 | 102_vol_slice_19 2744 | 83_vol_slice_4 2745 | 83_vol_slice_5 2746 | 83_vol_slice_6 2747 | 83_vol_slice_7 2748 | 83_vol_slice_8 2749 | 83_vol_slice_9 2750 | 83_vol_slice_10 2751 | 83_vol_slice_11 2752 | 83_vol_slice_12 2753 | 83_vol_slice_13 2754 | 83_vol_slice_14 2755 | 83_vol_slice_15 2756 | 83_vol_slice_16 2757 | 120_vol_slice_1 2758 | 120_vol_slice_2 2759 | 120_vol_slice_3 2760 | 120_vol_slice_4 2761 | 120_vol_slice_5 2762 | 120_vol_slice_6 2763 | 120_vol_slice_7 2764 | 120_vol_slice_8 2765 | 120_vol_slice_9 2766 | 120_vol_slice_10 2767 | 120_vol_slice_11 2768 | 120_vol_slice_12 2769 | 120_vol_slice_13 2770 | 120_vol_slice_14 2771 | 120_vol_slice_15 2772 | 120_vol_slice_16 2773 | 120_vol_slice_17 2774 | 120_vol_slice_18 2775 | 120_vol_slice_19 2776 | 120_vol_slice_20 2777 | 120_vol_slice_21 2778 | 120_vol_slice_22 2779 | 120_vol_slice_23 2780 | 84_vol_slice_1 2781 | 84_vol_slice_2 2782 | 84_vol_slice_3 2783 | 84_vol_slice_4 2784 | 84_vol_slice_5 2785 | 84_vol_slice_6 2786 | 84_vol_slice_7 2787 | 84_vol_slice_8 2788 | 84_vol_slice_9 2789 | 84_vol_slice_10 2790 | 84_vol_slice_11 2791 | 84_vol_slice_12 2792 | 84_vol_slice_13 2793 | 84_vol_slice_14 2794 | 84_vol_slice_15 2795 | 84_vol_slice_16 2796 | 89_vol_slice_3 2797 | 89_vol_slice_4 2798 | 89_vol_slice_5 2799 | 89_vol_slice_6 2800 | 89_vol_slice_7 2801 | 89_vol_slice_8 2802 | 89_vol_slice_9 2803 | 89_vol_slice_10 2804 | 89_vol_slice_11 2805 | 89_vol_slice_12 2806 | 89_vol_slice_13 2807 | 89_vol_slice_14 2808 | 89_vol_slice_15 2809 | 89_vol_slice_16 2810 | 89_vol_slice_17 2811 | 89_vol_slice_18 2812 | 89_vol_slice_19 2813 | 89_vol_slice_20 2814 | 89_vol_slice_21 2815 | 69_vol_slice_2 2816 | 69_vol_slice_3 2817 | 69_vol_slice_4 2818 | 69_vol_slice_5 2819 | 69_vol_slice_6 2820 | 69_vol_slice_7 2821 | 69_vol_slice_8 2822 | 69_vol_slice_9 2823 | 69_vol_slice_10 2824 | 69_vol_slice_11 2825 | 69_vol_slice_12 2826 | 69_vol_slice_13 2827 | 69_vol_slice_14 2828 | 162_vol_slice_2 2829 | 162_vol_slice_3 2830 | 162_vol_slice_4 2831 | 162_vol_slice_5 2832 | 162_vol_slice_6 2833 | 162_vol_slice_7 2834 | 162_vol_slice_8 2835 | 162_vol_slice_9 2836 | 162_vol_slice_10 2837 | 162_vol_slice_11 2838 | 
162_vol_slice_12 2839 | 162_vol_slice_13 2840 | 162_vol_slice_14 2841 | 162_vol_slice_15 2842 | 162_vol_slice_16 2843 | 162_vol_slice_17 2844 | 66_vol_slice_2 2845 | 66_vol_slice_3 2846 | 66_vol_slice_4 2847 | 66_vol_slice_5 2848 | 66_vol_slice_6 2849 | 66_vol_slice_7 2850 | 66_vol_slice_8 2851 | 66_vol_slice_9 2852 | 66_vol_slice_10 2853 | 66_vol_slice_11 2854 | 66_vol_slice_12 2855 | 66_vol_slice_13 2856 | 66_vol_slice_14 2857 | 66_vol_slice_15 2858 | 66_vol_slice_16 2859 | 66_vol_slice_17 2860 | 66_vol_slice_18 2861 | 170_vol_slice_2 2862 | 170_vol_slice_3 2863 | 170_vol_slice_4 2864 | 170_vol_slice_5 2865 | 170_vol_slice_6 2866 | 170_vol_slice_7 2867 | 170_vol_slice_8 2868 | 170_vol_slice_9 2869 | 170_vol_slice_10 2870 | 170_vol_slice_11 2871 | 170_vol_slice_12 2872 | 170_vol_slice_13 2873 | 170_vol_slice_14 2874 | 170_vol_slice_15 2875 | 170_vol_slice_16 2876 | 170_vol_slice_17 2877 | 76_vol_slice_2 2878 | 76_vol_slice_3 2879 | 76_vol_slice_4 2880 | 76_vol_slice_5 2881 | 76_vol_slice_6 2882 | 76_vol_slice_7 2883 | 76_vol_slice_8 2884 | 76_vol_slice_9 2885 | 76_vol_slice_10 2886 | 76_vol_slice_11 2887 | 76_vol_slice_12 2888 | 76_vol_slice_13 2889 | 76_vol_slice_14 2890 | 188_vol_slice_2 2891 | 188_vol_slice_3 2892 | 188_vol_slice_4 2893 | 188_vol_slice_5 2894 | 188_vol_slice_6 2895 | 188_vol_slice_7 2896 | 188_vol_slice_8 2897 | 188_vol_slice_9 2898 | 188_vol_slice_10 2899 | 188_vol_slice_11 2900 | 188_vol_slice_12 2901 | 188_vol_slice_13 2902 | 188_vol_slice_14 2903 | 188_vol_slice_15 2904 | 188_vol_slice_16 2905 | 188_vol_slice_17 2906 | 188_vol_slice_18 2907 | 188_vol_slice_19 2908 | 188_vol_slice_20 2909 | 182_vol_slice_0 2910 | 182_vol_slice_1 2911 | 182_vol_slice_2 2912 | 182_vol_slice_3 2913 | 182_vol_slice_4 2914 | 182_vol_slice_5 2915 | 182_vol_slice_6 2916 | 182_vol_slice_7 2917 | 182_vol_slice_8 2918 | 182_vol_slice_9 2919 | 182_vol_slice_10 2920 | 182_vol_slice_11 2921 | 182_vol_slice_12 2922 | 182_vol_slice_13 2923 | 182_vol_slice_14 2924 | 182_vol_slice_15 2925 | 182_vol_slice_16 2926 | 182_vol_slice_17 2927 | 182_vol_slice_18 2928 | 182_vol_slice_19 2929 | 111_vol_slice_4 2930 | 111_vol_slice_5 2931 | 111_vol_slice_6 2932 | 111_vol_slice_7 2933 | 111_vol_slice_8 2934 | 111_vol_slice_9 2935 | 111_vol_slice_10 2936 | 111_vol_slice_11 2937 | 111_vol_slice_12 2938 | 111_vol_slice_13 2939 | 111_vol_slice_14 2940 | 111_vol_slice_15 2941 | 176_vol_slice_6 2942 | 176_vol_slice_7 2943 | 176_vol_slice_8 2944 | 176_vol_slice_9 2945 | 176_vol_slice_10 2946 | 176_vol_slice_11 2947 | 176_vol_slice_12 2948 | 176_vol_slice_13 2949 | 176_vol_slice_14 2950 | 176_vol_slice_15 2951 | 176_vol_slice_16 2952 | 176_vol_slice_17 2953 | 176_vol_slice_18 2954 | 33_vol_slice_2 2955 | 33_vol_slice_3 2956 | 33_vol_slice_4 2957 | 33_vol_slice_5 2958 | 33_vol_slice_6 2959 | 33_vol_slice_7 2960 | 33_vol_slice_8 2961 | 33_vol_slice_9 2962 | 33_vol_slice_10 2963 | 33_vol_slice_11 2964 | 33_vol_slice_12 2965 | 33_vol_slice_13 2966 | 33_vol_slice_14 2967 | 33_vol_slice_15 2968 | 33_vol_slice_16 2969 | 161_vol_slice_1 2970 | 161_vol_slice_2 2971 | 161_vol_slice_3 2972 | 161_vol_slice_4 2973 | 161_vol_slice_5 2974 | 161_vol_slice_6 2975 | 161_vol_slice_7 2976 | 161_vol_slice_8 2977 | 161_vol_slice_9 2978 | 161_vol_slice_10 2979 | 161_vol_slice_11 2980 | 161_vol_slice_12 2981 | 161_vol_slice_13 2982 | 161_vol_slice_14 2983 | 161_vol_slice_15 2984 | 161_vol_slice_16 2985 | 161_vol_slice_17 2986 | 161_vol_slice_18 2987 | 161_vol_slice_19 2988 | 161_vol_slice_20 2989 | 141_vol_slice_3 2990 | 141_vol_slice_4 2991 
| 141_vol_slice_5 2992 | 141_vol_slice_6 2993 | 141_vol_slice_7 2994 | 141_vol_slice_8 2995 | 141_vol_slice_9 2996 | 141_vol_slice_10 2997 | 141_vol_slice_11 2998 | 141_vol_slice_12 2999 | 141_vol_slice_13 3000 | 141_vol_slice_14 3001 | 141_vol_slice_15 3002 | 141_vol_slice_16 3003 | 141_vol_slice_17 3004 | 141_vol_slice_18 3005 | 156_vol_slice_2 3006 | 156_vol_slice_3 3007 | 156_vol_slice_4 3008 | 156_vol_slice_5 3009 | 156_vol_slice_6 3010 | 156_vol_slice_7 3011 | 156_vol_slice_8 3012 | 156_vol_slice_9 3013 | 156_vol_slice_10 3014 | 156_vol_slice_11 3015 | 156_vol_slice_12 3016 | 156_vol_slice_13 3017 | 156_vol_slice_14 3018 | 156_vol_slice_15 3019 | 156_vol_slice_16 3020 | 156_vol_slice_17 3021 | 156_vol_slice_18 3022 | 156_vol_slice_19 3023 | 46_vol_slice_2 3024 | 46_vol_slice_3 3025 | 46_vol_slice_4 3026 | 46_vol_slice_5 3027 | 46_vol_slice_6 3028 | 46_vol_slice_7 3029 | 46_vol_slice_8 3030 | 46_vol_slice_9 3031 | 46_vol_slice_10 3032 | 46_vol_slice_11 3033 | 46_vol_slice_12 3034 | 46_vol_slice_13 3035 | 46_vol_slice_14 3036 | 46_vol_slice_15 3037 | 80_vol_slice_2 3038 | 80_vol_slice_3 3039 | 80_vol_slice_4 3040 | 80_vol_slice_5 3041 | 80_vol_slice_6 3042 | 80_vol_slice_7 3043 | 80_vol_slice_8 3044 | 80_vol_slice_9 3045 | 80_vol_slice_10 3046 | 80_vol_slice_11 3047 | 80_vol_slice_12 3048 | 80_vol_slice_13 3049 | 80_vol_slice_14 3050 | 80_vol_slice_15 3051 | 80_vol_slice_16 3052 | 80_vol_slice_17 3053 | 80_vol_slice_18 3054 | 80_vol_slice_19 3055 | 80_vol_slice_20 3056 | 80_vol_slice_21 3057 | 80_vol_slice_22 3058 | 96_vol_slice_2 3059 | 96_vol_slice_3 3060 | 96_vol_slice_4 3061 | 96_vol_slice_5 3062 | 96_vol_slice_6 3063 | 96_vol_slice_7 3064 | 96_vol_slice_8 3065 | 96_vol_slice_9 3066 | 96_vol_slice_10 3067 | 96_vol_slice_11 3068 | 96_vol_slice_12 3069 | 96_vol_slice_13 3070 | 96_vol_slice_14 3071 | 96_vol_slice_15 3072 | 96_vol_slice_16 3073 | 96_vol_slice_17 3074 | 96_vol_slice_18 3075 | 104_vol_slice_2 3076 | 104_vol_slice_3 3077 | 104_vol_slice_4 3078 | 104_vol_slice_5 3079 | 104_vol_slice_6 3080 | 104_vol_slice_7 3081 | 104_vol_slice_8 3082 | 104_vol_slice_9 3083 | 104_vol_slice_10 3084 | 104_vol_slice_11 3085 | 104_vol_slice_12 3086 | 104_vol_slice_13 3087 | 104_vol_slice_14 3088 | 104_vol_slice_15 3089 | 104_vol_slice_16 3090 | 104_vol_slice_17 3091 | 190_vol_slice_2 3092 | 190_vol_slice_3 3093 | 190_vol_slice_4 3094 | 190_vol_slice_5 3095 | 190_vol_slice_6 3096 | 190_vol_slice_7 3097 | 190_vol_slice_8 3098 | 190_vol_slice_9 3099 | 190_vol_slice_10 3100 | 190_vol_slice_11 3101 | 190_vol_slice_12 3102 | 190_vol_slice_13 3103 | 190_vol_slice_14 3104 | 190_vol_slice_15 3105 | 110_vol_slice_0 3106 | 110_vol_slice_1 3107 | 110_vol_slice_2 3108 | 110_vol_slice_3 3109 | 110_vol_slice_4 3110 | 110_vol_slice_5 3111 | 110_vol_slice_6 3112 | 110_vol_slice_7 3113 | 110_vol_slice_8 3114 | 110_vol_slice_9 3115 | 110_vol_slice_10 3116 | 110_vol_slice_11 3117 | 110_vol_slice_12 3118 | 110_vol_slice_13 3119 | 110_vol_slice_14 3120 | 110_vol_slice_15 3121 | 29_vol_slice_3 3122 | 29_vol_slice_4 3123 | 29_vol_slice_5 3124 | 29_vol_slice_6 3125 | 29_vol_slice_7 3126 | 29_vol_slice_8 3127 | 29_vol_slice_9 3128 | 29_vol_slice_10 3129 | 29_vol_slice_11 3130 | 29_vol_slice_12 3131 | 29_vol_slice_13 3132 | 29_vol_slice_14 3133 | 29_vol_slice_15 3134 | 29_vol_slice_16 3135 | 29_vol_slice_17 3136 | 29_vol_slice_18 3137 | 29_vol_slice_19 3138 | 29_vol_slice_20 3139 | 196_vol_slice_2 3140 | 196_vol_slice_3 3141 | 196_vol_slice_4 3142 | 196_vol_slice_5 3143 | 196_vol_slice_6 3144 | 196_vol_slice_7 3145 | 
196_vol_slice_8 3146 | 196_vol_slice_9 3147 | 196_vol_slice_10 3148 | 196_vol_slice_11 3149 | 196_vol_slice_12 3150 | 196_vol_slice_13 3151 | 196_vol_slice_14 3152 | 196_vol_slice_15 3153 | 196_vol_slice_16 3154 | 196_vol_slice_17 3155 | 196_vol_slice_18 3156 | 196_vol_slice_19 3157 | 196_vol_slice_20 3158 | 184_vol_slice_5 3159 | 184_vol_slice_6 3160 | 184_vol_slice_7 3161 | 184_vol_slice_8 3162 | 184_vol_slice_9 3163 | 184_vol_slice_10 3164 | 184_vol_slice_11 3165 | 184_vol_slice_12 3166 | 184_vol_slice_13 3167 | 184_vol_slice_14 3168 | 184_vol_slice_15 3169 | 31_vol_slice_6 3170 | 31_vol_slice_7 3171 | 31_vol_slice_8 3172 | 31_vol_slice_9 3173 | 31_vol_slice_10 3174 | 31_vol_slice_11 3175 | 31_vol_slice_12 3176 | 31_vol_slice_13 3177 | 31_vol_slice_14 3178 | 31_vol_slice_15 3179 | 31_vol_slice_16 3180 | 31_vol_slice_17 3181 | 31_vol_slice_18 3182 | 20_vol_slice_4 3183 | 20_vol_slice_5 3184 | 20_vol_slice_6 3185 | 20_vol_slice_7 3186 | 20_vol_slice_8 3187 | 20_vol_slice_9 3188 | 20_vol_slice_10 3189 | 20_vol_slice_11 3190 | 20_vol_slice_12 3191 | 20_vol_slice_13 3192 | 20_vol_slice_14 3193 | 20_vol_slice_15 3194 | 20_vol_slice_16 3195 | 20_vol_slice_17 3196 | 20_vol_slice_18 3197 | 20_vol_slice_19 3198 | 146_vol_slice_0 3199 | 146_vol_slice_1 3200 | 146_vol_slice_2 3201 | 146_vol_slice_3 3202 | 146_vol_slice_4 3203 | 146_vol_slice_5 3204 | 146_vol_slice_6 3205 | 146_vol_slice_7 3206 | 146_vol_slice_8 3207 | 146_vol_slice_9 3208 | 146_vol_slice_10 3209 | 146_vol_slice_11 3210 | 146_vol_slice_12 3211 | 146_vol_slice_13 3212 | 146_vol_slice_14 3213 | 146_vol_slice_15 3214 | 146_vol_slice_16 3215 | 146_vol_slice_17 3216 | 146_vol_slice_18 3217 | 146_vol_slice_19 3218 | 150_vol_slice_1 3219 | 150_vol_slice_2 3220 | 150_vol_slice_3 3221 | 150_vol_slice_4 3222 | 150_vol_slice_5 3223 | 150_vol_slice_6 3224 | 150_vol_slice_7 3225 | 150_vol_slice_8 3226 | 150_vol_slice_9 3227 | 150_vol_slice_10 3228 | 150_vol_slice_11 3229 | 150_vol_slice_12 3230 | 150_vol_slice_13 3231 | 150_vol_slice_14 3232 | 150_vol_slice_15 3233 | 150_vol_slice_16 3234 | 177_vol_slice_3 3235 | 177_vol_slice_4 3236 | 177_vol_slice_5 3237 | 177_vol_slice_6 3238 | 177_vol_slice_7 3239 | 177_vol_slice_8 3240 | 177_vol_slice_9 3241 | 177_vol_slice_10 3242 | 177_vol_slice_11 3243 | 177_vol_slice_12 3244 | 177_vol_slice_13 3245 | 177_vol_slice_14 3246 | 59_vol_slice_2 3247 | 59_vol_slice_3 3248 | 59_vol_slice_4 3249 | 59_vol_slice_5 3250 | 59_vol_slice_6 3251 | 59_vol_slice_7 3252 | 59_vol_slice_8 3253 | 59_vol_slice_9 3254 | 59_vol_slice_10 3255 | 59_vol_slice_11 3256 | 59_vol_slice_12 3257 | 59_vol_slice_13 3258 | 59_vol_slice_14 3259 | 59_vol_slice_15 3260 | 59_vol_slice_16 3261 | 59_vol_slice_17 3262 | 112_vol_slice_0 3263 | 112_vol_slice_1 3264 | 112_vol_slice_2 3265 | 112_vol_slice_3 3266 | 112_vol_slice_4 3267 | 112_vol_slice_5 3268 | 112_vol_slice_6 3269 | 112_vol_slice_7 3270 | 112_vol_slice_8 3271 | 112_vol_slice_9 3272 | 112_vol_slice_10 3273 | 112_vol_slice_11 3274 | 112_vol_slice_12 3275 | 112_vol_slice_13 3276 | 112_vol_slice_14 3277 | 112_vol_slice_15 3278 | 112_vol_slice_16 3279 | 112_vol_slice_17 3280 | 15_vol_slice_2 3281 | 15_vol_slice_3 3282 | 15_vol_slice_4 3283 | 15_vol_slice_5 3284 | 15_vol_slice_6 3285 | 15_vol_slice_7 3286 | 15_vol_slice_8 3287 | 15_vol_slice_9 3288 | 15_vol_slice_10 3289 | 15_vol_slice_11 3290 | 15_vol_slice_12 3291 | 15_vol_slice_13 3292 | 15_vol_slice_14 3293 | 15_vol_slice_15 3294 | 4_vol_slice_2 3295 | 4_vol_slice_3 3296 | 4_vol_slice_4 3297 | 4_vol_slice_5 3298 | 4_vol_slice_6 3299 | 
4_vol_slice_7 3300 | 4_vol_slice_8 3301 | 4_vol_slice_9 3302 | 4_vol_slice_10 3303 | 4_vol_slice_11 3304 | 4_vol_slice_12 3305 | 4_vol_slice_13 3306 | 4_vol_slice_14 3307 | 4_vol_slice_15 3308 | 4_vol_slice_16 3309 | 4_vol_slice_17 3310 | 198_vol_slice_5 3311 | 198_vol_slice_6 3312 | 198_vol_slice_7 3313 | 198_vol_slice_8 3314 | 198_vol_slice_9 3315 | 198_vol_slice_10 3316 | 198_vol_slice_11 3317 | 198_vol_slice_12 3318 | 198_vol_slice_13 3319 | 198_vol_slice_14 3320 | 198_vol_slice_15 3321 | 198_vol_slice_16 3322 | 129_vol_slice_4 3323 | 129_vol_slice_5 3324 | 129_vol_slice_6 3325 | 129_vol_slice_7 3326 | 129_vol_slice_8 3327 | 129_vol_slice_9 3328 | 129_vol_slice_10 3329 | 129_vol_slice_11 3330 | 129_vol_slice_12 3331 | 129_vol_slice_13 3332 | 129_vol_slice_14 3333 | 129_vol_slice_15 3334 | 15_vol_slice_6 3335 | 150_vol_slice_8 3336 | 120_vol_slice_14 --------------------------------------------------------------------------------