()
79 | );
80 | }
81 |
--------------------------------------------------------------------------------
/models/py_utils/_cpools/top_pool.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/models/py_utils/_cpools/top_pool.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/models/py_utils/_cpools/top_pool.py:
--------------------------------------------------------------------------------
1 | def __bootstrap__():
2 | global __bootstrap__, __loader__, __file__
3 | import sys, pkg_resources, imp
4 | __file__ = pkg_resources.resource_filename(__name__, 'top_pool.cpython-36m-x86_64-linux-gnu.so')
5 | __loader__ = None; del __bootstrap__, __loader__
6 | imp.load_dynamic(__name__,__file__)
7 | __bootstrap__()
8 |
--------------------------------------------------------------------------------
/models/py_utils/data_parallel.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.nn.modules import Module
3 | from torch.nn.parallel.scatter_gather import gather
4 | from torch.nn.parallel.replicate import replicate
5 | from torch.nn.parallel.parallel_apply import parallel_apply
6 |
7 | from .scatter_gather import scatter_kwargs
8 |
9 | class DataParallel(Module):
10 | r"""Implements data parallelism at the module level.
11 |
12 | This container parallelizes the application of the given module by
13 | splitting the input across the specified devices by chunking in the batch
14 | dimension. In the forward pass, the module is replicated on each device,
15 | and each replica handles a portion of the input. During the backwards
16 | pass, gradients from each replica are summed into the original module.
17 |
18 | The batch size should be larger than the number of GPUs used. It should
19 | also be an integer multiple of the number of GPUs so that each chunk is the
20 | same size (so that each GPU processes the same number of samples).
21 |
22 | See also: :ref:`cuda-nn-dataparallel-instead`
23 |
24 | Arbitrary positional and keyword inputs are allowed to be passed into
25 | DataParallel EXCEPT Tensors. All variables will be scattered on the dim
26 | specified (default 0). Primitive types will be broadcast, but all
27 | other types will be shallow-copied and can be corrupted if written to
28 | in the model's forward pass.
29 |
30 | Args:
31 | module: module to be parallelized
32 | device_ids: CUDA devices (default: all devices)
33 | output_device: device location of output (default: device_ids[0])
34 |
35 | Example::
36 |
37 | >>> net = DataParallel(model, device_ids=[0, 1, 2])
38 | >>> output = net(input_var)
39 | """
40 |
41 | # TODO: update notes/cuda.rst when this class handles 8+ GPUs well
42 |
43 | def __init__(self, module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
44 | super(DataParallel, self).__init__()
45 |
46 | if not torch.cuda.is_available():
47 | self.module = module
48 | self.device_ids = []
49 | return
50 |
51 | if device_ids is None:
52 | device_ids = list(range(torch.cuda.device_count()))
53 | if output_device is None:
54 | output_device = device_ids[0]
55 | self.dim = dim
56 | self.module = module
57 | self.device_ids = device_ids
58 | self.chunk_sizes = chunk_sizes
59 | self.output_device = output_device
60 | if len(self.device_ids) == 1:
61 | self.module.cuda(device_ids[0])
62 |
63 | def forward(self, *inputs, **kwargs):
64 | if not self.device_ids:
65 | return self.module(*inputs, **kwargs)
66 | inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
67 | if len(self.device_ids) == 1:
68 | return self.module(*inputs[0], **kwargs[0])
69 | replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
70 | outputs = self.parallel_apply(replicas, inputs, kwargs)
71 | return self.gather(outputs, self.output_device)
72 |
73 | def replicate(self, module, device_ids):
74 | return replicate(module, device_ids)
75 |
76 | def scatter(self, inputs, kwargs, device_ids, chunk_sizes):
77 | return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim, chunk_sizes=chunk_sizes)
78 |
79 | def parallel_apply(self, replicas, inputs, kwargs):
80 | return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
81 |
82 | def gather(self, outputs, output_device):
83 | return gather(outputs, output_device, dim=self.dim)
84 |
85 |
86 | def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
87 | r"""Evaluates module(input) in parallel across the GPUs given in device_ids.
88 |
89 | This is the functional version of the DataParallel module.
90 |
91 | Args:
92 | module: the module to evaluate in parallel
93 | inputs: inputs to the module
94 | device_ids: GPU ids on which to replicate module
95 | output_device: GPU location of the output; use -1 to indicate the CPU
96 | (default: device_ids[0])
97 | Returns:
98 | a Variable containing the result of module(input) located on
99 | output_device
100 | """
101 | if not isinstance(inputs, tuple):
102 | inputs = (inputs,)
103 |
104 | if device_ids is None:
105 | device_ids = list(range(torch.cuda.device_count()))
106 |
107 | if output_device is None:
108 | output_device = device_ids[0]
109 |
110 | inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
111 | if len(device_ids) == 1:
112 | return module(*inputs[0], **module_kwargs[0])
113 | used_device_ids = device_ids[:len(inputs)]
114 | replicas = replicate(module, used_device_ids)
115 | outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
116 | return gather(outputs, output_device, dim)
117 |
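
A minimal usage sketch of this DataParallel variant (a sketch only, assuming at least two visible CUDA devices; the model and sizes are hypothetical). The chunk_sizes argument is what distinguishes it from the stock torch.nn.DataParallel: it lets the batch be split unevenly across devices:

    import torch
    import torch.nn as nn
    from models.py_utils.data_parallel import DataParallel

    model = nn.Linear(16, 4).cuda()   # toy model; any nn.Module works

    # split a batch of 6 as 4 samples on GPU 0 and 2 samples on GPU 1
    net = DataParallel(model, device_ids=[0, 1], chunk_sizes=[4, 2])

    x = torch.randn(6, 16).cuda()
    out = net(x)                      # gathered back on device_ids[0]
    print(out.shape)                  # torch.Size([6, 4])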
--------------------------------------------------------------------------------
/models/py_utils/scatter_gather.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Variable
3 | from torch.nn.parallel._functions import Scatter, Gather
4 |
5 |
6 | def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
7 | r"""
8 | Slices variables into approximately equal chunks (or into the given
9 | chunk_sizes, when provided) and distributes them across the given
10 | GPUs. Duplicates references to objects that are not variables. Does not
11 | support Tensors.
12 | """
13 | def scatter_map(obj):
14 | if isinstance(obj, Variable):
15 | return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
16 | assert not torch.is_tensor(obj), "Tensors not supported in scatter."
17 | if isinstance(obj, tuple):
18 | return list(zip(*map(scatter_map, obj)))
19 | if isinstance(obj, list):
20 | return list(map(list, zip(*map(scatter_map, obj))))
21 | if isinstance(obj, dict):
22 | return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
23 | return [obj for _ in target_gpus]
24 |
25 | return scatter_map(inputs)
26 |
27 |
28 | def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None):
29 | r"""Scatter with support for kwargs dictionary"""
30 | inputs = scatter(inputs, target_gpus, dim, chunk_sizes) if inputs else []
31 | kwargs = scatter(kwargs, target_gpus, dim, chunk_sizes) if kwargs else []
32 | if len(inputs) < len(kwargs):
33 | inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
34 | elif len(kwargs) < len(inputs):
35 | kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
36 | inputs = tuple(inputs)
37 | kwargs = tuple(kwargs)
38 | return inputs, kwargs
39 |
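
A small, CPU-only illustration of the padding behavior above (a sketch; the values are arbitrary). Non-tensor objects are simply replicated per device, and whichever of inputs/kwargs is shorter is padded so both have one entry per target GPU:

    from models.py_utils.scatter_gather import scatter_kwargs

    inputs, kwargs = scatter_kwargs((), {'debug': True}, target_gpus=[0, 1])
    print(inputs)   # ((), ())  -- padded with empty tuples to match kwargs
    print(kwargs)   # ({'debug': True}, {'debug': True})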
--------------------------------------------------------------------------------
/nnet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/nnet/__init__.py
--------------------------------------------------------------------------------
/nnet/py_factory.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import importlib
4 | import torch.nn as nn
5 |
6 | from config import system_configs
7 | from models.py_utils.data_parallel import DataParallel
8 |
9 | torch.manual_seed(317)
10 |
11 | class Network(nn.Module):
12 | def __init__(self, model, loss):
13 | super(Network, self).__init__()
14 |
15 | self.model = model
16 | self.loss = loss
17 |
18 | def forward(self, xs, ys, **kwargs):
19 | preds = self.model(*xs, **kwargs)
20 | loss = self.loss(preds, ys, **kwargs)
21 | return loss
22 |
23 | # for model backward compatibility
24 | # previously model was wrapped by DataParallel module
25 | class DummyModule(nn.Module):
26 | def __init__(self, model):
27 | super(DummyModule, self).__init__()
28 | self.module = model
29 |
30 | def forward(self, *xs, **kwargs):
31 | return self.module(*xs, **kwargs)
32 |
33 | class NetworkFactory(object):
34 | def __init__(self, db):
35 | super(NetworkFactory, self).__init__()
36 |
37 | module_file = "models.{}".format(system_configs.snapshot_name)
38 | print("module_file: {}".format(module_file))
39 | nnet_module = importlib.import_module(module_file)
40 |
41 | self.model = DummyModule(nnet_module.model(db))
42 | self.loss = nnet_module.loss
43 | self.network = Network(self.model, self.loss)
44 |
45 | total_params = 0
46 | for params in self.model.parameters():
47 | num_params = 1
48 | for x in params.size():
49 | num_params *= x
50 | total_params += num_params
51 | print("total parameters: {}".format(total_params))
52 |
53 | if system_configs.opt_algo == "adam":
54 | self.optimizer = torch.optim.Adam(
55 | filter(lambda p: p.requires_grad, self.model.parameters())
56 | )
57 | elif system_configs.opt_algo == "sgd":
58 | self.optimizer = torch.optim.SGD(
59 | filter(lambda p: p.requires_grad, self.model.parameters()),
60 | lr=system_configs.learning_rate,
61 | momentum=0.9, weight_decay=0.0001
62 | )
63 | else:
64 | raise ValueError("unknown optimizer")
65 |
66 | def cuda(self, cuda_id=0):
67 | self.model.cuda(cuda_id)
68 | self.network.cuda(cuda_id)
69 | self.cuda_id = cuda_id
70 |
71 | def train_mode(self):
72 | self.network.train()
73 |
74 | def eval_mode(self):
75 | self.network.eval()
76 |
77 | def train(self, xs, ys, **kwargs):
78 | xs = [x.cuda(non_blocking=True, device=self.cuda_id) for x in xs]
79 | ys = [y.cuda(non_blocking=True, device=self.cuda_id) for y in ys]
80 | self.optimizer.zero_grad()
81 | loss = self.network(xs, ys)
82 | loss = loss.mean()
83 | loss.backward()
84 | self.optimizer.step()
85 | return loss
86 |
87 | def validate(self, xs, ys, **kwargs):
88 | with torch.no_grad():
89 | if torch.cuda.is_available():
90 | xs = [x.cuda(non_blocking=True, device=self.cuda_id) for x in xs]
91 | ys = [y.cuda(non_blocking=True, device=self.cuda_id) for y in ys]
92 |
93 | loss = self.network(xs, ys)
94 | loss = loss.mean()
95 | return loss
96 |
97 | def test(self, xs, **kwargs):
98 | with torch.no_grad():
99 | if torch.cuda.is_available():
100 | xs = [x.cuda(non_blocking=True, device=self.cuda_id) for x in xs]
101 | return self.model(*xs, **kwargs)
102 |
103 | def set_lr(self, lr):
104 | print("setting learning rate to: {}".format(lr))
105 | for param_group in self.optimizer.param_groups:
106 | param_group["lr"] = lr
107 |
108 | def load_pretrained_params(self, pretrained_model):
109 | print("loading from {}".format(pretrained_model))
110 | with open(pretrained_model, "rb") as f:
111 | params = torch.load(f) if torch.cuda.is_available() else torch.load(f, map_location='cpu')
112 | self.model.load_state_dict(params)
113 |
114 | def load_params(self, iteration):
115 | cache_file = system_configs.snapshot_file.format(iteration)
116 | print("loading model from {}".format(cache_file))
117 | with open(cache_file, "rb") as f:
118 | if torch.cuda.is_available():
119 | params = torch.load(f)
120 | else:
121 | params = torch.load(f, map_location='cpu')
122 | self.model.load_state_dict(params)
123 |
124 | def save_params(self, iteration):
125 | cache_file = system_configs.snapshot_file.format(iteration)
126 | print("saving model to {}".format(cache_file))
127 | with open(cache_file, "wb") as f:
128 | params = self.model.state_dict()
129 | torch.save(params, f)
130 |
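
A condensed sketch of how NetworkFactory is driven during training (the db object, batch source, and iteration counts are placeholders; only the factory calls mirror this file):

    from nnet.py_factory import NetworkFactory

    nnet = NetworkFactory(db)       # db: a dataset object from db.datasets (elided here)
    nnet.cuda()                     # moves model and loss to cuda:0
    nnet.train_mode()

    for iteration in range(1, max_iter + 1):
        xs, ys = next(batch_source)     # hypothetical iterator yielding lists of tensors
        loss = nnet.train(xs, ys)       # zero_grad -> forward -> backward -> step
        if iteration % snapshot == 0:
            nnet.save_params(iteration)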
--------------------------------------------------------------------------------
/ocr.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import time
3 | import json
4 | import os
5 | from PIL import Image, ImageEnhance
6 | def ocr_space_file(filename, overlay=False, api_key='e61f4d4c3488957', language='eng'):
7 | """ OCR.space API request with local file.
8 | Python3.5 - not tested on 2.7
9 | :param filename: Your file path & name.
10 | :param overlay: Is OCR.space overlay required in your response.
11 | Defaults to False.
12 | :param api_key: OCR.space API key.
13 | Defaults to 'e61f4d4c3488957'.
14 | :param language: Language code to be used in OCR.
15 | List of available language codes can be found on https://ocr.space/OCRAPI
16 | Defaults to 'eng'.
17 | :return: Result in JSON format.
18 | """
19 |
20 | payload = {'isOverlayRequired': overlay,
21 | 'apikey': api_key,
22 | 'language': language,
23 | }
24 | with open(filename, 'rb') as f:
25 | r = requests.post('https://api.ocr.space/parse/image',
26 | files={filename: f},
27 | data=payload,
28 | )
29 | return r.content.decode()
30 | def ocr_result(image_path):
31 | subscription_key = "ad143190288d40b79483aa0d5c532724"
32 | vision_base_url = "https://westus2.api.cognitive.microsoft.com/vision/v2.0/"
33 | ocr_url = vision_base_url + "recognizeText?mode=Printed"
34 | headers = {'Ocp-Apim-Subscription-Key': subscription_key, 'Content-Type': 'application/octet-stream'}
35 | params = {'language': 'eng', 'detectOrientation': 'true'}
36 |
37 | image = Image.open(image_path)
38 | enh_con = ImageEnhance.Contrast(image)
39 | contrast = 2.0
40 | image = enh_con.enhance(contrast)
41 | # image = image.convert('L')
42 | # image = image.resize((800, 800))
43 | image.save('OCR_temp.png')
44 | with open('OCR_temp.png', 'rb') as fp: image_data = fp.read()
45 | response = requests.post(ocr_url, headers=headers, params=params, data=image_data)
46 | response.raise_for_status()
47 | op_location = response.headers['Operation-Location']
48 | analysis = {}
49 | while "recognitionResults" not in analysis.keys():
50 | time.sleep(3)
51 | binary_content = requests.get(op_location, headers=headers, params=params).content
52 | analysis = json.loads(binary_content.decode('ascii'))
53 | line_infos = [region["lines"] for region in analysis["recognitionResults"]]
54 | word_infos = []
55 | for line in line_infos:
56 | for word_metadata in line:
57 | for word_info in word_metadata["words"]:
58 | if 'confidence' in word_info.keys():
59 | if word_info['confidence'] == 'Low':
60 | continue
61 | if word_info['boundingBox'][0] > word_info['boundingBox'][4]:
62 | continue
63 | word_infos.append(word_info)
64 | return word_infos
65 | image_path = 'C:\\work\\evalset_fqa\\vbar\\bitmap\\'
66 | image_names = os.listdir(image_path)
67 | for name in ['495.jpg', '151.jpg']:
68 | image_file_path = os.path.join(image_path, name)
69 | result = ocr_result(image_file_path)
70 | print(result)
71 |
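
The polling loop in ocr_result spins forever if the recognition operation fails; a hedged variant with a bounded retry budget (illustrative only; the 'status' field check is an assumption about how the v2.0 API reports failure) could look like:

    import json
    import time
    import requests

    def poll_ocr(op_location, headers, max_tries=20, delay=3.0):
        # Poll the Operation-Location URL until results arrive or we give up.
        for _ in range(max_tries):
            resp = requests.get(op_location, headers=headers)
            analysis = json.loads(resp.content.decode('ascii'))
            if 'recognitionResults' in analysis:
                return analysis
            if analysis.get('status') == 'Failed':   # assumption: v2.0 failure status
                raise RuntimeError('OCR operation failed')
            time.sleep(delay)
        raise TimeoutError('OCR result not ready after %d tries' % max_tries)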
--------------------------------------------------------------------------------
/outputs/read.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/outputs/read.txt
--------------------------------------------------------------------------------
/pycocotool/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/pycocotool/_mask.cp37-win_amd64.pyd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/pycocotool/_mask.cp37-win_amd64.pyd
--------------------------------------------------------------------------------
/pycocotool/_mask.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/pycocotool/_mask.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/pycocotool/_mask.py:
--------------------------------------------------------------------------------
1 | def __bootstrap__():
2 | global __bootstrap__, __loader__, __file__
3 | import sys, pkg_resources, imp
4 | __file__ = pkg_resources.resource_filename(__name__, '_mask.cpython-36m-x86_64-linux-gnu.so')
5 | __loader__ = None; del __bootstrap__, __loader__
6 | imp.load_dynamic(__name__,__file__)
7 | __bootstrap__()
8 |
--------------------------------------------------------------------------------
/pycocotool/mask.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tsungyi'
2 |
3 | import pycocotool._mask as _mask
4 | # Interface for manipulating masks stored in RLE format.
5 | #
6 | # RLE is a simple yet efficient format for storing binary masks. RLE
7 | # first divides a vector (or vectorized image) into a series of piecewise
8 | # constant regions and then for each piece simply stores the length of
9 | # that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would
10 | # be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1]
11 | # (note that the odd counts are always the numbers of zeros). Instead of
12 | # storing the counts directly, additional compression is achieved with a
13 | # variable bitrate representation based on a common scheme called LEB128.
14 | #
15 | # Compression is greatest given large piecewise constant regions.
16 | # Specifically, the size of the RLE is proportional to the number of
17 | # *boundaries* in M (or for an image the number of boundaries in the y
18 | # direction). Assuming fairly simple shapes, the RLE representation is
19 | # O(sqrt(n)) where n is number of pixels in the object. Hence space usage
20 | # is substantially lower, especially for large simple objects (large n).
21 | #
22 | # Many common operations on masks can be computed directly using the RLE
23 | # (without need for decoding). This includes computations such as area,
24 | # union, intersection, etc. All of these operations are linear in the
25 | # size of the RLE, in other words they are O(sqrt(n)) where n is the area
26 | # of the object. Computing these operations on the original mask is O(n).
27 | # Thus, using the RLE can result in substantial computational savings.
28 | #
29 | # The following API functions are defined:
30 | # encode - Encode binary masks using RLE.
31 | # decode - Decode binary masks encoded via RLE.
32 | # merge - Compute union or intersection of encoded masks.
33 | # iou - Compute intersection over union between masks.
34 | # area - Compute area of encoded masks.
35 | # toBbox - Get bounding boxes surrounding encoded masks.
36 | # frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask.
37 | #
38 | # Usage:
39 | # Rs = encode( masks )
40 | # masks = decode( Rs )
41 | # R = merge( Rs, intersect=false )
42 | # o = iou( dt, gt, iscrowd )
43 | # a = area( Rs )
44 | # bbs = toBbox( Rs )
45 | # Rs = frPyObjects( [pyObjects], h, w )
46 | #
47 | # In the API the following formats are used:
48 | # Rs - [dict] Run-length encoding of binary masks
49 | # R - dict Run-length encoding of binary mask
50 | # masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order)
51 | # iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore
52 | # bbs - [nx4] Bounding box(es) stored as [x y w h]
53 | # poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list)
54 | # dt,gt - May be either bounding boxes or encoded masks
55 | # Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel).
56 | #
57 | # Finally, a note about the intersection over union (iou) computation.
58 | # The standard iou of a ground truth (gt) and detected (dt) object is
59 | # iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt))
60 | # For "crowd" regions, we use a modified criteria. If a gt object is
61 | # marked as "iscrowd", we allow a dt to match any subregion of the gt.
62 | # Choosing gt' in the crowd gt that best matches the dt can be done using
63 | # gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing
64 | # iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt)
65 | # For crowd gt regions we use this modified criteria above for the iou.
66 | #
67 | # To compile run "python setup.py build_ext --inplace"
68 | # Please do not contact us for help with compiling.
69 | #
70 | # Microsoft COCO Toolbox. version 2.0
71 | # Data, paper, and tutorials available at: http://mscoco.org/
72 | # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
73 | # Licensed under the Simplified BSD License [see coco/license.txt]
74 |
75 | iou = _mask.iou
76 | merge = _mask.merge
77 | frPyObjects = _mask.frPyObjects
78 |
79 | def encode(bimask):
80 | if len(bimask.shape) == 3:
81 | return _mask.encode(bimask)
82 | elif len(bimask.shape) == 2:
83 | h, w = bimask.shape
84 | return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0]
85 |
86 | def decode(rleObjs):
87 | if type(rleObjs) == list:
88 | return _mask.decode(rleObjs)
89 | else:
90 | return _mask.decode([rleObjs])[:,:,0]
91 |
92 | def area(rleObjs):
93 | if type(rleObjs) == list:
94 | return _mask.area(rleObjs)
95 | else:
96 | return _mask.area([rleObjs])[0]
97 |
98 | def toBbox(rleObjs):
99 | if type(rleObjs) == list:
100 | return _mask.toBbox(rleObjs)
101 | else:
102 | return _mask.toBbox([rleObjs])[0]
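
To make the counts convention in the header comment concrete, here is a tiny pure-Python sketch of uncompressed RLE (counts alternate runs of zeros and ones, always starting with the zero run, which is why a leading one yields a first count of 0):

    def rle_counts(bits):
        # Uncompressed RLE counts for a binary vector, starting with zeros.
        counts, prev, run = [], 0, 0
        for b in bits:
            if b == prev:
                run += 1
            else:
                counts.append(run)
                prev, run = b, 1
        counts.append(run)
        return counts

    print(rle_counts([0, 0, 1, 1, 1, 0, 1]))   # [2, 3, 1, 1]
    print(rle_counts([1, 1, 1, 1, 1, 1, 0]))   # [0, 6, 1]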
--------------------------------------------------------------------------------
/requirements-2023.txt:
--------------------------------------------------------------------------------
1 | certifi==2023.5.7
2 | charset-normalizer==3.1.0
3 | contourpy @ file:///opt/conda/conda-bld/contourpy_1663827406301/work
4 | cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
5 | fonttools==4.25.0
6 | h5py==3.8.0
7 | idna==3.4
8 | importlib-resources @ file:///tmp/build/80754af9/importlib_resources_1625135880749/work
9 | kiwisolver==1.4.4
10 | matplotlib==3.7.1
11 | mkl-fft==1.3.6
12 | mkl-random @ file:///work/mkl/mkl_random_1682950433854/work
13 | mkl-service==2.4.0
14 | munkres==1.1.4
15 | numpy @ file:///work/mkl/numpy_and_numpy_base_1682953417311/work
16 | opencv-python==4.7.0.72
17 | packaging @ file:///croot/packaging_1678965309396/work
18 | Pillow==9.4.0
19 | ply==3.11
20 | pyparsing @ file:///opt/conda/conda-bld/pyparsing_1661452539315/work
21 | PyQt5-sip==12.11.0
22 | python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
23 | requests==2.30.0
24 | sip @ file:///tmp/abs_44cd77b_pu/croots/recipe/sip_1659012365470/work
25 | six @ file:///tmp/build/80754af9/six_1644875935023/work
26 | toml @ file:///tmp/build/80754af9/toml_1616166611790/work
27 | torch==1.7.1+cu110
28 | torchaudio==0.7.2
29 | torchvision==0.8.2+cu110
30 | tornado @ file:///opt/conda/conda-bld/tornado_1662061693373/work
31 | typing_extensions==4.5.0
32 | urllib3==2.0.2
33 | zipp @ file:///croot/zipp_1672387121353/work
34 |
--------------------------------------------------------------------------------
/sample/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/sample/__init__.py
--------------------------------------------------------------------------------
/server_match/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/server_match/__init__.py
--------------------------------------------------------------------------------
/server_match/settings.py:
--------------------------------------------------------------------------------
1 | """
2 | Django settings for server_match project.
3 |
4 | Generated by 'django-admin startproject' using Django 2.2.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/2.2/topics/settings/
8 |
9 | For the full list of settings and their values, see
10 | https://docs.djangoproject.com/en/2.2/ref/settings/
11 | """
12 |
13 | import os
14 |
15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17 |
18 |
19 | # Quick-start development settings - unsuitable for production
20 | # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
21 |
22 | # SECURITY WARNING: keep the secret key used in production secret!
23 | SECRET_KEY = 'zo&)@ryp4g@2a2#sl0*2^zdw)zhg%3xc7ar7$945(ka_mhhuc3'
24 |
25 | # SECURITY WARNING: don't run with debug turned on in production!
26 | DEBUG = True
27 |
28 | ALLOWED_HOSTS = ['gpu12.kcgpu.com']
29 |
30 |
31 | # Application definition
32 |
33 | INSTALLED_APPS = [
34 | 'django.contrib.admin',
35 | 'django.contrib.auth',
36 | 'django.contrib.contenttypes',
37 | 'django.contrib.sessions',
38 | 'django.contrib.messages',
39 | 'django.contrib.staticfiles',
40 | ]
41 |
42 | MIDDLEWARE = [
43 | 'django.middleware.security.SecurityMiddleware',
44 | 'django.contrib.sessions.middleware.SessionMiddleware',
45 | 'django.middleware.common.CommonMiddleware',
46 | #'django.middleware.csrf.CsrfViewMiddleware',
47 | 'django.contrib.auth.middleware.AuthenticationMiddleware',
48 | 'django.contrib.messages.middleware.MessageMiddleware',
49 | 'django.middleware.clickjacking.XFrameOptionsMiddleware',
50 | ]
51 |
52 | ROOT_URLCONF = 'server_match.urls'
53 |
54 | TEMPLATES = [
55 | {
56 | 'BACKEND': 'django.template.backends.django.DjangoTemplates',
57 | 'DIRS': [BASE_DIR+"/templates"],
58 | 'APP_DIRS': True,
59 | 'OPTIONS': {
60 | 'context_processors': [
61 | 'django.template.context_processors.debug',
62 | 'django.template.context_processors.request',
63 | 'django.contrib.auth.context_processors.auth',
64 | 'django.contrib.messages.context_processors.messages',
65 | ],
66 | },
67 | },
68 | ]
69 |
70 | WSGI_APPLICATION = 'server_match.wsgi.application'
71 |
72 | SECURE_SSL_REDIRECT = True
73 | # Database
74 | # https://docs.djangoproject.com/en/2.2/ref/settings/#databases
75 |
76 | DATABASES = {
77 | 'default': {
78 | 'ENGINE': 'django.db.backends.sqlite3',
79 | 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
80 | }
81 | }
82 |
83 |
84 | # Password validation
85 | # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
86 |
87 | AUTH_PASSWORD_VALIDATORS = [
88 | {
89 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
90 | },
91 | {
92 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
93 | },
94 | {
95 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
96 | },
97 | {
98 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
99 | },
100 | ]
101 |
102 |
103 | # Internationalization
104 | # https://docs.djangoproject.com/en/2.2/topics/i18n/
105 |
106 | LANGUAGE_CODE = 'en-us'
107 |
108 | TIME_ZONE = 'UTC'
109 |
110 | USE_I18N = True
111 |
112 | USE_L10N = True
113 |
114 | USE_TZ = True
115 |
116 |
117 | # Static files (CSS, JavaScript, Images)
118 | # https://docs.djangoproject.com/en/2.2/howto/static-files/
119 |
120 | STATIC_PATH = os.path.join(BASE_DIR, 'static')
121 | STATICFILES_DIRS = [STATIC_PATH]
122 |
123 | STATIC_URL = '/static/'
124 |
--------------------------------------------------------------------------------
/server_match/urls.py:
--------------------------------------------------------------------------------
1 | """server_match URL Configuration
2 |
3 | The `urlpatterns` list routes URLs to views. For more information please see:
4 | https://docs.djangoproject.com/en/2.2/topics/http/urls/
5 | Examples:
6 | Function views
7 | 1. Add an import: from my_app import views
8 | 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 | Class-based views
10 | 1. Add an import: from other_app.views import Home
11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 | Including another URLconf
13 | 1. Import the include() function: from django.urls import include, path
14 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 | """
16 | from django.conf.urls import url
17 | from . import view
18 | urlpatterns = [
19 | url(r'^$', view.get_group),
20 | ]
21 |
--------------------------------------------------------------------------------
/server_match/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for server_match project.
3 |
4 | It exposes the WSGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.wsgi import get_wsgi_application
13 |
14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server_match.settings')
15 |
16 | application = get_wsgi_application()
17 |
--------------------------------------------------------------------------------
/static/target.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/static/target.png
--------------------------------------------------------------------------------
/static/target_draw.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/static/target_draw.png
--------------------------------------------------------------------------------
/target.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/target.png
--------------------------------------------------------------------------------
/target_draw.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/target_draw.png
--------------------------------------------------------------------------------
/templates/onuse.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Onuse
6 |
7 |
8 | Is processing a previous request, please wait!
9 |
10 |
--------------------------------------------------------------------------------
/templates/results.html:
--------------------------------------------------------------------------------
1 |
2 | {% load staticfiles %}
3 |
4 |
5 |
6 | Results
7 |
8 |
9 |
10 |
11 |
12 | Type
13 |
14 | {{Type}}
15 |
16 | Y Range
17 |
18 | {{min2max}}
19 |
20 |
29 | Data
30 |
31 |
32 | {{data|linebreaks}}
33 |
34 |
35 | ChartTitle
36 |
37 | {{ChartTitle}}
38 |
39 | ValueAxisTitle
40 |
41 | {{ValueAxisTitle}}
42 |
43 | CategoryAxisTitle
44 |
45 | {{CategoryAxisTitle}}
46 |
47 |
--------------------------------------------------------------------------------
/templates/upload.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/test.png
--------------------------------------------------------------------------------
/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soap117/DeepRule/622c8c87a00125604e5af067543ca0888a8b7319/test/__init__.py
--------------------------------------------------------------------------------
/test_chart.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 | import importlib
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNetLine", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=50000, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument('--cache_path', dest="cache_path", type=str)
27 | parser.add_argument('--result_path', dest="result_path", type=str)
28 | parser.add_argument('--tar_data_path', dest="tar_data_path", type=str)
29 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
30 | parser.add_argument("--debug", action="store_true")
31 | parser.add_argument("--data_dir", dest="data_dir", default="c:/work/linedata(1023)", type=str)
32 |
33 | args = parser.parse_args()
34 | return args
35 |
36 | def make_dirs(directories):
37 | for directory in directories:
38 | if not os.path.exists(directory):
39 | os.makedirs(directory)
40 |
41 | def test(db, split, testiter, debug=False, suffix=None):
42 | with torch.no_grad():
43 | result_dir = system_configs.result_dir
44 | result_dir = os.path.join(result_dir, str(testiter), split)
45 |
46 | if suffix is not None:
47 | result_dir = os.path.join(result_dir, suffix)
48 |
49 | make_dirs([result_dir])
50 |
51 | test_iter = system_configs.max_iter if testiter is None else testiter
52 | print("loading parameters at iteration: {}".format(test_iter))
53 |
54 | print("building neural network...")
55 | nnet = NetworkFactory(db)
56 | print("loading parameters...")
57 | nnet.load_params(test_iter)
58 |
59 | # dynamically select the testing routine matching the config file
60 | path = 'testfile.test_%s' % args.cfg_file
61 | testing = importlib.import_module(path).testing
62 | nnet.cuda()
63 | nnet.eval_mode()
64 | testing(db, nnet, result_dir, debug=debug)
65 |
66 | if __name__ == "__main__":
67 |
68 | args = parse_args()
69 |
70 | if args.suffix is None:
71 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
72 | else:
73 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
74 | print("cfg_file: {}".format(cfg_file))
75 |
76 | with open(cfg_file, "r") as f:
77 | configs = json.load(f)
78 | configs["system"]["snapshot_name"] = args.cfg_file
79 | configs["system"]["data_dir"] = args.data_dir
80 | configs["system"]["cache_dir"] = args.cache_path
81 | configs["system"]["result_dir"] = args.result_path
82 | configs["system"]["tar_data_dir"] = args.tar_data_path
83 | system_configs.update_config(configs["system"])
84 |
85 | train_split = system_configs.train_split
86 | val_split = system_configs.val_split
87 | test_split = system_configs.test_split
88 |
89 | split = {
90 | "training": train_split,
91 | "validation": val_split,
92 | "testing": test_split
93 | }[args.split]
94 |
95 | print("loading all datasets...")
96 | dataset = system_configs.dataset
97 | print("split: {}".format(split))
98 | testing_db = datasets[dataset](configs["db"], split)
99 |
100 | print("system config...")
101 | pprint.pprint(system_configs.full)
102 |
103 | print("db config...")
104 | pprint.pprint(testing_db.configs)
105 |
106 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
107 |
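
The dynamic lookup on lines 60-61 resolves the per-config testing routine at run time; the same pattern in isolation (the module name shown is illustrative):

    import importlib

    cfg_file = 'CornerNetLine'                    # e.g. taken from --cfg_file
    module_path = 'testfile.test_%s' % cfg_file   # -> testfile.test_CornerNetLine
    testing = importlib.import_module(module_path).testing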
--------------------------------------------------------------------------------
/testfile/test_coco.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNet", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=500000, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
27 | parser.add_argument("--debug", action="store_true")
28 | parser.add_argument("--data_dir", dest="data_dir", default="./data", type=str)
29 |
30 | args = parser.parse_args()
31 | return args
32 |
33 | def make_dirs(directories):
34 | for directory in directories:
35 | if not os.path.exists(directory):
36 | os.makedirs(directory)
37 |
38 | def test(db, split, testiter, debug=False, suffix=None):
39 | with torch.no_grad():
40 | result_dir = system_configs.result_dir
41 | result_dir = os.path.join(result_dir, str(testiter), split)
42 |
43 | if suffix is not None:
44 | result_dir = os.path.join(result_dir, suffix)
45 |
46 | make_dirs([result_dir])
47 |
48 | test_iter = system_configs.max_iter if testiter is None else testiter
49 | print("loading parameters at iteration: {}".format(test_iter))
50 |
51 | print("building neural network...")
52 | nnet = NetworkFactory(db)
53 | print("loading parameters...")
54 | nnet.load_params(test_iter)
55 |
56 | from testfile.test_chart import testing
57 |
58 | nnet.cuda()
59 | nnet.eval_mode()
60 | testing(db, nnet, result_dir, debug=debug)
61 |
62 | if __name__ == "__main__":
63 |
64 | args = parse_args()
65 |
66 | if args.suffix is None:
67 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
68 | else:
69 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
70 | print("cfg_file: {}".format(cfg_file))
71 |
72 | with open(cfg_file, "r") as f:
73 | configs = json.load(f)
74 | configs["system"]["snapshot_name"] = args.cfg_file
75 | configs["system"]["data_dir"] = args.data_dir + '/juluo/cocodata/'
76 | configs["system"]["cache_dir"] = args.data_dir + '/juluo/cache_coco/'
77 | configs["system"]["result_dir"] = args.data_dir + '/juluo/cache_coco/'
78 | system_configs.update_config(configs["system"])
79 |
80 | train_split = system_configs.train_split
81 | val_split = system_configs.val_split
82 | test_split = system_configs.test_split
83 | split = {
84 | "training": train_split,
85 | "validation": val_split,
86 | "testing": test_split,
87 | }[args.split]
88 |
89 | print("loading all datasets...")
90 | dataset = system_configs.dataset
91 | print("split: {}".format(split))
92 | testing_db = datasets[dataset](configs["db"], split)
93 |
94 | print("system config...")
95 | pprint.pprint(system_configs.full)
96 |
97 | print("db config...")
98 | pprint.pprint(testing_db.configs)
99 |
100 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
101 |
--------------------------------------------------------------------------------
/testfile/test_ori.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNet", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=None, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument('--cache_path', dest="cache_path", type=str)
27 | parser.add_argument('--result_path', dest="result_path", type=str)
28 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
29 | parser.add_argument("--debug", action="store_true")
30 | parser.add_argument("--data_dir", dest="data_dir", default="./data", type=str)
31 |
32 | args = parser.parse_args()
33 | return args
34 |
35 | def make_dirs(directories):
36 | for directory in directories:
37 | if not os.path.exists(directory):
38 | os.makedirs(directory)
39 |
40 | def test(db, split, testiter, debug=False, suffix=None):
41 | with torch.no_grad():
42 | result_dir = system_configs.result_dir
43 | result_dir = os.path.join(result_dir, str(testiter), split)
44 |
45 | if suffix is not None:
46 | result_dir = os.path.join(result_dir, suffix)
47 |
48 | make_dirs([result_dir])
49 |
50 | test_iter = system_configs.max_iter if testiter is None else testiter
51 | print("loading parameters at iteration: {}".format(test_iter))
52 |
53 | print("building neural network...")
54 | nnet = NetworkFactory(db)
55 | print("loading parameters...")
56 | nnet.load_params(test_iter)
57 |
58 | from testfile.test_chart import testing
59 |
60 | nnet.cuda()
61 | nnet.eval_mode()
62 | testing(db, nnet, result_dir, debug=debug)
63 |
64 | if __name__ == "__main__":
65 |
66 | args = parse_args()
67 |
68 | if args.suffix is None:
69 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
70 | else:
71 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
72 | print("cfg_file: {}".format(cfg_file))
73 |
74 | with open(cfg_file, "r") as f:
75 | configs = json.load(f)
76 | configs["system"]["snapshot_name"] = args.cfg_file
77 | configs["system"]["data_dir"] = args.data_dir
78 | configs["system"]["cache_dir"] = args.cache_path
79 | configs["system"]["result_dir"] = args.result_path
80 | system_configs.update_config(configs["system"])
81 |
82 | train_split = system_configs.train_split
83 | val_split = system_configs.val_split
84 | test_split = system_configs.test_split
85 |
86 | split = {
87 | "training": train_split,
88 | "validation": val_split,
89 | "testing": test_split
90 | }[args.split]
91 |
92 | print("loading all datasets...")
93 | dataset = system_configs.dataset
94 | print("split: {}".format(split))
95 | testing_db = datasets[dataset](configs["db"], split)
96 |
97 | print("system config...")
98 | pprint.pprint(system_configs.full)
99 |
100 | print("db config...")
101 | pprint.pprint(testing_db.configs)
102 |
103 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
104 |
--------------------------------------------------------------------------------
/testfile/test_pure_bar.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNet", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=None, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument('--cache_path', dest="cache_path", type=str)
27 | parser.add_argument('--result_path', dest="result_path", type=str)
28 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
29 | parser.add_argument("--debug", action="store_true")
30 | parser.add_argument("--data_dir", dest="data_dir", default="./data", type=str)
31 |
32 | args = parser.parse_args()
33 | return args
34 |
35 | def make_dirs(directories):
36 | for directory in directories:
37 | if not os.path.exists(directory):
38 | os.makedirs(directory)
39 |
40 | def test(db, split, testiter, debug=False, suffix=None):
41 | with torch.no_grad():
42 | result_dir = system_configs.result_dir
43 | result_dir = os.path.join(result_dir, str(testiter), split)
44 |
45 | if suffix is not None:
46 | result_dir = os.path.join(result_dir, suffix)
47 |
48 | make_dirs([result_dir])
49 |
50 | test_iter = system_configs.max_iter if testiter is None else testiter
51 | print("loading parameters at iteration: {}".format(test_iter))
52 |
53 | print("building neural network...")
54 | nnet = NetworkFactory(db)
55 | print("loading parameters...")
56 | nnet.load_params(test_iter)
57 |
58 | from testfile.test_bar_pure import testing
59 |
60 | nnet.cuda()
61 | nnet.eval_mode()
62 | testing(db, nnet, result_dir, debug=debug)
63 |
64 | if __name__ == "__main__":
65 |
66 | args = parse_args()
67 |
68 | if args.suffix is None:
69 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
70 | else:
71 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
72 | print("cfg_file: {}".format(cfg_file))
73 |
74 | with open(cfg_file, "r") as f:
75 | configs = json.load(f)
76 | configs["system"]["snapshot_name"] = args.cfg_file
77 | configs["system"]["data_dir"] = args.data_dir
78 | configs["system"]["cache_dir"] = args.cache_path
79 | configs["system"]["result_dir"] = args.result_path
80 | system_configs.update_config(configs["system"])
81 |
82 | train_split = system_configs.train_split
83 | val_split = system_configs.val_split
84 | test_split = system_configs.test_split
85 |
86 | split = {
87 | "training": train_split,
88 | "validation": val_split,
89 | "testing": test_split
90 | }[args.split]
91 |
92 | print("loading all datasets...")
93 | dataset = system_configs.dataset
94 | print("split: {}".format(split))
95 | testing_db = datasets[dataset](configs["db"], split)
96 |
97 | print("system config...")
98 | pprint.pprint(system_configs.full)
99 |
100 | print("db config...")
101 | pprint.pprint(testing_db.configs)
102 |
103 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
104 |
--------------------------------------------------------------------------------
/testfile/test_pure_chart.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNet", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=None, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument('--cache_path', dest="cache_path", type=str)
27 | parser.add_argument('--result_path', dest="result_path", type=str)
28 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
29 | parser.add_argument("--debug", action="store_true")
30 | parser.add_argument("--data_dir", dest="data_dir", default="./data", type=str)
31 |
32 | args = parser.parse_args()
33 | return args
34 |
35 | def make_dirs(directories):
36 | for directory in directories:
37 | if not os.path.exists(directory):
38 | os.makedirs(directory)
39 |
40 | def test(db, split, testiter, debug=False, suffix=None):
41 | with torch.no_grad():
42 | result_dir = system_configs.result_dir
43 | result_dir = os.path.join(result_dir, str(testiter), split)
44 |
45 | if suffix is not None:
46 | result_dir = os.path.join(result_dir, suffix)
47 |
48 | make_dirs([result_dir])
49 |
50 | test_iter = system_configs.max_iter if testiter is None else testiter
51 | print("loading parameters at iteration: {}".format(test_iter))
52 |
53 | print("building neural network...")
54 | nnet = NetworkFactory(db)
55 | print("loading parameters...")
56 | nnet.load_params(test_iter)
57 |
58 | from testfile.test_chart_pure import testing
59 |
60 | nnet.cuda()
61 | nnet.eval_mode()
62 | testing(db, nnet, result_dir, debug=debug)
63 |
64 | if __name__ == "__main__":
65 |
66 | args = parse_args()
67 |
68 | if args.suffix is None:
69 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
70 | else:
71 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
72 | print("cfg_file: {}".format(cfg_file))
73 |
74 | with open(cfg_file, "r") as f:
75 | configs = json.load(f)
76 | configs["system"]["snapshot_name"] = args.cfg_file
77 | configs["system"]["data_dir"] = args.data_dir
78 | configs["system"]["cache_dir"] = args.cache_path
79 | configs["system"]["result_dir"] = args.result_path
80 | system_configs.update_config(configs["system"])
81 |
82 | train_split = system_configs.train_split
83 | val_split = system_configs.val_split
84 | test_split = system_configs.test_split
85 |
86 | split = {
87 | "training": train_split,
88 | "validation": val_split,
89 | "testing": test_split
90 | }[args.split]
91 |
92 | print("loading all datasets...")
93 | dataset = system_configs.dataset
94 | print("split: {}".format(split))
95 | testing_db = datasets[dataset](configs["db"], split)
96 |
97 | print("system config...")
98 | pprint.pprint(system_configs.full)
99 |
100 | print("db config...")
101 | pprint.pprint(testing_db.configs)
102 |
103 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
104 |
--------------------------------------------------------------------------------
/testfile/test_pure_cls.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNetLine", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=50000, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument('--cache_path', dest="cache_path", type=str)
27 | parser.add_argument('--result_path', dest="result_path", type=str)
28 | parser.add_argument('--tar_data_path', dest="tar_data_path", type=str)
29 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
30 | parser.add_argument("--debug", action="store_true")
31 | parser.add_argument("--data_dir", dest="data_dir", default="c:/work/linedata(1023)", type=str)
32 |
33 | args = parser.parse_args()
34 | return args
35 |
36 | def make_dirs(directories):
37 | for directory in directories:
38 | if not os.path.exists(directory):
39 | os.makedirs(directory)
40 |
41 | def test(db, split, testiter, debug=False, suffix=None):
42 | with torch.no_grad():
43 | result_dir = system_configs.result_dir
44 | result_dir = os.path.join(result_dir, str(testiter), split)
45 |
46 | if suffix is not None:
47 | result_dir = os.path.join(result_dir, suffix)
48 |
49 | make_dirs([result_dir])
50 |
51 | test_iter = system_configs.max_iter if testiter is None else testiter
52 | print("loading parameters at iteration: {}".format(test_iter))
53 |
54 | print("building neural network...")
55 | nnet = NetworkFactory(db)
56 | print("loading parameters...")
57 | nnet.load_params(test_iter)
58 |
59 | from testfile.test_cls_pure import testing
60 |
61 | nnet.cuda()
62 | nnet.eval_mode()
63 | testing(db, nnet, result_dir, debug=debug)
64 |
65 | if __name__ == "__main__":
66 |
67 | args = parse_args()
68 |
69 | if args.suffix is None:
70 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
71 | else:
72 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
73 | print("cfg_file: {}".format(cfg_file))
74 |
75 | with open(cfg_file, "r") as f:
76 | configs = json.load(f)
77 | configs["system"]["snapshot_name"] = args.cfg_file
78 | configs["system"]["data_dir"] = args.data_dir
79 | configs["system"]["cache_dir"] = args.cache_path
80 | configs["system"]["result_dir"] = args.result_path
81 | configs["system"]["tar_data_dir"] = args.tar_data_path
82 | system_configs.update_config(configs["system"])
83 |
84 | train_split = system_configs.train_split
85 | val_split = system_configs.val_split
86 | test_split = system_configs.test_split
87 |
88 | split = {
89 | "training": train_split,
90 | "validation": val_split,
91 | "testing": test_split
92 | }[args.split]
93 |
94 | print("loading all datasets...")
95 | dataset = system_configs.dataset
96 | print("split: {}".format(split))
97 | testing_db = datasets[dataset](configs["db"], split)
98 |
99 | print("system config...")
100 | pprint.pprint(system_configs.full)
101 |
102 | print("db config...")
103 | pprint.pprint(testing_db.configs)
104 |
105 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
106 |
--------------------------------------------------------------------------------
/testfile/test_pure_coco.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNet", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=None, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="training", type=str)
26 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
27 | parser.add_argument("--debug", action="store_true")
28 | parser.add_argument("--data_dir", dest="data_dir", default="./data", type=str)
29 |
30 | args = parser.parse_args()
31 | return args
32 |
33 | def make_dirs(directories):
34 | for directory in directories:
35 | if not os.path.exists(directory):
36 | os.makedirs(directory)
37 |
38 | def test(db, split, testiter, debug=False, suffix=None):
39 | with torch.no_grad():
40 | result_dir = system_configs.result_dir
41 | result_dir = os.path.join(result_dir, str(testiter), split)
42 |
43 | if suffix is not None:
44 | result_dir = os.path.join(result_dir, suffix)
45 |
46 | make_dirs([result_dir])
47 |
48 | test_iter = system_configs.max_iter if testiter is None else testiter
49 | print("loading parameters at iteration: {}".format(test_iter))
50 |
51 | print("building neural network...")
52 | nnet = NetworkFactory(db)
53 | print("loading parameters...")
54 | nnet.load_params(test_iter)
55 |
56 | from testfile.test_chart_pure import testing
57 |
58 | nnet.cuda()
59 | nnet.eval_mode()
60 | testing(db, nnet, result_dir, debug=debug)
61 |
62 | if __name__ == "__main__":
63 |
64 | args = parse_args()
65 |
66 | if args.suffix is None:
67 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
68 | else:
69 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
70 | print("cfg_file: {}".format(cfg_file))
71 |
72 | with open(cfg_file, "r") as f:
73 | configs = json.load(f)
74 | configs["system"]["snapshot_name"] = args.cfg_file
75 | configs["system"]["data_dir"] = args.data_dir + '/juluo/cocodata/'
76 | configs["system"]["cache_dir"] = args.data_dir + '/juluo/cache_pure_coco/'
77 | configs["system"]["result_dir"] = args.data_dir + '/juluo/cache_pure_coco/'
78 | system_configs.update_config(configs["system"])
79 |
80 | train_split = system_configs.train_split
81 | val_split = system_configs.val_split
82 | test_split = system_configs.test_split
83 |
84 | split = {
85 | "training": train_split,
86 | "validation": val_split,
87 | "testing": test_split
88 | }[args.split]
89 |
90 | print("loading all datasets...")
91 | dataset = system_configs.dataset
92 | print("split: {}".format(split))
93 | testing_db = datasets[dataset](configs["db"], split)
94 |
95 | print("system config...")
96 | pprint.pprint(system_configs.full)
97 |
98 | print("db config...")
99 | pprint.pprint(testing_db.configs)
100 |
101 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
102 |
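Note: the data/cache/result locations above are spliced onto `--data_dir` by plain string concatenation. A hedged sketch of the same wiring with os.path.join, which normalizes the separator whether or not data_dir ends in a slash (the dict name `system_overrides` is illustrative, not from the repo):

    import os

    data_dir = "./data"
    system_overrides = {
        "data_dir":   os.path.join(data_dir, "juluo", "cocodata"),
        "cache_dir":  os.path.join(data_dir, "juluo", "cache_pure_coco"),
        "result_dir": os.path.join(data_dir, "juluo", "cache_pure_coco"),
    }
    print(system_overrides)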
--------------------------------------------------------------------------------
/testfile/test_pure_line.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNetLine", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=50000, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument('--cache_path', dest="cache_path", type=str)
27 | parser.add_argument('--result_path', dest="result_path", type=str)
28 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
29 | parser.add_argument("--debug", action="store_true")
30 | parser.add_argument("--data_dir", dest="data_dir", default="c:/work/linedata(1023)", type=str)
31 |
32 | args = parser.parse_args()
33 | return args
34 |
35 | def make_dirs(directories):
36 | for directory in directories:
37 | if not os.path.exists(directory):
38 | os.makedirs(directory)
39 |
40 | def test(db, split, testiter, debug=False, suffix=None):
41 | with torch.no_grad():
42 | result_dir = system_configs.result_dir
43 | result_dir = os.path.join(result_dir, str(testiter), split)
44 |
45 | if suffix is not None:
46 | result_dir = os.path.join(result_dir, suffix)
47 |
48 | make_dirs([result_dir])
49 |
50 | test_iter = system_configs.max_iter if testiter is None else testiter
51 | print("loading parameters at iteration: {}".format(test_iter))
52 |
53 | print("building neural network...")
54 | nnet = NetworkFactory(db)
55 | print("loading parameters...")
56 | nnet.load_params(test_iter)
57 |
58 | from testfile.test_line_pure import testing
59 |
60 | nnet.cuda()
61 | nnet.eval_mode()
62 | testing(db, nnet, result_dir, debug=debug)
63 |
64 | if __name__ == "__main__":
65 |
66 | args = parse_args()
67 |
68 | if args.suffix is None:
69 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
70 | else:
71 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
72 | print("cfg_file: {}".format(cfg_file))
73 |
74 | with open(cfg_file, "r") as f:
75 | configs = json.load(f)
76 | configs["system"]["snapshot_name"] = args.cfg_file
77 | configs["system"]["data_dir"] = args.data_dir
78 | configs["system"]["cache_dir"] = args.cache_path
79 | configs["system"]["result_dir"] = args.result_path
80 | system_configs.update_config(configs["system"])
81 |
82 | train_split = system_configs.train_split
83 | val_split = system_configs.val_split
84 | test_split = system_configs.test_split
85 |
86 | split = {
87 | "training": train_split,
88 | "validation": val_split,
89 | "testing": test_split
90 | }[args.split]
91 |
92 | print("loading all datasets...")
93 | dataset = system_configs.dataset
94 | print("split: {}".format(split))
95 | testing_db = datasets[dataset](configs["db"], split)
96 |
97 | print("system config...")
98 | pprint.pprint(system_configs.full)
99 |
100 | print("db config...")
101 | pprint.pprint(testing_db.configs)
102 |
103 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
104 |
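Given the flags defined in parse_args above, a typical invocation of this script might look like the following (every path is a placeholder, not a value from the repo):

    python testfile/test_pure_line.py --cfg_file CornerNetLine --testiter 50000 \
        --split validation --data_dir /path/to/linedata \
        --cache_path /path/to/cache --result_path /path/to/results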
--------------------------------------------------------------------------------
/testfile/test_pure_line_cls.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNetLine", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=50000, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument('--cache_path', dest="cache_path", type=str)
27 | parser.add_argument('--result_path', dest="result_path", type=str)
28 | parser.add_argument('--tar_data_path', dest="tar_data_path", type=str)
29 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
30 | parser.add_argument("--debug", action="store_true")
31 | parser.add_argument("--data_dir", dest="data_dir", default="c:/work/linedata(1023)", type=str)
32 |
33 | args = parser.parse_args()
34 | return args
35 |
36 | def make_dirs(directories):
37 | for directory in directories:
38 | if not os.path.exists(directory):
39 | os.makedirs(directory)
40 |
41 | def test(db, split, testiter, debug=False, suffix=None):
42 | with torch.no_grad():
43 | result_dir = system_configs.result_dir
44 | result_dir = os.path.join(result_dir, str(testiter), split)
45 |
46 | if suffix is not None:
47 | result_dir = os.path.join(result_dir, suffix)
48 |
49 | make_dirs([result_dir])
50 |
51 | test_iter = system_configs.max_iter if testiter is None else testiter
52 | print("loading parameters at iteration: {}".format(test_iter))
53 |
54 | print("building neural network...")
55 | nnet = NetworkFactory(db)
56 | print("loading parameters...")
57 | nnet.load_params(test_iter)
58 |
59 | from testfile.test_line_cls_pure_real import testing
60 |
61 | nnet.cuda()
62 | nnet.eval_mode()
63 | testing(db, nnet, result_dir, debug=debug)
64 |
65 | if __name__ == "__main__":
66 |
67 | args = parse_args()
68 |
69 | if args.suffix is None:
70 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
71 | else:
72 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
73 | print("cfg_file: {}".format(cfg_file))
74 |
75 | with open(cfg_file, "r") as f:
76 | configs = json.load(f)
77 | configs["system"]["snapshot_name"] = args.cfg_file
78 | configs["system"]["data_dir"] = args.data_dir
79 | configs["system"]["cache_dir"] = args.cache_path
80 | configs["system"]["result_dir"] = args.result_path
81 | configs["system"]["tar_data_dir"] = args.tar_data_path
82 | system_configs.update_config(configs["system"])
83 |
84 | train_split = system_configs.train_split
85 | val_split = system_configs.val_split
86 | test_split = system_configs.test_split
87 |
88 | split = {
89 | "training": train_split,
90 | "validation": val_split,
91 | "testing": test_split
92 | }[args.split]
93 |
94 | print("loading all datasets...")
95 | dataset = system_configs.dataset
96 | print("split: {}".format(split))
97 | testing_db = datasets[dataset](configs["db"], split)
98 |
99 | print("system config...")
100 | pprint.pprint(system_configs.full)
101 |
102 | print("db config...")
103 | pprint.pprint(testing_db.configs)
104 |
105 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
106 |
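test() above wraps the whole run in torch.no_grad() and flips the network to eval mode before inference. The same generic PyTorch pattern in isolation (nn.Linear stands in for the repo's NetworkFactory model):

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)  # stand-in for the actual network
    model.eval()             # fix dropout/batchnorm to inference behavior
    with torch.no_grad():    # skip autograd bookkeeping: less memory, faster
        out = model(torch.randn(1, 4))
    print(out.shape)  # torch.Size([1, 2])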
--------------------------------------------------------------------------------
/testfile/test_pure_pie.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNetPurePie", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=55000, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument('--cache_path', dest="cache_path", type=str)
27 | parser.add_argument('--result_path', dest="result_path", type=str)
28 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
29 | parser.add_argument("--debug", action="store_true")
30 | parser.add_argument("--data_dir", dest="data_dir", default="c:/work/piedata(1008)", type=str)
31 |
32 | args = parser.parse_args()
33 | return args
34 |
35 | def make_dirs(directories):
36 | for directory in directories:
37 | if not os.path.exists(directory):
38 | os.makedirs(directory)
39 |
40 | def test(db, split, testiter, debug=False, suffix=None):
41 | with torch.no_grad():
42 | result_dir = system_configs.result_dir
43 | result_dir = os.path.join(result_dir, str(testiter), split)
44 |
45 | if suffix is not None:
46 | result_dir = os.path.join(result_dir, suffix)
47 |
48 | make_dirs([result_dir])
49 |
50 | test_iter = system_configs.max_iter if testiter is None else testiter
51 | print("loading parameters at iteration: {}".format(test_iter))
52 |
53 | print("building neural network...")
54 | nnet = NetworkFactory(db)
55 | print("loading parameters...")
56 | nnet.load_params(test_iter)
57 |
58 | from testfile.test_pie_pure import testing
59 |
60 | nnet.cuda()
61 | nnet.eval_mode()
62 | testing(db, nnet, result_dir, debug=debug)
63 |
64 | if __name__ == "__main__":
65 |
66 | args = parse_args()
67 |
68 | if args.suffix is None:
69 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
70 | else:
71 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
72 | print("cfg_file: {}".format(cfg_file))
73 |
74 | with open(cfg_file, "r") as f:
75 | configs = json.load(f)
76 | configs["system"]["snapshot_name"] = args.cfg_file
77 | configs["system"]["data_dir"] = args.data_dir
78 | configs["system"]["cache_dir"] = args.cache_path
79 | configs["system"]["result_dir"] = args.result_path
80 | system_configs.update_config(configs["system"])
81 |
82 | train_split = system_configs.train_split
83 | val_split = system_configs.val_split
84 | test_split = system_configs.test_split
85 |
86 | split = {
87 | "training": train_split,
88 | "validation": val_split,
89 | "testing": test_split
90 | }[args.split]
91 |
92 | print("loading all datasets...")
93 | dataset = system_configs.dataset
94 | print("split: {}".format(split))
95 | testing_db = datasets[dataset](configs["db"], split)
96 |
97 | print("system config...")
98 | pprint.pprint(system_configs.full)
99 |
100 | print("db config...")
101 | pprint.pprint(testing_db.configs)
102 |
103 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
104 |
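make_dirs above re-implements the existence check by hand; on Python 3 the same helper collapses to os.makedirs with exist_ok, which is also race-free when several processes create the same directories (a sketch, not a change to the repo):

    import os

    def make_dirs(directories):
        for directory in directories:
            # exist_ok=True makes the os.path.exists() guard unnecessary
            os.makedirs(directory, exist_ok=True)

    make_dirs(["./results/demo"])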
--------------------------------------------------------------------------------
/testfile/test_pure_sku.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNet", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=None, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="training", type=str)
26 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
27 | parser.add_argument("--debug", action="store_true")
28 | parser.add_argument("--data_dir", dest="data_dir", default="./data", type=str)
29 |
30 | args = parser.parse_args()
31 | return args
32 |
33 | def make_dirs(directories):
34 | for directory in directories:
35 | if not os.path.exists(directory):
36 | os.makedirs(directory)
37 |
38 | def test(db, split, testiter, debug=False, suffix=None):
39 | with torch.no_grad():
40 | result_dir = system_configs.result_dir
41 | result_dir = os.path.join(result_dir, str(testiter), split)
42 |
43 | if suffix is not None:
44 | result_dir = os.path.join(result_dir, suffix)
45 |
46 | make_dirs([result_dir])
47 |
48 | test_iter = system_configs.max_iter if testiter is None else testiter
49 | print("loading parameters at iteration: {}".format(test_iter))
50 |
51 | print("building neural network...")
52 | nnet = NetworkFactory(db)
53 | print("loading parameters...")
54 | nnet.load_params(test_iter)
55 |
56 | from testfile.test_chart_pure import testing
57 |
58 | nnet.cuda()
59 | nnet.eval_mode()
60 | testing(db, nnet, result_dir, debug=debug)
61 |
62 | if __name__ == "__main__":
63 |
64 | args = parse_args()
65 |
66 | if args.suffix is None:
67 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
68 | else:
69 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
70 | print("cfg_file: {}".format(cfg_file))
71 |
72 | with open(cfg_file, "r") as f:
73 | configs = json.load(f)
74 | configs["system"]["snapshot_name"] = args.cfg_file
75 | configs["system"]["data_dir"] = args.data_dir + '/juluo/skudata/'
76 | configs["system"]["cache_dir"] = args.data_dir + '/juluo/cache_pure_sku/'
77 | configs["system"]["result_dir"] = args.data_dir + '/juluo/cache_pure_sku/'
78 | system_configs.update_config(configs["system"])
79 |
80 | train_split = system_configs.train_split
81 | val_split = system_configs.val_split
82 | test_split = system_configs.test_split
83 |
84 | split = {
85 | "training": train_split,
86 | "validation": val_split,
87 | "testing": test_split
88 | }[args.split]
89 |
90 | print("loading all datasets...")
91 | dataset = system_configs.dataset
92 | print("split: {}".format(split))
93 | testing_db = datasets[dataset](configs["db"], split)
94 |
95 | print("system config...")
96 | pprint.pprint(system_configs.full)
97 |
98 | print("db config...")
99 | pprint.pprint(testing_db.configs)
100 |
101 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
102 |
--------------------------------------------------------------------------------
/testfile/test_try.py:
--------------------------------------------------------------------------------
1 | import json
2 | import argparse
3 | import matplotlib
4 | matplotlib.use("Agg")
5 | from config import system_configs
6 | from db.datasets import datasets
7 | def parse_args():
8 | parser = argparse.ArgumentParser(description="Test CornerNet")
9 | parser.add_argument("cfg_file", help="config file", type=str)
10 | parser.add_argument("--testiter", dest="testiter",
11 | help="test at iteration i",
12 | default=None, type=int)
13 | parser.add_argument("--split", dest="split",
14 | help="which split to use",
15 | default="validation", type=str)
16 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
17 | parser.add_argument("--debug", action="store_true")
18 |
19 | args = parser.parse_args()
20 | return args
21 |
22 | if __name__ == "__main__":
23 |
24 | args = parse_args()
25 |
26 | if args.suffix is None:
27 | cfg_file = "/mnt/d/CornerNet-CloudVersion/config/CornerNet.json"
28 | else:
29 |         cfg_file = "/mnt/d/CornerNet-CloudVersion/config/CornerNet.json"  # same path as above; suffix does not change the config in this scratch script
30 | print("cfg_file: {}".format(cfg_file))
31 |
32 | with open(cfg_file, "r") as f:
33 | configs = json.load(f)
34 |
35 | configs["system"]["snapshot_name"] = args.cfg_file
36 | system_configs.update_config(configs["system"])
37 |
38 | train_split = system_configs.train_split
39 | val_split = system_configs.val_split
40 | test_split = system_configs.test_split
41 |
42 | split = {
43 | "training": train_split,
44 | "validation": val_split,
45 | "testing": test_split
46 | }[args.split]
47 |
48 | print("loading all datasets...")
49 | dataset = system_configs.dataset
50 | print("split: {}".format(split))
51 | db = datasets[dataset](configs["db"], split)
52 | categories = db.configs["categories"]
53 | if db.split != "trainval":
54 | db_inds = db.db_inds
55 | else:
56 | db_inds = db.db_inds[:5000]
57 | image_ids = [db.image_ids(ind) for ind in db_inds]
58 | result_json = json.load(open("./test/results.json", "r"))
59 | for i in range(5):
60 |         cls_ids = [i + 1]  # evaluate one category at a time (ids 1..5)
61 | db.evaluate(result_json, cls_ids, image_ids)
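Note: the script above parses results with json.load(open(...)), which leaves the file handle open until garbage collection. A drop-in equivalent that closes it deterministically:

    import json

    # context manager closes the results file as soon as it is parsed
    with open("./test/results.json", "r") as f:
        result_json = json.load(f)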
--------------------------------------------------------------------------------
/testfile/test_xy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import json
4 | import torch
5 | import pprint
6 | import argparse
7 |
8 | import matplotlib
9 | matplotlib.use("Agg")
10 |
11 | from config import system_configs
12 | from nnet.py_factory import NetworkFactory
13 | from db.datasets import datasets
14 |
15 | torch.backends.cudnn.benchmark = False
16 |
17 | def parse_args():
18 | parser = argparse.ArgumentParser(description="Test CornerNet")
19 | parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNetSimpleXY", type=str)
20 | parser.add_argument("--testiter", dest="testiter",
21 | help="test at iteration i",
22 | default=None, type=int)
23 | parser.add_argument("--split", dest="split",
24 | help="which split to use",
25 | default="validation", type=str)
26 | parser.add_argument("--suffix", dest="suffix", default=None, type=str)
27 | parser.add_argument("--debug", action="store_true")
28 | parser.add_argument("--data_dir", dest="data_dir", default="./data", type=str)
29 |
30 | args = parser.parse_args()
31 | return args
32 |
33 | def make_dirs(directories):
34 | for directory in directories:
35 | if not os.path.exists(directory):
36 | os.makedirs(directory)
37 |
38 | def test(db, split, testiter, debug=False, suffix=None):
39 | with torch.no_grad():
40 | result_dir = system_configs.result_dir
41 | result_dir = os.path.join(result_dir, str(testiter), split)
42 |
43 | if suffix is not None:
44 | result_dir = os.path.join(result_dir, suffix)
45 |
46 | make_dirs([result_dir])
47 |
48 | test_iter = system_configs.max_iter if testiter is None else testiter
49 | print("loading parameters at iteration: {}".format(test_iter))
50 |
51 | print("building neural network...")
52 | nnet = NetworkFactory(db)
53 | print("loading parameters...")
54 | nnet.load_params(test_iter)
55 |
56 | from testfile.test_chart_xy import testing
57 |
58 | nnet.cuda()
59 | nnet.eval_mode()
60 | testing(db, nnet, result_dir, debug=debug)
61 |
62 | if __name__ == "__main__":
63 |
64 | args = parse_args()
65 |
66 | if args.suffix is None:
67 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
68 | else:
69 | cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
70 | print("cfg_file: {}".format(cfg_file))
71 |
72 | with open(cfg_file, "r") as f:
73 | configs = json.load(f)
74 | configs["system"]["snapshot_name"] = args.cfg_file
75 | configs["system"]["data_dir"] = args.data_dir + '/juluo/chartdata/'
76 | configs["system"]["cache_dir"] = args.data_dir + '/juluo/cache_xy_new/'
77 | configs["system"]["result_dir"] = args.data_dir + '/juluo/cache_xy_new/'
78 | system_configs.update_config(configs["system"])
79 |
80 | train_split = system_configs.train_split
81 | val_split = system_configs.val_split
82 | test_split = system_configs.test_split
83 |
84 | split = {
85 | "training": train_split,
86 | "validation": val_split,
87 | "testing": test_split
88 | }[args.split]
89 |
90 | print("loading all datasets...")
91 | dataset = system_configs.dataset
92 | print("split: {}".format(split))
93 | testing_db = datasets[dataset](configs["db"], split)
94 |
95 | print("system config...")
96 | pprint.pprint(system_configs.full)
97 |
98 | print("db config...")
99 | pprint.pprint(testing_db.configs)
100 |
101 | test(testing_db, args.split, args.testiter, args.debug, args.suffix)
102 |
--------------------------------------------------------------------------------
/tqdm/__init__.py:
--------------------------------------------------------------------------------
1 | from ._tqdm import tqdm
2 | from ._tqdm import trange
3 | from ._tqdm_gui import tqdm_gui
4 | from ._tqdm_gui import tgrange
5 | from ._tqdm_pandas import tqdm_pandas
6 | from ._main import main
7 | from ._monitor import TMonitor, TqdmSynchronisationWarning
8 | from ._version import __version__ # NOQA
9 | from ._tqdm import TqdmTypeError, TqdmKeyError, TqdmWarning, \
10 | TqdmDeprecationWarning, TqdmExperimentalWarning, \
11 | TqdmMonitorWarning
12 |
13 | __all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'tqdm_pandas',
14 | 'tqdm_notebook', 'tnrange', 'main', 'TMonitor',
15 | 'TqdmTypeError', 'TqdmKeyError',
16 | 'TqdmWarning', 'TqdmDeprecationWarning',
17 | 'TqdmExperimentalWarning',
18 | 'TqdmMonitorWarning', 'TqdmSynchronisationWarning',
19 | '__version__']
20 |
21 |
22 | def tqdm_notebook(*args, **kwargs): # pragma: no cover
23 | """See tqdm._tqdm_notebook.tqdm_notebook for full documentation"""
24 | from ._tqdm_notebook import tqdm_notebook as _tqdm_notebook
25 | return _tqdm_notebook(*args, **kwargs)
26 |
27 |
28 | def tnrange(*args, **kwargs): # pragma: no cover
29 | """
30 | A shortcut for tqdm_notebook(xrange(*args), **kwargs).
31 | On Python3+ range is used instead of xrange.
32 | """
33 | from ._tqdm_notebook import tnrange as _tnrange
34 | return _tnrange(*args, **kwargs)
35 |
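tqdm_notebook and tnrange above are deliberately thin wrappers: the import of ._tqdm_notebook happens inside the function body, so importing the top-level tqdm package never pulls in notebook dependencies unless a notebook bar is actually requested. The same deferred-import pattern in a self-contained toy (json stands in for a heavy dependency):

    def parse_later(text):
        # the costly import happens only on first call,
        # keeping the enclosing module cheap to import
        import json
        return json.loads(text)

    print(parse_later('{"ok": true}'))  # -> {'ok': True}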
--------------------------------------------------------------------------------
/tqdm/__main__.py:
--------------------------------------------------------------------------------
1 | from ._main import main
2 | main()
3 |
--------------------------------------------------------------------------------
/tqdm/_monitor.py:
--------------------------------------------------------------------------------
1 | from threading import Event, Thread
2 | from time import time
3 | from warnings import warn
4 | __all__ = ["TMonitor", "TqdmSynchronisationWarning"]
5 |
6 |
7 | class TqdmSynchronisationWarning(RuntimeWarning):
8 | """tqdm multi-thread/-process errors which may cause incorrect nesting
9 | but otherwise no adverse effects"""
10 | pass
11 |
12 |
13 | class TMonitor(Thread):
14 | """
15 | Monitoring thread for tqdm bars.
16 | Monitors if tqdm bars are taking too much time to display
17 | and readjusts miniters automatically if necessary.
18 |
19 | Parameters
20 | ----------
21 | tqdm_cls : class
22 | tqdm class to use (can be core tqdm or a submodule).
23 |     sleep_interval : float
24 | Time to sleep between monitoring checks.
25 | """
26 |
27 | # internal vars for unit testing
28 | _time = None
29 | _event = None
30 |
31 | def __init__(self, tqdm_cls, sleep_interval):
32 | Thread.__init__(self)
33 | self.daemon = True # kill thread when main killed (KeyboardInterrupt)
34 | self.was_killed = Event()
35 | self.woken = 0 # last time woken up, to sync with monitor
36 | self.tqdm_cls = tqdm_cls
37 | self.sleep_interval = sleep_interval
38 | if TMonitor._time is not None:
39 | self._time = TMonitor._time
40 | else:
41 | self._time = time
42 | if TMonitor._event is not None:
43 | self._event = TMonitor._event
44 | else:
45 | self._event = Event
46 | self.start()
47 |
48 | def exit(self):
49 | self.was_killed.set()
50 | self.join()
51 | return self.report()
52 |
53 | def run(self):
54 | cur_t = self._time()
55 | while True:
56 | # After processing and before sleeping, notify that we woke
57 |             # Needs to happen just before sleeping
58 | self.woken = cur_t
59 | # Sleep some time...
60 | self.was_killed.wait(self.sleep_interval)
61 | # Quit if killed
62 | if self.was_killed.is_set():
63 | return
64 | # Then monitor!
65 | # Acquire lock (to access _instances)
66 | with self.tqdm_cls.get_lock():
67 | cur_t = self._time()
68 | # Check tqdm instances are waiting too long to print
69 | instances = self.tqdm_cls._instances.copy()
70 | for instance in instances:
71 | # Check event in loop to reduce blocking time on exit
72 | if self.was_killed.is_set():
73 | return
74 | # Avoid race by checking that the instance started
75 | if not hasattr(instance, 'start_t'): # pragma: nocover
76 | continue
77 | # Only if mininterval > 1 (else iterations are just slow)
78 | # and last refresh exceeded maxinterval
79 | if instance.miniters > 1 and \
80 | (cur_t - instance.last_print_t) >= \
81 | instance.maxinterval:
82 | # force bypassing miniters on next iteration
83 | # (dynamic_miniters adjusts mininterval automatically)
84 | instance.miniters = 1
85 | # Refresh now! (works only for manual tqdm)
86 | instance.refresh(nolock=True)
87 | if instances != self.tqdm_cls._instances: # pragma: nocover
88 | warn("Set changed size during iteration" +
89 | " (see https://github.com/tqdm/tqdm/issues/481)",
90 | TqdmSynchronisationWarning)
91 |
92 | def report(self):
93 | return not self.was_killed.is_set()
94 |
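TMonitor never calls time.sleep; it waits on the was_killed Event, so exit() interrupts a sleep immediately instead of blocking for up to sleep_interval. The core pattern, reduced to a self-contained sketch (the class and names are illustrative):

    from threading import Event, Thread

    class PeriodicWorker(Thread):
        def __init__(self, interval):
            super().__init__(daemon=True)
            self.interval = interval
            self._stop = Event()

        def run(self):
            # Event.wait returns True the moment stop() sets the flag,
            # so shutdown never waits out a full interval
            while not self._stop.wait(self.interval):
                pass  # periodic work goes here

        def stop(self):
            self._stop.set()
            self.join()

    w = PeriodicWorker(0.1)
    w.start()
    w.stop()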
--------------------------------------------------------------------------------
/tqdm/_tqdm_pandas.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | __author__ = "github.com/casperdcl"
4 | __all__ = ['tqdm_pandas']
5 |
6 |
7 | def tqdm_pandas(tclass, *targs, **tkwargs):
8 | """
9 | Registers the given `tqdm` instance with
10 | `pandas.core.groupby.DataFrameGroupBy.progress_apply`.
11 | It will even close() the `tqdm` instance upon completion.
12 |
13 | Parameters
14 | ----------
15 | tclass : tqdm class you want to use (eg, tqdm, tqdm_notebook, etc)
16 | targs and tkwargs : arguments for the tqdm instance
17 |
18 | Examples
19 | --------
20 | >>> import pandas as pd
21 | >>> import numpy as np
22 | >>> from tqdm import tqdm, tqdm_pandas
23 | >>>
24 | >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
25 | >>> tqdm_pandas(tqdm, leave=True) # can use tqdm_gui, optional kwargs, etc
26 | >>> # Now you can use `progress_apply` instead of `apply`
27 | >>> df.groupby(0).progress_apply(lambda x: x**2)
28 |
29 | References
30 | ----------
31 | https://stackoverflow.com/questions/18603270/
32 | progress-indicator-during-pandas-operations-python
33 | """
34 | from tqdm import TqdmDeprecationWarning
35 |
36 | if isinstance(tclass, type) or (getattr(tclass, '__name__', '').startswith(
37 | 'tqdm_')): # delayed adapter case
38 | TqdmDeprecationWarning("""\
39 | Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm, ...)`.
40 | """, fp_write=getattr(tkwargs.get('file', None), 'write', sys.stderr.write))
41 | tclass.pandas(*targs, **tkwargs)
42 | else:
43 | TqdmDeprecationWarning("""\
44 | Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm(...))`.
45 | """, fp_write=getattr(tclass.fp, 'write', sys.stderr.write))
46 | type(tclass).pandas(deprecated_t=tclass)
47 |
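As the deprecation messages above state, the supported entry point is now the tqdm.pandas classmethod rather than this shim. Equivalent modern usage:

    import numpy as np
    import pandas as pd
    from tqdm import tqdm

    tqdm.pandas(desc="apply")  # registers progress_apply on pandas objects
    df = pd.DataFrame(np.random.randint(0, 100, (1000, 6)))
    df.groupby(0).progress_apply(lambda x: x ** 2)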
--------------------------------------------------------------------------------
/tqdm/_version.py:
--------------------------------------------------------------------------------
1 | # Definition of the version number
2 | import os
3 | from io import open as io_open
4 |
5 | __all__ = ["__version__"]
6 |
7 | # major, minor, patch, -extra
8 | version_info = 4, 23, 0
9 |
10 | # Nice string for the version
11 | __version__ = '.'.join(map(str, version_info))
12 |
13 |
14 | # auto -extra based on commit hash (if not tagged as release)
15 | scriptdir = os.path.dirname(__file__)
16 | gitdir = os.path.abspath(os.path.join(scriptdir, "..", ".git"))
17 | if os.path.isdir(gitdir): # pragma: nocover
18 | extra = None
19 | # Open config file to check if we are in tqdm project
20 | with io_open(os.path.join(gitdir, "config"), 'r') as fh_config:
21 | if 'tqdm' in fh_config.read():
22 | # Open the HEAD file
23 | with io_open(os.path.join(gitdir, "HEAD"), 'r') as fh_head:
24 | extra = fh_head.readline().strip()
25 | # in a branch => HEAD points to file containing last commit
26 | if 'ref:' in extra:
27 | # reference file path
28 | ref_file = extra[5:]
29 | branch_name = ref_file.rsplit('/', 1)[-1]
30 |
31 | ref_file_path = os.path.abspath(os.path.join(gitdir, ref_file))
32 | # check that we are in git folder
33 | # (by stripping the git folder from the ref file path)
34 | if os.path.relpath(
35 | ref_file_path, gitdir).replace('\\', '/') != ref_file:
36 | # out of git folder
37 | extra = None
38 | else:
39 | # open the ref file
40 | with io_open(ref_file_path, 'r') as fh_branch:
41 | commit_hash = fh_branch.readline().strip()
42 | extra = commit_hash[:8]
43 | if branch_name != "master":
44 | extra += '.' + branch_name
45 |
46 | # detached HEAD mode, already have commit hash
47 | else:
48 | extra = extra[:8]
49 |
50 | # Append commit hash (and branch) to version string if not tagged
51 | if extra is not None:
52 | try:
53 | with io_open(os.path.join(gitdir, "refs", "tags",
54 | 'v' + __version__)) as fdv:
55 | if fdv.readline().strip()[:8] != extra[:8]:
56 | __version__ += '-' + extra
57 | except Exception as e:
58 | if "No such file" not in str(e):
59 | raise
60 |
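The block above re-implements just enough git plumbing (reading .git/config, HEAD, and the ref file) to recover the current commit without spawning a process. Where shelling out is acceptable, the same short hash is one call away (a hedged alternative, assuming git is on PATH and the working directory is inside the repo):

    import subprocess

    # short commit hash of HEAD, e.g. '622c8c87'
    commit = subprocess.check_output(
        ["git", "rev-parse", "--short=8", "HEAD"]).decode().strip()
    print(commit)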
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .tqdm import stdout_to_tqdm
2 |
3 | from .image import crop_image
4 | from .image import color_jittering_, lighting_, normalize_
5 |
--------------------------------------------------------------------------------
/utils/image.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import random
4 |
5 | def grayscale(image):
6 | return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
7 |
8 | def normalize_(image, mean, std):
9 | image -= mean
10 | image /= std
11 |
12 | def lighting_(data_rng, image, alphastd, eigval, eigvec):
13 | alpha = data_rng.normal(scale=alphastd, size=(3, ))
14 | image += np.dot(eigvec, eigval * alpha)
15 |
16 | def blend_(alpha, image1, image2):
17 | image1 *= alpha
18 | image2 *= (1 - alpha)
19 | image1 += image2
20 |
21 | def saturation_(data_rng, image, gs, gs_mean, var):
22 | alpha = 1. + data_rng.uniform(low=-var, high=var)
23 | blend_(alpha, image, gs[:, :, None])
24 |
25 | def brightness_(data_rng, image, gs, gs_mean, var):
26 | alpha = 1. + data_rng.uniform(low=-var, high=var)
27 | image *= alpha
28 |
29 | def contrast_(data_rng, image, gs, gs_mean, var):
30 | alpha = 1. + data_rng.uniform(low=-var, high=var)
31 | blend_(alpha, image, gs_mean)
32 |
33 | def color_jittering_(data_rng, image):
34 | functions = [brightness_, contrast_, saturation_]
35 | random.shuffle(functions)
36 |
37 | gs = grayscale(image)
38 | gs_mean = gs.mean()
39 | for f in functions:
40 | f(data_rng, image, gs, gs_mean, 0.4)
41 |
42 | def crop_image(image, center, size):
43 | cty, ctx = center
44 | height, width = size
45 | im_height, im_width = image.shape[0:2]
46 | cropped_image = np.zeros((height, width, image.shape[2]), dtype=image.dtype)
47 |
48 | x0, x1 = max(0, ctx - width // 2), min(ctx + width // 2, im_width)
49 | y0, y1 = max(0, cty - height // 2), min(cty + height // 2, im_height)
50 |
51 | left, right = ctx - x0, x1 - ctx
52 | top, bottom = cty - y0, y1 - cty
53 |
54 | cropped_cty, cropped_ctx = height // 2, width // 2
55 | y_slice = slice(cropped_cty - top, cropped_cty + bottom)
56 | x_slice = slice(cropped_ctx - left, cropped_ctx + right)
57 | cropped_image[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
58 |
59 | border = np.array([
60 | cropped_cty - top,
61 | cropped_cty + bottom,
62 | cropped_ctx - left,
63 | cropped_ctx + right
64 | ], dtype=np.float32)
65 |
66 | offset = np.array([
67 | cty - height // 2,
68 | ctx - width // 2
69 | ])
70 |
71 | return cropped_image, border, offset
72 |
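crop_image pads with zeros wherever the requested window extends past the image, and returns the valid-region border plus the offset of the crop in source coordinates. A small usage sketch (the image size and the center/size values are arbitrary):

    import numpy as np
    # crop_image is the function defined above (utils.image.crop_image)

    image = np.random.rand(480, 640, 3).astype(np.float32)
    cropped, border, offset = crop_image(image, center=(240, 320), size=(256, 256))
    print(cropped.shape)  # (256, 256, 3)
    print(border)         # [y0, y1, x0, x1] of the non-padded region in the crop
    print(offset)         # top-left corner of the crop in the original image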
--------------------------------------------------------------------------------
/utils/tqdm.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import numpy as np
3 | import contextlib
4 |
5 | from tqdm import tqdm
6 |
7 | class TqdmFile(object):
8 | dummy_file = None
9 | def __init__(self, dummy_file):
10 | self.dummy_file = dummy_file
11 |
12 | def write(self, x):
13 | if len(x.rstrip()) > 0:
14 | tqdm.write(x, file=self.dummy_file)
15 |
16 | @contextlib.contextmanager
17 | def stdout_to_tqdm():
18 | save_stdout = sys.stdout
19 | try:
20 | sys.stdout = TqdmFile(sys.stdout)
21 | yield save_stdout
22 | except Exception as exc:
23 | raise exc
24 | finally:
25 | sys.stdout = save_stdout
26 |
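stdout_to_tqdm swaps sys.stdout for the TqdmFile wrapper so stray print calls are routed through tqdm.write and do not mangle an active progress bar, while the original stdout is yielded so the bar itself can still render to it. A plausible use:

    from tqdm import tqdm
    # stdout_to_tqdm is the context manager defined above (utils.tqdm)

    with stdout_to_tqdm() as save_stdout:
        for i in tqdm(range(100), file=save_stdout, ncols=80):
            print("step", i)  # goes through tqdm.write; the bar stays intact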
--------------------------------------------------------------------------------