├── benchmarks
│   ├── __init__.py
│   ├── onnx_files
│   │   ├── __init__.py
│   │   └── full_dnns
│   │       ├── lenet.srdfg
│   │       ├── resnet18.srdfg
│   │       └── resnet18_train.srdfg
│   └── srdfg_files
│       ├── svm_wifi_30_20.srdfg
│       └── svm_wifi_inf_30_20.srdfg
├── tests
│   ├── old
│   │   ├── __init__.py
│   │   ├── test_mgdfg.py
│   │   ├── test_serialization.py
│   │   ├── test_pmlang.py
│   │   └── test_load_store_mgdfg.py
│   ├── neuroweaver
│   │   └── __init__.py
│   ├── reference_implementations
│   │   ├── __init__.py
│   │   └── ppo.py
│   ├── pmlang_examples
│   │   ├── outputs
│   │   │   └── placeholder.txt
│   │   ├── recommender.pm
│   │   ├── classification.pm
│   │   ├── linear.pm
│   │   ├── logistic.pm
│   │   ├── lenet.pm
│   │   └── backpropagation.pm
│   ├── __init__.py
│   ├── onnx_examples
│   │   ├── svm_200.onnx
│   │   ├── svm_54.onnx
│   │   ├── linear_55.onnx
│   │   ├── logreg_54.onnx
│   │   ├── svm_1740.onnx
│   │   ├── svm_7129.onnx
│   │   ├── backprop_784.onnx
│   │   ├── linear_16384.onnx
│   │   ├── linear_784.onnx
│   │   ├── linear_8000.onnx
│   │   ├── logreg_200.onnx
│   │   ├── logreg_2000.onnx
│   │   ├── logreg_6033.onnx
│   │   └── logreg_iris.onnx
│   ├── test_tvm.py
│   ├── test_rl_algorithms.py
│   ├── test_domain_ops.py
│   ├── test_neuroweaver.py
│   ├── test_verilog_gen.py
│   ├── test_dnns.py
│   ├── test_serialization.py
│   ├── tabla_examples
│   │   ├── linear_3.json
│   │   └── logistic_3.json
│   └── test_transformations.py
├── polymath
│   ├── pmlang
│   │   ├── compiler.py
│   │   ├── __init__.py
│   │   ├── antlr_generator
│   │   │   ├── __init__.py
│   │   │   ├── parser_gen.sh
│   │   │   ├── PMLang.tokens
│   │   │   ├── PMLangLexer.tokens
│   │   │   └── graphutils.py
│   │   └── mapping.py
│   ├── tools
│   │   ├── __init__.py
│   │   └── srdfg_helpers.py
│   ├── codegen
│   │   ├── tabla
│   │   │   ├── __init__.py
│   │   │   ├── tabla_utils.py
│   │   │   ├── tabla_translate.py
│   │   │   └── sigmoid_lookup.csv
│   │   ├── tvmgen
│   │   │   ├── __init__.py
│   │   │   ├── tvm_translate.py
│   │   │   └── utils.py
│   │   ├── dnnweavergen
│   │   │   ├── dnnweaver2
│   │   │   │   ├── fpga
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── memspace.py
│   │   │   │   ├── utils
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── utils.py
│   │   │   │   ├── optimizer
│   │   │   │   │   └── __init__.py
│   │   │   │   ├── scalar
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── dtypes.py
│   │   │   │   │   └── ops.py
│   │   │   │   ├── simulator
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── accelerator.py
│   │   │   │   │   └── stats.py
│   │   │   │   ├── tensorOps
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── NodeOp.py
│   │   │   │   ├── __init__.py
│   │   │   │   ├── benchmarks
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── test.py
│   │   │   │   ├── tensor.py
│   │   │   │   └── tf_utils
│   │   │   │       └── helper.py
│   │   │   ├── __init__.py
│   │   │   ├── dnnweaver_translate.py
│   │   │   ├── yolo_tinyv2.cfg
│   │   │   ├── dnnweaver_pass.py
│   │   │   └── utils.py
│   │   └── codegen_utils.py
│   ├── srdfg
│   │   ├── templates
│   │   │   ├── __init__.py
│   │   │   ├── mpc.py
│   │   │   ├── optimizers.py
│   │   │   ├── data_analytics.py
│   │   │   └── tensor_transformations.py
│   │   ├── from_onnx
│   │   │   └── __init__.py
│   │   ├── from_pytorch
│   │   │   └── __init__.py
│   │   ├── serialization
│   │   │   ├── gen_proto.sh
│   │   │   ├── __init__.py
│   │   │   ├── ndarray.proto
│   │   │   ├── ndarray_pb2.py
│   │   │   ├── pmlang_mgdfg.py
│   │   │   └── srdfgv3.proto
│   │   ├── __init__.py
│   │   ├── passes
│   │   │   ├── pass_utils.py
│   │   │   └── node_mapping.py
│   │   ├── graph.py
│   │   ├── random.py
│   │   └── template_utils.py
│   ├── __main__.py
│   ├── run.sh
│   ├── __init__.py
│   └── polymath_entry.py
├── setup.cfg
├── docs
│   ├── polymath.pdf
│   ├── index.rst
│   ├── polymath.rst
│   └── conf.py
├── .travis.yml
├── requirements.txt
├── description.json
├── Makefile
├── README.md
├── .gitignore
├── examples
│   └── example_graphs.py
├── setup.py
└── README.txt
/benchmarks/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/old/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/pmlang/compiler.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/tools/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/benchmarks/onnx_files/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/neuroweaver/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/codegen/tabla/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/codegen/tvmgen/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/srdfg/templates/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/srdfg/from_onnx/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/polymath/srdfg/from_pytorch/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/reference_implementations/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [tool:pytest]
2 | testpaths = tests
--------------------------------------------------------------------------------
/tests/pmlang_examples/outputs/placeholder.txt:
--------------------------------------------------------------------------------
1 | t
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/fpga/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/optimizer/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/scalar/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/simulator/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/tensorOps/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/polymath/pmlang/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
--------------------------------------------------------------------------------
/docs/polymath.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/docs/polymath.pdf
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division, absolute_import
2 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import division,absolute_import, print_function
2 |
--------------------------------------------------------------------------------
/tests/onnx_examples/svm_200.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/svm_200.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/svm_54.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/svm_54.onnx
--------------------------------------------------------------------------------
/polymath/srdfg/serialization/gen_proto.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | protoc -I./ --python_out=./ srdfgv3.proto ndarray.proto
--------------------------------------------------------------------------------
/tests/onnx_examples/linear_55.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/linear_55.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/logreg_54.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/logreg_54.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/svm_1740.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/svm_1740.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/svm_7129.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/svm_7129.onnx
--------------------------------------------------------------------------------
/polymath/__main__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from .polymath_entry import main
3 |
4 | if __name__ == '__main__':
5 |     sys.exit(main())
--------------------------------------------------------------------------------
/polymath/srdfg/serialization/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | # Make the generated *_pb2 modules importable by their bare module names.
5 | sys.path.append(str(Path(__file__).parent))
--------------------------------------------------------------------------------
/tests/onnx_examples/backprop_784.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/backprop_784.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/linear_16384.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/linear_16384.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/linear_784.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/linear_784.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/linear_8000.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/linear_8000.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/logreg_200.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/logreg_200.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/logreg_2000.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/logreg_2000.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/logreg_6033.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/logreg_6033.onnx
--------------------------------------------------------------------------------
/tests/onnx_examples/logreg_iris.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/tests/onnx_examples/logreg_iris.onnx
--------------------------------------------------------------------------------
/benchmarks/onnx_files/full_dnns/lenet.srdfg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/benchmarks/onnx_files/full_dnns/lenet.srdfg
--------------------------------------------------------------------------------
/benchmarks/srdfg_files/svm_wifi_30_20.srdfg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/benchmarks/srdfg_files/svm_wifi_30_20.srdfg
--------------------------------------------------------------------------------
/benchmarks/onnx_files/full_dnns/resnet18.srdfg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/benchmarks/onnx_files/full_dnns/resnet18.srdfg
--------------------------------------------------------------------------------
/benchmarks/srdfg_files/svm_wifi_inf_30_20.srdfg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/benchmarks/srdfg_files/svm_wifi_inf_30_20.srdfg
--------------------------------------------------------------------------------
/benchmarks/onnx_files/full_dnns/resnet18_train.srdfg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/he-actlab/polymath/HEAD/benchmarks/onnx_files/full_dnns/resnet18_train.srdfg
--------------------------------------------------------------------------------
/polymath/srdfg/serialization/ndarray.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package numproto.protobuf;
4 |
5 | message NDArray {
6 | bytes ndarray = 1;
7 | }
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - '3.6'
4 | cache: pip
5 | install:
6 | - pip install -r requirements.txt
7 | script:
8 | - make
9 | deploy:
10 | provider: pypi
11 | skip_existing: true
12 | user: skinzer
--------------------------------------------------------------------------------
/polymath/srdfg/templates/mpc.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | import numpy as np
3 |
4 |
5 |
6 | class matern32(pm.Template):
7 |     def define_graph(self, x, y, variance, lengthscale):
8 |         sqrt3 = pm.sqrt(3.0)
9 |         i = pm.index(0, x.shape[0]-1, "i")
10 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | pathlib
3 | pytools
4 | graphviz
5 | numproto
6 | six
7 | onnx
8 | onnxruntime
9 | pytest>=3.2.1
10 | pytest-cov>=2.5.1
11 | pylint<=1.9.3
12 | protobuf
13 | indexed
14 | dataclasses
15 | tqdm
16 | jsonschema
17 | networkx
18 | torch
19 | onnxoptimizer
--------------------------------------------------------------------------------
/polymath/srdfg/__init__.py:
--------------------------------------------------------------------------------
1 | from google.protobuf.json_format import MessageToJson
2 | from pytools import memoize_method
3 | from typing import TYPE_CHECKING, Union, List, Dict
4 |
5 | FUNC_WRAPPER_NODES = ["func_op", "slice_op", "sum", "prod",
6 |                       "argmin", "argmax", "amin", "amax"]
--------------------------------------------------------------------------------
/polymath/tools/srdfg_helpers.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | SKIP_OP_TYPES = (pm.state, pm.output, pm.temp, pm.write, pm.placeholder)
3 |
4 | def print_graph_ops(graph: pm.Node):
5 |     for name, node in graph.nodes.items():
6 |         if not isinstance(node, SKIP_OP_TYPES):
7 |             print(f"{node.op_name}:{node.name}")
8 |
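 9 | # Illustrative usage sketch (assumes a graph built with pm.Node, as in the tests):
10 | #
11 | #     with pm.Node(name="linreg") as graph:
12 | #         ...
13 | #     print_graph_ops(graph)   # prints "op_name:name" for each compute node,
14 | #                              # skipping state/output/placeholder wrappers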
--------------------------------------------------------------------------------
/polymath/pmlang/antlr_generator/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 |
4 | LOOPY_IF = "{prefix} = if({predicate}, {true_stmt}, {false_stmt})"
5 | LOOPY_ASSIGN = "{prefix} = {expr}"
6 | INDEX_DOMAIN = "{low} <= {var} <= {upper}"
7 | ASSUMPTION_DOMAIN = "{var} >= {low}"
8 | FULL_DOMAIN = "{{[{dom_names}]: {domains}}}"
9 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/__init__.py:
--------------------------------------------------------------------------------
1 | from polymath.codegen.dnnweavergen.dnnweaver2.graph import get_default_graph
2 | from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import FQDtype
3 |
4 |
5 | def get_tensor(shape, name=None, dtype=FQDtype.FP32, trainable=True, data=None):
6 |     g = get_default_graph()
7 |     return g.tensor(shape=shape, name=name, dtype=dtype, trainable=trainable, data=data)
8 |
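 9 | # Illustrative usage sketch (assumes a dnnweaver2 graph is the current default
10 | # graph; the tensor name is hypothetical):
11 | #
12 | #     t = get_tensor(shape=(16, 16), name='weights')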
--------------------------------------------------------------------------------
/description.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "polymath",
3 | "description": "A Programming stack for ",
4 | "version": "0.1.0",
5 | "author": "Sean Kinzer",
6 | "author_email": "skinzer@eng.ucsd.edu",
7 | "license": "License :: OSI Approved :: Apache Software License",
8 | "classifiers": [
9 | "Development Status :: 3 - Alpha",
10 | "Programming Language :: Python :: 3"
11 | ],
12 | "url": "https://github.com/heactlab/polymath",
13 | "python_requires": ">=3.6.0"
14 | }
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
 1 | .PHONY : all tests docs lint_tests code_tests clean install
 2 |
 3 | all : tests docs
 4 |
 5 | tests : code_tests lint_tests
 6 |
 7 | install :
 8 | 	pip install -r requirements.txt
 9 |
10 | lint_tests :
11 | 	pylint polymath
12 |
13 | code_tests :
14 | 	py.test --cov polymath --cov-fail-under=100 --cov-report=term-missing --cov-report=html --verbose --durations=5 -s
15 |
16 | docs :
17 | 	sphinx-build -b doctest docs build
18 | 	sphinx-build -nWT docs build
19 |
20 | clean :
21 | 	rm -rf build/
22 |
23 |
--------------------------------------------------------------------------------
/polymath/pmlang/antlr_generator/parser_gen.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ANTLR="java -Xmx500M -cp '/usr/local/lib/antlr-4.7.1-complete.jar:$CLASSPATH' org.antlr.v4.Tool"
4 | # Generate PMLang.tokens, PMLangLexer.interp, PMLangLexer.py, PMLangLexer.tokens, PMLangListener.py, and PMLangParser.py
5 | $ANTLR -o ./ -listener -Dlanguage=Python3 -no-visitor -lib ./ ./PMLang.g4
6 |
7 | # Change names of python files:
8 | mv PMLangLexer.py lexer.py
9 | mv PMLangParser.py parser.py
10 | mv PMLangListener.py listener.py
11 |
12 |
13 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/benchmarks/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 |
 3 | benchmark_list = [
 4 |     'alexnet-d',
 5 |     'alexnet-q',
 6 |     'alexnet-w',
 7 |     'googlenet-q',
 8 |     'resnet-34-w',
 9 |     'svhn-d',
10 |     'svhn-q',
11 |     'cifar-10-q'
12 | ]
13 |
14 | def get_graph(bench, train=True):
15 |     bench = bench.lower()
16 |     # Resolve relative to this vendored package, not the original top-level `dnnweaver2`.
17 |     module_name = f"{__name__}.{bench}"
18 |     b = importlib.import_module(module_name)
19 |     return b.get_graph(train=train)
20 |
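21 | # Illustrative usage sketch (requires the named benchmark module to exist):
22 | #
23 | #     g = get_graph('alexnet-q', train=False)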
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/utils/utils.py:
--------------------------------------------------------------------------------
 1 | import math
 2 |
 3 | def floor_a_by_b(a, b):
 4 |     # int() truncates toward zero, which is wrong for negative quotients;
 5 |     # math.floor gives a true floor.
 6 |     return int(math.floor(float(a) / b))
 7 |
 8 | def ceil_a_by_b(a, b):
 9 |     return int(math.ceil(float(a) / b))
10 |
11 |
12 | def log2(a):
13 |     return math.log(a) / math.log(2)
14 |
15 | def lookup_pandas_dataframe(data, lookup_dict):
16 |     '''
17 |     Look up rows of a pandas dataframe using a key-value dict.
18 |     '''
19 |     data = data.drop_duplicates()
20 |     for key in lookup_dict:
21 |         data = data.loc[data[key] == lookup_dict[key]]
22 |
23 |     # assert len(data) == 1, ("Found {} entries for dict {}".format(len(data), lookup_dict))
24 |     return data
25 |
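26 | # Illustrative sketch of the helpers above:
27 | #
28 | #     ceil_a_by_b(7, 2)    # -> 4
29 | #     floor_a_by_b(7, 2)   # -> 3
30 | #     floor_a_by_b(-7, 2)  # -> -4  (true floor rather than truncation)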
--------------------------------------------------------------------------------
/polymath/codegen/tvmgen/tvm_translate.py:
--------------------------------------------------------------------------------
 1 | import polymath as pm
 2 | import numpy as np
 3 | from polymath.codegen.tvmgen.tvm_pass import TVMPass, TVM_OPS
 4 | import json
 5 |
 6 | def generate_tvm(graph, input_dict, filepath, context_dict=None):
 7 |     assert len(input_dict) > 0
 8 |     shape_dict = {k: v.shape if isinstance(v, np.ndarray) else v for k, v in input_dict.items()}
 9 |     shape_dict['populate'] = False
10 |     shape_pass = pm.NormalizeGraph(shape_dict)
11 |     lower_pass = pm.Lower(TVM_OPS)
12 |     tvm_pass = TVMPass()
13 |     shaped = shape_pass(graph)
14 |     lowered = lower_pass(shaped)
15 |     tvm_pass(lowered)  # running the pass populates tvm_pass.tvm_ir
16 |     return tvm_pass.tvm_ir['tvm_code']
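17 |
18 | # Illustrative usage sketch; see tests/test_tvm.py for a complete example:
19 | #
20 | #     tvm_code = generate_tvm(graph, inp_info, "")
21 | #     mod = tvm.IRModule.from_expr(tvm_code)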
--------------------------------------------------------------------------------
/polymath/srdfg/passes/pass_utils.py:
--------------------------------------------------------------------------------
 1 | import unicodedata
 2 |
 3 | def is_string(var):
 4 |     return var[0] == '"' and var[-1] == '"'
 5 |
 6 | def is_number(var):
 7 |     try:
 8 |         float(var)
 9 |         return True
10 |     except ValueError:
11 |         pass
12 |
13 |     try:
14 |         unicodedata.numeric(var)
15 |         return True
16 |     except (TypeError, ValueError):
17 |         pass
18 |     return False
19 |
20 |
21 | def is_literal(var):
22 |     return is_string(var) or is_number(var)
23 |
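24 | # Illustrative sketch:
25 | #
26 | #     is_literal('"hello"')   # True  (quoted string)
27 | #     is_literal('3.14')      # True  (parses as a number)
28 | #     is_literal('foo')       # False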
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver_translate.py:
--------------------------------------------------------------------------------
 1 | import polymath as pm
 2 | import numpy as np
 3 | from polymath.codegen.dnnweavergen.dnnweaver_pass import DNNWeaverPass, DNNWEAVER_OPS
 4 | import json
 5 |
 6 | def generate_dnnweaver(graph, input_dict, filepath, debug=False, add_kwargs=False, context_dict=None):
 7 |     shape_dict = {k: v.shape if isinstance(v, np.ndarray) else v for k, v in input_dict.items()}
 8 |     shape_dict['populate'] = False
 9 |     shape_pass = pm.NormalizeGraph(shape_dict, debug=debug)
10 |     lower_pass = pm.Lower(DNNWEAVER_OPS, debug=debug)
11 |     dnnw_pass = DNNWeaverPass(debug=debug)
12 |     shaped = shape_pass(graph)
13 |     lowered = lower_pass(shaped)
14 |     result = dnnw_pass(lowered)
15 |     return dnnw_pass.dnnw_ir['dnnweaver_code'], result
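16 |
17 | # Illustrative usage sketch (input names and shapes are hypothetical):
18 | #
19 | #     code, result = generate_dnnweaver(graph, {"x": np.random.rand(784)}, "")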
--------------------------------------------------------------------------------
/polymath/codegen/tabla/tabla_utils.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | from pathlib import Path
 3 | LUT_PATH = str(Path(__file__).parent)
 4 | SIGMOID_PATH = f"{LUT_PATH}/sigmoid_lookup.csv"
 5 |
 6 | def get_sigmoid_lut():
 7 |     lvals = np.loadtxt(SIGMOID_PATH, delimiter=",")
 8 |     return np.asarray([i[1] for i in lvals])
 9 |
10 | def sigmoid_lut(val, minv=-256, maxv=255, div_size=128):
11 |     # np.int was removed in NumPy 1.20+; the builtin int is equivalent here.
12 |     val = np.floor(val / 4).astype(int)
13 |     val = val.clip(minv, maxv) + maxv + 1
14 |     lut = get_sigmoid_lut()
15 |     return np.floor(lut[val] / div_size)
16 |
17 |
18 | def get_gaussian_lut():
19 |     lvals = np.loadtxt(SIGMOID_PATH, delimiter=",")
20 |     return np.asarray([i[1] for i in lvals])
21 |
22 | def gaussian_lut(val, minv=-256, maxv=255):
23 |     raise NotImplementedError
--------------------------------------------------------------------------------
/polymath/srdfg/graph.py:
--------------------------------------------------------------------------------
 1 | from collections import OrderedDict
 2 | from indexed import IndexedOrderedDict
 3 |
 4 | class Graph(dict):
 5 |
 6 |     def __init__(self, *args, **kwargs):
 7 |         super(Graph, self).__init__(*args, **kwargs)
 8 |
 9 |     def __hash__(self):
10 |         return hash(tuple([hash(node) for _, node in self.items()]))
11 |
12 |     def as_list(self):
13 |         return [v for _, v in self.items()]
14 |
15 |     def last(self):
16 |         return self[next(reversed(self))]
17 |
18 |     def item_by_index(self, key):
19 |         return list(self.values())[key]
20 |
21 |     def item_index(self, key):
22 |         return list(self.keys()).index(key)
23 |
24 |     def func_hash(self):
25 |         return hash(tuple([(node.func_hash()) for _, node in self.items()]))
26 |
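27 | # Illustrative usage sketch (node values are hypothetical):
28 | #
29 | #     g = Graph()
30 | #     g["a"] = node_a          # nodes are keyed by name
31 | #     g.item_by_index(0)       # -> node_a
32 | #     g.last()                 # -> most recently inserted node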
--------------------------------------------------------------------------------
/polymath/srdfg/templates/optimizers.py:
--------------------------------------------------------------------------------
 1 | import polymath as pm
 2 | from .template_utils import _get_elem_indices
 3 | from polymath.srdfg.util import squeeze_shape
 4 | from numbers import Integral
 5 | import numpy as np
 6 | import functools
 7 |
 8 | class sgd(pm.Template):
 9 |     def define_graph(self, param, grad, lr=0.01, momentum=0.9, weight_decay=0.0, dampening=0.0, nesterov=False):
10 |         data_idx, grad_idx, indices = _get_elem_indices(param, grad, param)
11 |
12 |         if momentum != 0:
13 |             param[indices] = param[data_idx] * momentum - lr * grad[grad_idx]
14 |         else:
15 |             param[indices] = param[data_idx] - lr * grad[grad_idx]
16 |
17 |     @property
18 |     def inputs(self):
19 |         return (self.args[0], self.args[1])
20 |
21 |     @property
22 |     def outputs(self):
23 |         return (self.args[0],)
24 |
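25 | # Illustrative usage sketch (shapes are hypothetical; sgd updates `param` in
26 | # place within the enclosing graph):
27 | #
28 | #     with pm.Node(name="train") as graph:
29 | #         w = pm.state("w", shape=(784,))
30 | #         g = pm.input("g", shape=(784,))
31 | #         sgd(w, g, lr=0.01, momentum=0.0)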
--------------------------------------------------------------------------------
/tests/pmlang_examples/recommender.pm:
--------------------------------------------------------------------------------
1 |
2 | rec_model(input float x1[k], input float x2[k], input float r1[m], input float y1[m], input float r2[n], input float y2[n],
3 | state float w1[m][k], state float w2[n][k], param int mu=1) {
4 | // m = 3;
5 | // n = 3;
6 | // k = 2;
7 | index i[0:m-1], j[0:n-1], l[0:k-1];
8 | h1[i] = sum[l](w1[i][l] * x2[l]) * r1[i];
9 | h2[j] = sum[l](x1[l] * w2[j][l]) * r2[j];
10 | d1[i] = h1[i] - y1[i];
11 | d2[j] = h2[j] - y2[j];
12 | g1[i][l] = d1[i] * x2[l];
13 | g2[j][l] = d2[j] * x1[l];
14 | w1[i][l] = w1[i][l] - 1.0 * g1[i][l];
15 | w2[j][l] = w2[j][l] - g2[j][l];
16 |
17 |
18 | }
19 |
20 |
21 | main()
22 | {
23 | float x1_input[2], x2_input[2], r1_output[3], y1_output[3], r2_output[3], y2_output[3];
24 | float w1_model[3][2], w2_model[3][2];
25 |
26 | rec_model(x1_input, x2_input, r1_output, y1_output, r2_output, y2_output, w1_model, w2_model);
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/tests/test_tvm.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | from tests.util import linear, op_counts, logistic, svm, reco, dense, conv,\
3 | two_layer_dense, lenet, tvm_lenet
4 | from pathlib import Path
5 | # import tvm
6 | import pytest
7 | import pprint
8 | import numpy as np
9 | import copy
10 |
11 | import pickle
12 | from onnx import numpy_helper, helper, defs
13 |
14 |
15 | # TODO: Fix this
16 | def test_lenet():
17 |     import tvm
18 |
19 |     graph, inp_info, out_info, key = lenet(coarse=True)
20 |     coarse_cpy = pickle.loads(pickle.dumps(inp_info))
21 |     res = graph(key, coarse_cpy)
22 |     np.testing.assert_allclose(res, out_info[key])
23 |     tvm_code = pm.generate_tvm(graph, inp_info, "")
24 |     pm_mod = tvm.IRModule.from_expr(tvm_code)
25 |     pm_mod = tvm.relay.transform.InferType()(pm_mod)
26 |
27 |
28 |     net = tvm_lenet()
29 |     mod = tvm.IRModule.from_expr(net)
30 |     mod = tvm.relay.transform.InferType()(mod)
31 |
32 |     print(pm_mod)
33 |     print(mod)
34 |
--------------------------------------------------------------------------------
/tests/pmlang_examples/classification.pm:
--------------------------------------------------------------------------------
1 | //spring read_data(output float x[m], output float y, param str type="csv",
2 | // param str data_path="dataset1.txt")
3 | //{
4 | //
5 | // index i[0:m-1];
6 | //
7 | // lines = fread(data_path, type, m+1);
8 | // x[i] = float(lines[i]);
9 | // y = float(lines[m]);
10 | //
11 | //
12 | //}
13 | svd(input float x[m], input float y, state float w[m], param float mu=1.0)
14 | {
15 | index i[0:m-1];
16 |
17 | h = sum[i](w[i] * x[i]);
18 | c = y * h;
19 |
20 | ny = 0 - y;
21 | p = ((c > 1) * ny);
22 | gi[i] = p* x[i];
23 |
24 | // SGD added
25 | g[i] = mu * gi[i];
26 | w[i] = w[i] - g[i];
27 | }
28 |
29 | //training_process(output float w[m])
30 | //{
31 | // float x[m], y;
32 | //
33 | // read_data(x,y);
34 | // svd(x,y,w);
35 | //}
36 | //
37 | //reservoir store_data(input float w[m],param str type="csv", param str model_path="model_path.txt")
38 | //{
39 | // fwrite(w, model_path, type);
40 | //}
41 |
42 |
43 |
44 | main()
45 | {
46 | float w_model[3], x_input[3], y_input;
47 | svd(x_input, y_input, w_model);
48 |
49 | }
50 |
--------------------------------------------------------------------------------
/tests/pmlang_examples/linear.pm:
--------------------------------------------------------------------------------
1 | // Need to define
2 | training_record(output float x[m], output float y, param str type="csv", param str path="dataset1.txt")
3 | {
4 | index i[0:m-1];
5 | // Read a file, one line at a time to populate a FIFO queue
6 | lines[i] = fread(path, type, m+1, 1);
7 |
8 | // The arguments below will format the data prior to adding to the output
9 | // queues
10 | x[i] = float(lines[i]);
11 | y = float(lines[m]);
12 | }
13 |
14 |
15 |
16 | linear_regression(input float x[m],input float y, state float w[m], param float mu=1.0)
17 | {
18 |
19 | index i[0:m-1], j[0:m-1];
20 |
21 | // Pop values off of the queue and perform operations on them,
22 | // updating a maintained state flow
23 |
24 | h = sum[i](w[i] * x[i]);
25 | d = h - y;
26 | g[i] = d * x[i];
27 |
28 | // SGD added
29 | w[i] = w[i] - mu*g[i];
30 |
31 | }
32 |
33 |
34 | trained_model(input float w[m], param str path="results.txt", param str type="csv"){
35 |
36 | fwrite(w, path, type);
37 |
38 | }
39 |
40 | // Starting point in the program
41 | main(input float x_input[784], output float y_output)
42 | {
43 | float w_model[784];
44 |
45 | training_record(x_input, y_output,"csv");
46 | linear_regression(x_input, y_output, w_model);
47 |
48 | trained_model(w_model);
49 | }
50 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | ..
2 |
3 | PolyMath: A Programming Stack for Cross-Domain Acceleration.
4 | ============================================================
5 |
6 | PolyMath is a cross-domain stack composed of a high-level cross-domain language, a multi-granular intermediate representation (the mg-DFG), and a compilation infrastructure, providing the expressiveness to bring together Robotics, Digital Signal Processing, Deep Learning, and Data Analytics while targeting a variety of accelerators.
7 |
8 | This repository implements the following:
9 |
10 | * A Python-embedded language
11 | * A compiler for the embedded language, which converts high-level programs to an mg-DFG
12 | * Serialization and de-serialization of the mg-DFG to and from protobuf, allowing portability of programs
13 | * A compiler pass infrastructure for defining optimization transformations or analyses on mg-DFGs
14 | * Lowering compiler passes which transform the mg-DFG for different types of accelerators. Currently, TVM and TABLA lowering passes are provided, and similar passes can be written for other accelerators.
15 |
16 | .. toctree::
17 |    :maxdepth: 2
18 |    :caption: Contents
19 |
20 |    polymath
21 |
22 |
23 | Indices and tables
24 | ==================
25 |
26 | * :ref:`genindex`
27 | * :ref:`modindex`
28 | * :ref:`search`
--------------------------------------------------------------------------------
/tests/pmlang_examples/logistic.pm:
--------------------------------------------------------------------------------
1 | // Need to define
2 | training_record(output float x[m], output float y, param str type="csv", param str path="dataset1.txt")
3 | {
4 | index i[0:m-1];
5 | // Read a file, one line at a time to populate a FIFO queue
6 | lines = fread(path, type, m+1, 1);
7 |
8 | // The arguments below will format the data prior to adding to the output
9 | // queues
10 | x[i] = float(lines[i]);
11 | y = float(lines[m]);
12 | }
13 |
14 |
15 |
16 | logistic_regression(input float x[m],input float y, state float w[m], param float mu=1.0)
17 | {
18 |
19 | index i[0:m-1], j[0:m-1];
20 |
21 | // Pop values off of the queue and perform operations on them,
22 | // updating a maintained state flow
23 |
24 | h = sigmoid(sum[i](w[i] * x[i]));
25 | d = h - y;
26 | g[i] = d * x[i];
27 |
28 | // SGD added
29 | w[i] = w[i] - mu*g[i];
30 |
31 | }
32 |
33 |
34 | trained_model(input float w[m], param str path="results.txt", param str type="csv"){
35 |
36 | fwrite(w, path, type);
37 |
38 | }
39 |
40 | // Starting point in the program
41 | main()
42 | {
43 | float w_model[784];
44 | float x_input[784];
45 | float y_output;
46 |
47 | training_record(x_input, y_output,"csv");
48 | logistic_regression(x_input, y_output, w_model);
49 |
50 | trained_model(w_model);
51 | }
52 |
--------------------------------------------------------------------------------
/polymath/codegen/tabla/tabla_translate.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | from polymath.codegen.tabla.tabla_pass import TablaPass
3 | import json
4 |
 5 | def generate_tabla(graph, input_dict, filepath, context_dict=None, add_kwargs=False, debug=True):
 6 |     assert len(input_dict) > 0
 7 |     shape_pass = pm.NormalizeGraph(input_dict, debug=debug)
 8 |     context_dict = context_dict or {}
 9 |
10 |     lower_pass = pm.Lower({}, debug=debug)
11 |     print("Starting graph normalization...")
12 |
13 |     shaped = shape_pass(graph)
14 |     print("Finished graph normalization. Executing lower pass.")
15 |     lowered = lower_pass(shaped)
16 |
17 |     print("Finished graph lowering, generating TABLA dfg.")
18 |     # Drop context entries for nodes removed during lowering.
19 |     for k in list(context_dict.keys()):
20 |         if k not in lowered.nodes:
21 |             context_dict.pop(k)
22 |     tabla_pass = TablaPass(context_dict, add_kwargs=add_kwargs, debug=debug)
23 |     res = tabla_pass(lowered)
24 |     print(f"Finished generating TABLA dfg, now storing to JSON file at {filepath}.")
25 |
26 |     tabla_nodes = [node for _, node in tabla_pass.dfg.items()]
27 |
28 |     with open(filepath, "w") as f:
29 |         json.dump(tabla_nodes, f, indent=4)
30 |
31 |     return tabla_nodes, res
32 |
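33 | # Illustrative usage sketch (shapes and output path are hypothetical; compare
34 | # the generated dfgs in tests/tabla_examples/):
35 | #
36 | #     tabla_ir, res = generate_tabla(graph, {"m": 3}, "linear_3.json")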
--------------------------------------------------------------------------------
/docs/polymath.rst:
--------------------------------------------------------------------------------
 1 | polymath package
 2 | ==================
 3 |
 4 | .. automodule:: polymath.srdfg.base
 5 |    :members:
 6 |    :exclude-members: get_active_graph, instantiate_node, instantiate_graph, set_name, evaluate_dependencies
 7 |    :show-inheritance:
 8 |
 9 | .. automodule:: polymath.srdfg.nodes
10 |    :members:
11 |    :show-inheritance:
12 |
13 | .. automodule:: polymath.srdfg.util
14 |    :members:
15 |    :show-inheritance:
16 |
17 | .. automodule:: polymath.srdfg.graph
18 |    :members:
19 |    :show-inheritance:
20 |
21 | .. automodule:: polymath.srdfg.index
22 |    :members:
23 |    :show-inheritance:
24 |
25 | .. automodule:: polymath.srdfg.group_nodes
26 |    :members:
27 |    :show-inheritance:
28 |
29 | .. automodule:: polymath.srdfg.nonlinear
30 |    :members:
31 |    :show-inheritance:
32 |
33 | .. automodule:: polymath.srdfg.template
34 |    :members:
35 |    :show-inheritance:
36 |
37 | .. automodule:: polymath.srdfg.serialization.serialize
38 |    :members:
39 |    :show-inheritance:
40 |
41 | .. automodule:: polymath.srdfg.passes
42 |    :members:
43 |    :show-inheritance:
44 |
45 | .. automodule:: polymath.srdfg.passes.compiler_passes
46 |    :members:
47 |    :show-inheritance:
48 |
49 | .. automodule:: polymath.codegen.tabla.tabla_translate
50 |    :members:
51 |    :show-inheritance:
52 |
53 | .. automodule:: polymath.codegen.tvmgen.tvm_translate
54 |    :members:
55 |    :show-inheritance:
56 |
--------------------------------------------------------------------------------
/polymath/srdfg/random.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | from .index import index
 3 | from .domain import Domain
 4 | from .base import Node, nodeop, func_op, slice_op, var_index, call
 5 | from polymath import DEFAULT_SHAPES, UNSET_SHAPE
 6 |
 7 | class Random(Node):
 8 |
 9 |     def __init__(self, target, *args, **kwargs):
10 |         super(Random, self).__init__(*args, target=f"{target.__module__}.{target.__name__}", **kwargs)
11 |         self.target = target
12 |
13 |     def _evaluate(self, val, **kwargs):
14 |         if "target" in kwargs:
15 |             kwargs.pop("target")
16 |         if "domain" in kwargs:
17 |             kwargs.pop("domain")
18 |         val = self.target(val, **kwargs)
19 |
20 |         if not self.is_shape_finalized():
21 |             self.shape = val.shape
22 |         return val
23 |
24 |     @property
25 |     def domain(self):
26 |         return self.kwargs["domain"]
27 |
28 |     def compute_shape(self):
29 |         raise NotImplementedError
30 |
31 |     def __call__(self, val, **kwargs):
32 |         return call(self, val, **kwargs)
33 |
34 |     def __repr__(self):
35 |         return "<random node '%s' op=%s>" % (self.name, self.op_name)
36 |
37 | class choice(Random):
38 |     def __init__(self, input_node, **kwargs):
39 |         super(choice, self).__init__(_choice, input_node, **kwargs)
40 |
41 | def _choice(a, size=None, replace=True, p=None):
42 |     return np.random.choice(a, size=size, replace=replace, p=p)
--------------------------------------------------------------------------------
/polymath/codegen/codegen_utils.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | import importlib
 3 | import pydot
 4 | import logging
 5 | import sys
 6 |
 7 | # The deprecated np.int/np.str/... aliases were removed in NumPy 1.20+;
 8 | # they were aliases for the builtins, which are used here directly.
 9 | CMLANG_CAST_MAP = {
10 |     "int": int,
11 |     "str": str,
12 |     "bool": bool,
13 |     "float": float,
14 |     "complex": complex,
15 | }
16 |
17 |
18 | def get_func(function_name):
19 |     mod_id, func_id = function_name.rsplit('.', 1)
20 |     try:
21 |         mod = importlib.import_module(mod_id)
22 |     except ModuleNotFoundError:
23 |         logging.error(f"Unable to import {mod_id}. Exiting")
24 |         exit(1)
25 |     func = getattr(mod, func_id)
26 |     return func
27 |
28 | def visualize(input_proto, graph, graph_name, output_dir, output_file):
29 |     rankdir = "TB"
30 |     pydot_graph = pydot.Dot(name=input_proto, rankdir=rankdir)
31 |
32 |     # NOTE: GetPydotGraph is not defined or imported in this module; it must be
33 |     # supplied by the surrounding environment (it matches the helper of the
34 |     # same name in caffe2's net_drawer).
35 |     out_graph = GetPydotGraph(graph, name=graph_name, rankdir=rankdir)
36 |     filename = output_dir + '/' + output_file[:-3] + '.dot'
37 |     pydot_graph.add_subgraph(out_graph)
38 |
39 |     pydot_graph.write(filename, format='raw')
40 |     png_filename = filename[:-3] + 'png'
41 |     try:
42 |         pydot_graph.write_png(png_filename)
43 |
44 |     except Exception:
45 |         print(
46 |             'Error when writing out the png file. Pydot requires graphviz '
47 |             'to convert dot files to png, and you may not have installed '
48 |             'graphviz. On ubuntu this can usually be installed with "sudo '
49 |             'apt-get install graphviz". We have generated the .dot file '
50 |             'but will not be able to generate the png file for now.'
51 |         )
52 |
--------------------------------------------------------------------------------
/tests/test_rl_algorithms.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | # import gym
3 | # from tests.reference_implementations.rl_algorithms import create_actor_critic, TORCH_NP_FN_MAP,\
4 | # lognormalize, probs_to_logits, logits_to_probs
5 | # from tests.reference_implementations.ppo import MLPActorCritic
6 | import torch
7 | import numpy as np
8 | from torch import nn
9 | import random
10 |
11 | RANDOM_SEED = 0
12 |
13 | # @pytest.mark.parametrize('env_name, hidden_sizes, activation', [
14 | # ('CartPole-v0', (64, 64), nn.ReLU)
15 | # ])
16 | # def test_ppo1(env_name, hidden_sizes, activation):
17 | # env = gym.make(env_name)
18 | # env.seed(RANDOM_SEED)
19 | #
20 | # ac_ref = MLPActorCritic(env.observation_space, env.action_space,
21 | # hidden_sizes=hidden_sizes, activation=activation)
22 | # np_act = TORCH_NP_FN_MAP[activation.__name__]
23 | # ac_np = create_actor_critic(env.observation_space, env.action_space,
24 | # hidden_sizes, np_act, ac_ref, env)
25 | # # torch.manual_seed(RANDOM_SEED)
26 | # # np.random.seed(RANDOM_SEED)
27 | # # random.seed(RANDOM_SEED)
28 | # # env.seed(RANDOM_SEED)
29 | # #
30 | # # o, ep_ret, ep_len = env.reset(), 0, 0
31 | # # o = np.float32(o)
32 | # # # [-0.08411545]
33 | # # # tensor(-0.7085)
34 | # # torch.set_deterministic(True)
35 | # # with torch.no_grad():
36 | # #
37 | # # o_torch = torch.as_tensor(o, dtype=torch.float32)
38 | # # a, v, logp = ac_ref.step(o_torch)
39 | # #
40 | # # a_np, logp_np, v_np = ac_np.step(o)
41 | #
42 | # # Numpy implementation
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Welcome to PolyMath!
2 |
3 | PolyMath is a framework comprising both a high-level language and a Python-embedded language for compilation on heterogeneous hardware.
4 |
5 | This document will help you get up and running.
6 |
7 | ### Step 0: Check prerequisites
8 | The following dependencies must be met by your system:
9 | * python >= 3.7 (For [PEP 560](https://www.python.org/dev/peps/pep-0560/) support)
10 |
11 |
12 | ### Step 1: Clone the PolyMath source code
13 | ```console
14 | $ git clone https://github.com/he-actlab/polymath
15 | $ cd polymath
16 | ```
17 |
18 |
19 | ### Step 2: Create a [Python virtualenv](https://docs.python.org/3/tutorial/venv.html)
20 | Note: You may choose to skip this step if you are doing a system-wide install for multiple users.
21 | Please DO NOT skip this step if you are installing for personal use and/or you are a developer.
22 | ```console
23 | $ python -m venv general
24 | $ source general/bin/activate
25 | $ python -m pip install pip --upgrade
26 | ```
27 |
28 | ### Step 3: Install PolyMath
29 | If you already have a working installation of Python 3.7 or Python 3.8, the easiest way to install PolyMath is:
30 | ```console
31 | $ pip install -e .
32 | ```
33 |
34 | ### Step 4: Run an example
35 | You can look at the examples in the `examples/` directory to see how the PolyMath language works; a minimal sketch is also included at the end of this README.
36 |
37 | ## Citing us
38 | If you use this work, please cite our paper, PolyMath, published in the 2021 IEEE International Symposium on High Performance Computer Architecture (HPCA).
39 |
40 | ```
41 | S. Kinzer, J.K. Kim, S. Ghodrati, B. Yatham, A. Althoff, D. Mahajan, S. Lerner, and H. Esmaeilzadeh, "A Computational Stack for Cross-Domain Acceleration", in the IEEE International Symposium on High Performance Computer Architecture (HPCA), 2021.
42 | ```
43 |
44 |
45 |
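46 | ### Appendix: a minimal example
47 |
48 | The snippet below is a minimal sketch of the Python-embedded language, modeled on the patterns in `tests/test_domain_ops.py`; the graph name, shapes, and values are illustrative only.
49 |
50 | ```python
51 | import numpy as np
52 | import polymath as pm
53 |
54 | # Build an mg-DFG computing the dot product h = sum_i(w[i] * x[i]).
55 | with pm.Node(name="linreg") as graph:
56 |     m = pm.parameter(name="m")           # symbolic dimension
57 |     x = pm.input("x", shape=(m,))        # input placeholder
58 |     w = pm.state("w", shape=(m,))        # model state
59 |     i = pm.index(0, m - 1, name="i")     # index spanning 0..m-1
60 |     h = pm.sum([i], w[i] * x[i], name="h")
61 |
62 | # Evaluate a node by name, binding concrete values to the placeholders.
63 | x_np = np.arange(3, dtype=np.float64)
64 | w_np = np.ones(3)
65 | res = graph("h", {"m": 3, "x": x_np, "w": w_np})
66 | np.testing.assert_allclose(res, x_np @ w_np)
67 | ```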
--------------------------------------------------------------------------------
/polymath/pmlang/antlr_generator/PMLang.tokens:
--------------------------------------------------------------------------------
1 | T__0=1
2 | T__1=2
3 | T__2=3
4 | T__3=4
5 | T__4=5
6 | T__5=6
7 | T__6=7
8 | T__7=8
9 | T__8=9
10 | T__9=10
11 | T__10=11
12 | T__11=12
13 | T__12=13
14 | T__13=14
15 | T__14=15
16 | T__15=16
17 | T__16=17
18 | T__17=18
19 | T__18=19
20 | T__19=20
21 | T__20=21
22 | T__21=22
23 | T__22=23
24 | INPUT=24
25 | OUTPUT=25
26 | STATE=26
27 | PARAMETER=27
28 | SPRING=28
29 | RESERVOIR=29
30 | COMPONENT=30
31 | INDEX=31
32 | FLOW=32
33 | ARRAYMUL=33
34 | ARRAYDIV=34
35 | ARRAYRDIV=35
36 | POW=36
37 | BREAK=37
38 | RETURN=38
39 | FUNCTION=39
40 | GROUP_FUNCTION=40
41 | FOR=41
42 | WHILE=42
43 | END=43
44 | GLOBAL=44
45 | IF=45
46 | CLEAR=46
47 | ELSE=47
48 | ELSEIF=48
49 | LE_OP=49
50 | GE_OP=50
51 | EQ_OP=51
52 | NE_OP=52
53 | TRANSPOSE=53
54 | NCTRANSPOSE=54
55 | SEMI=55
56 | STRING_LITERAL=56
57 | IDENTIFIER=57
58 | DECIMAL_INTEGER=58
59 | OCT_INTEGER=59
60 | HEX_INTEGER=60
61 | BIN_INTEGER=61
62 | IMAG_NUMBER=62
63 | FLOAT_NUMBER=63
64 | EQ=64
65 | WHITESPACE=65
66 | NEWLINE=66
67 | BLOCKCOMMENT=67
68 | LINECOMMENT=68
69 | '('=1
70 | ')'=2
71 | '{'=3
72 | '}'=4
73 | ','=5
74 | '['=6
75 | ']'=7
76 | ':'=8
77 | '+'=9
78 | '-'=10
79 | '*'=11
80 | '/'=12
81 | '%'=13
82 | '<'=14
83 | '>'=15
84 | '?'=16
85 | 'int'=17
86 | 'float'=18
87 | 'str'=19
88 | 'bool'=20
89 | 'complex'=21
90 | 'fxp'=22
91 | '_'=23
92 | 'input'=24
93 | 'output'=25
94 | 'state'=26
95 | 'param'=27
96 | 'spring'=28
97 | 'reservoir'=29
98 | 'component'=30
99 | 'index'=31
100 | 'flow'=32
101 | '.*'=33
102 | '.\\'=34
103 | './'=35
104 | '^'=36
105 | 'break'=37
106 | 'return'=38
107 | 'function'=39
108 | 'for'=41
109 | 'while'=42
110 | 'end'=43
111 | 'global'=44
112 | 'if'=45
113 | 'clear'=46
114 | 'else'=47
115 | 'elseif'=48
116 | '<='=49
117 | '>='=50
118 | '=='=51
119 | '!='=52
120 | 'transpose'=53
121 | '.\''=54
122 | ';'=55
123 | '='=64
124 |
--------------------------------------------------------------------------------
/polymath/pmlang/antlr_generator/PMLangLexer.tokens:
--------------------------------------------------------------------------------
1 | T__0=1
2 | T__1=2
3 | T__2=3
4 | T__3=4
5 | T__4=5
6 | T__5=6
7 | T__6=7
8 | T__7=8
9 | T__8=9
10 | T__9=10
11 | T__10=11
12 | T__11=12
13 | T__12=13
14 | T__13=14
15 | T__14=15
16 | T__15=16
17 | T__16=17
18 | T__17=18
19 | T__18=19
20 | T__19=20
21 | T__20=21
22 | T__21=22
23 | T__22=23
24 | INPUT=24
25 | OUTPUT=25
26 | STATE=26
27 | PARAMETER=27
28 | SPRING=28
29 | RESERVOIR=29
30 | COMPONENT=30
31 | INDEX=31
32 | FLOW=32
33 | ARRAYMUL=33
34 | ARRAYDIV=34
35 | ARRAYRDIV=35
36 | POW=36
37 | BREAK=37
38 | RETURN=38
39 | FUNCTION=39
40 | GROUP_FUNCTION=40
41 | FOR=41
42 | WHILE=42
43 | END=43
44 | GLOBAL=44
45 | IF=45
46 | CLEAR=46
47 | ELSE=47
48 | ELSEIF=48
49 | LE_OP=49
50 | GE_OP=50
51 | EQ_OP=51
52 | NE_OP=52
53 | TRANSPOSE=53
54 | NCTRANSPOSE=54
55 | SEMI=55
56 | STRING_LITERAL=56
57 | IDENTIFIER=57
58 | DECIMAL_INTEGER=58
59 | OCT_INTEGER=59
60 | HEX_INTEGER=60
61 | BIN_INTEGER=61
62 | IMAG_NUMBER=62
63 | FLOAT_NUMBER=63
64 | EQ=64
65 | WHITESPACE=65
66 | NEWLINE=66
67 | BLOCKCOMMENT=67
68 | LINECOMMENT=68
69 | '('=1
70 | ')'=2
71 | '{'=3
72 | '}'=4
73 | ','=5
74 | '['=6
75 | ']'=7
76 | ':'=8
77 | '+'=9
78 | '-'=10
79 | '*'=11
80 | '/'=12
81 | '%'=13
82 | '<'=14
83 | '>'=15
84 | '?'=16
85 | 'int'=17
86 | 'float'=18
87 | 'str'=19
88 | 'bool'=20
89 | 'complex'=21
90 | 'fxp'=22
91 | '_'=23
92 | 'input'=24
93 | 'output'=25
94 | 'state'=26
95 | 'param'=27
96 | 'spring'=28
97 | 'reservoir'=29
98 | 'component'=30
99 | 'index'=31
100 | 'flow'=32
101 | '.*'=33
102 | '.\\'=34
103 | './'=35
104 | '^'=36
105 | 'break'=37
106 | 'return'=38
107 | 'function'=39
108 | 'for'=41
109 | 'while'=42
110 | 'end'=43
111 | 'global'=44
112 | 'if'=45
113 | 'clear'=46
114 | 'else'=47
115 | 'elseif'=48
116 | '<='=49
117 | '>='=50
118 | '=='=51
119 | '!='=52
120 | 'transpose'=53
121 | '.\''=54
122 | ';'=55
123 | '='=64
124 |
--------------------------------------------------------------------------------
/tests/test_domain_ops.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | import numpy as np
3 | from itertools import product
4 | import pytest
5 | from polymath.srdfg.templates.template_utils import _get_elem_indices
6 |
7 |
 8 | def test_index_op():
 9 |     with pm.Node(name="indexop") as graph:
10 |         m = pm.parameter(name="m")
11 |         n = pm.parameter(name="n")
12 |         i = pm.index(0, m-1, name="i")
13 |         j = pm.index(0, n-1, name="j")
14 |         i_ = (i + 1).set_name("i_")
15 |
16 |         k = (i + j).set_name("k")
17 |     m_ = 5
18 |     n_ = 3
19 |     input_info = {"m": m_, "n": n_}
20 |     res = graph("k", input_info)
21 |     op1 = np.arange(0, m_)
22 |     op2 = np.arange(0, n_)
23 |     value = np.array(list(product(*(op1, op2))))
24 |     value = np.array(list(map(lambda x: x[0]+x[1], value)))
25 |     np.testing.assert_allclose(res, value)
26 |
27 | @pytest.mark.parametrize('a_shape, b_shape, c_shape', [
28 |     ((8, 1), (1, 16), (8, 16)),
29 |     ((8, 2), (2, 16), (8, 2, 16))
30 | ])
31 | def test_broadcast(a_shape, b_shape, c_shape):
32 |
33 |     from einops import repeat
34 |     with pm.Node(name="broadcast") as graph:
35 |         a = pm.input("a", shape=a_shape)
36 |         b = pm.input("b", shape=b_shape)
37 |         c = pm.output("c", shape=c_shape)
38 |         a_idx, b_idx, c_idx = _get_elem_indices(a, b, c)
39 |
40 |         c[c_idx] = a[a_idx] + b[b_idx]
41 |
42 |     a_np = np.random.randint(0, 32, np.prod(a_shape)).reshape(a_shape)
43 |     b_np = np.random.randint(0, 32, np.prod(b_shape)).reshape(b_shape)
44 |     if len(c_shape) > 2:
45 |         c_np_out = np.zeros(c_shape)
46 |     else:
47 |         c_np_out = np.zeros((c_shape[0], 1, c_shape[1]))
48 |
49 |     a_np_t = repeat(a_np, 'i k -> i k j', j=b_shape[1])
50 |     b_np_t = repeat(b_np, 'i k -> j i k', j=a_shape[0])
51 |     actual_res = (a_np_t + b_np_t).squeeze()
52 |     graph_res = graph("c", {"a": a_np, "b": b_np})
53 |
54 |     np.testing.assert_allclose(graph_res, actual_res)
55 |
--------------------------------------------------------------------------------
/tests/old/test_mgdfg.py:
--------------------------------------------------------------------------------
1 |
2 | # from polymath.mgdfg.base import Node, placeholder
3 | # from polymath.mgdfg.nodes import constant, index, var_index, variable,\
4 | # sum, pb_store, pb_load
5 | # import numpy as np
6 | # from .util import compare_nodes
7 | # from polymath.mgdfg.util import visualize
8 | #
9 | # import os
10 |
11 | #
12 | # def test_linear():
13 | # file = "linear.pm"
14 | # base_path = f"./pmlang_examples"
15 | # full_path = f"./pmlang_examples/{file}"
16 | #
17 | # pmlang_graph = parse_file(full_path)
18 | # mgdfg_gen(pmlang_graph.components)
19 | #
20 | #
21 | # def test_backprop():
22 | # file = "backpropagation.pm"
23 | # base_path = f"./pmlang_examples"
24 | # full_path = f"./pmlang_examples/{file}"
25 | # pmlang_graph = parse_file(full_path)
26 | # mgdfg_gen(pmlang_graph.components)
27 | #
28 | #
29 | # def test_logistic():
30 | # file = "logistic.pm"
31 | # base_path = f"./pmlang_examples"
32 | # full_path = f"./pmlang_examples/{file}"
33 | # pmlang_graph = parse_file(full_path)
34 | # mgdfg_gen(pmlang_graph.components)
35 | #
36 | # def test_recommender():
37 | # file = "recommender.pm"
38 | # base_path = f"./pmlang_examples"
39 | # full_path = f"./pmlang_examples/{file}"
40 | # pmlang_graph = parse_file(full_path)
41 | # mgdfg_gen(pmlang_graph.components)
42 | #
43 | # def test_lenet():
44 | # file = "lenet.pm"
45 | # base_path = f"./pmlang_examples"
46 | # full_path = f"./pmlang_examples/{file}"
47 | # pmlang_graph = parse_file(full_path)
48 | # mgdfg_gen(pmlang_graph.components)
49 | #
50 | # def test_yolo():
51 | # file = "yolodnn.pm"
52 | # base_path = f"./pmlang_examples"
53 | # full_path = f"./pmlang_examples/{file}"
54 | # pmlang_graph = parse_file(full_path)
55 | # mgdfg_gen(pmlang_graph.components)
56 | #
57 | # def test_resnet():
58 | # file = "resnet18.pm"
59 | # base_path = f"./pmlang_examples"
60 | # full_path = f"./pmlang_examples/{file}"
61 | # pmlang_graph = parse_file(full_path)
62 | # mgdfg_gen(pmlang_graph.components)
63 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/yolo_tinyv2.cfg:
--------------------------------------------------------------------------------
1 | [net]
2 | batch=64
3 | subdivisions=8
4 | width=416
5 | height=416
6 | channels=3
7 | momentum=0.9
8 | decay=0.0005
9 | angle=0
10 | saturation = 1.5
11 | exposure = 1.5
12 | hue=.1
13 |
14 | learning_rate=0.001
15 | max_batches = 40100
16 | policy=steps
17 | steps=-1,100,20000,30000
18 | scales=.1,10,.1,.1
19 |
20 | [convolutional]
21 | batch_normalize=1
22 | filters=16
23 | size=3
24 | stride=1
25 | pad=1
26 | activation=leaky
27 |
28 | [maxpool]
29 | size=2
30 | stride=2
31 |
32 | [convolutional]
33 | batch_normalize=1
34 | filters=32
35 | size=3
36 | stride=1
37 | pad=1
38 | activation=leaky
39 |
40 | [maxpool]
41 | size=2
42 | stride=2
43 |
44 |
45 |
46 | [convolutional]
47 | batch_normalize=1
48 | filters=64
49 | size=3
50 | stride=1
51 | pad=1
52 | activation=leaky
53 |
54 | [maxpool]
55 | size=2
56 | stride=2
57 |
58 | [convolutional]
59 | batch_normalize=1
60 | filters=128
61 | size=3
62 | stride=1
63 | pad=1
64 | activation=leaky
65 |
66 | [maxpool]
67 | size=2
68 | stride=2
69 |
70 | [convolutional]
71 | batch_normalize=1
72 | filters=256
73 | size=3
74 | stride=1
75 | pad=1
76 | activation=leaky
77 |
78 | [maxpool]
79 | size=2
80 | stride=2
81 |
82 | [convolutional]
83 | batch_normalize=1
84 | filters=512
85 | size=3
86 | stride=1
87 | pad=1
88 | activation=leaky
89 |
90 | [maxpool]
91 | size=2
92 | stride=1
93 |
94 | [convolutional]
95 | batch_normalize=1
96 | filters=1024
97 | size=3
98 | stride=1
99 | pad=1
100 | activation=leaky
101 |
102 | ###########
103 |
104 | [convolutional]
105 | batch_normalize=1
106 | size=3
107 | stride=1
108 | pad=1
109 | filters=1024
110 | activation=leaky
111 |
112 | [convolutional]
113 | size=1
114 | stride=1
115 | pad=1
116 | filters=125
117 | activation=linear
118 |
119 | [region]
120 | anchors = 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
121 | bias_match=1
122 | classes=20
123 | coords=4
124 | num=5
125 | softmax=1
126 | jitter=.2
127 | rescore=1
128 |
129 | object_scale=5
130 | noobject_scale=1
131 | class_scale=1
132 | coord_scale=1
133 |
134 | absolute=1
135 | thresh = .5
136 | random=1
137 |
--------------------------------------------------------------------------------
/polymath/codegen/tvmgen/utils.py:
--------------------------------------------------------------------------------
1 | from tvm.contrib import graph_runtime
2 | from tvm import relay
3 | import tvm
4 | import numpy as np
5 | from tvm.relay.testing import init
6 |
7 | def benchmark_execution(mod,
8 | params,
9 | measure=True,
10 | data_shape=(1, 3, 416, 416),
11 | out_shape=(1, 125, 14, 14),
12 | dtype='float32'):
13 | def get_tvm_output(mod, data, params, target, ctx, dtype='float32'):
14 | with relay.build_config(opt_level=1):
15 |
16 | graph, lib, params = relay.build(mod, target, params=params)
17 |
18 | m = graph_runtime.create(graph, lib, ctx)
19 | # set inputs
20 | m.set_input("data", data)
21 | m.set_input(**params)
22 | m.run()
23 | out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
24 |
25 | if measure:
26 | print("Evaluate graph_name runtime inference time cost...")
27 | ftimer = m.module.time_evaluator("run", ctx, number=1, repeat=20)
28 | # Measure in millisecond.
29 | prof_res = np.array(ftimer().results) *1000
30 | print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
31 | (np.mean(prof_res), np.std(prof_res)))
32 |
33 | return out.asnumpy()
34 |
35 | # random input
36 | data = np.random.uniform(size=data_shape).astype(dtype)
37 | target = "llvm"
38 | ctx = tvm.cpu(0)
39 |
40 | tvm_out = get_tvm_output(mod, tvm.nd.array(data.astype(dtype)), params,
41 | target, ctx, dtype)
42 |
43 | def execute_graph(net, print_mod=True, print_params=True, benchmark=True):
44 |
45 | mod, params = init.create_workload(net)
46 |
47 | if print_mod:
48 | print(f"Module: {mod}")
49 |
50 | if print_params:
51 | for p in params.keys():
52 | print(f"Key: {p}, shape: {params[p].shape}")
53 |
54 | if benchmark:
55 | # benchmark_execution(mod, params, data_shape=(1, 3, 416, 416), out_shape=(1, 125, 14, 14))
56 | # benchmark_execution(mod, params, data_shape=(1, 3, 416, 416), out_shape=(1, 125, 14, 14))
57 | benchmark_execution(mod, params, data_shape=(1, 3, 224, 224), out_shape=(1,1000))
58 |
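59 | # Hedged usage sketch (not part of the original file): feed a reference
60 | # network from tvm.relay.testing through execute_graph. Assumes an older
61 | # TVM release matching the relay.build_config API used above, where
62 | # resnet.get_net returns a relay function suitable for create_workload.
63 | # from tvm.relay.testing import resnet
64 | # net = resnet.get_net(batch_size=1, num_classes=1000, num_layers=18,
65 | #                      image_shape=(3, 224, 224))
66 | # execute_graph(net, print_mod=False, print_params=False, benchmark=True)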
--------------------------------------------------------------------------------
/polymath/srdfg/serialization/ndarray_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: ndarray.proto
4 | """Generated protocol buffer code."""
5 | from google.protobuf import descriptor as _descriptor
6 | from google.protobuf import message as _message
7 | from google.protobuf import reflection as _reflection
8 | from google.protobuf import symbol_database as _symbol_database
9 | # @@protoc_insertion_point(imports)
10 |
11 | _sym_db = _symbol_database.Default()
12 |
13 |
14 |
15 |
16 | DESCRIPTOR = _descriptor.FileDescriptor(
17 | name='ndarray.proto',
18 | package='numproto.protobuf',
19 | syntax='proto3',
20 | serialized_options=None,
21 | create_key=_descriptor._internal_create_key,
22 | serialized_pb=b'\n\rndarray.proto\x12\x11numproto.protobuf\"\x1a\n\x07NDArray\x12\x0f\n\x07ndarray\x18\x01 \x01(\x0c\x62\x06proto3'
23 | )
24 |
25 |
26 |
27 |
28 | _NDARRAY = _descriptor.Descriptor(
29 | name='NDArray',
30 | full_name='numproto.protobuf.NDArray',
31 | filename=None,
32 | file=DESCRIPTOR,
33 | containing_type=None,
34 | create_key=_descriptor._internal_create_key,
35 | fields=[
36 | _descriptor.FieldDescriptor(
37 | name='ndarray', full_name='numproto.protobuf.NDArray.ndarray', index=0,
38 | number=1, type=12, cpp_type=9, label=1,
39 | has_default_value=False, default_value=b"",
40 | message_type=None, enum_type=None, containing_type=None,
41 | is_extension=False, extension_scope=None,
42 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
43 | ],
44 | extensions=[
45 | ],
46 | nested_types=[],
47 | enum_types=[
48 | ],
49 | serialized_options=None,
50 | is_extendable=False,
51 | syntax='proto3',
52 | extension_ranges=[],
53 | oneofs=[
54 | ],
55 | serialized_start=36,
56 | serialized_end=62,
57 | )
58 |
59 | DESCRIPTOR.message_types_by_name['NDArray'] = _NDARRAY
60 | _sym_db.RegisterFileDescriptor(DESCRIPTOR)
61 |
62 | NDArray = _reflection.GeneratedProtocolMessageType('NDArray', (_message.Message,), {
63 | 'DESCRIPTOR' : _NDARRAY,
64 | '__module__' : 'ndarray_pb2'
65 | # @@protoc_insertion_point(class_scope:numproto.protobuf.NDArray)
66 | })
67 | _sym_db.RegisterMessage(NDArray)
68 |
69 |
70 | # @@protoc_insertion_point(module_scope)
71 |
--------------------------------------------------------------------------------
/polymath/srdfg/template_utils.py:
--------------------------------------------------------------------------------
1 | from graphviz import Digraph
2 | import polymath.srdfg.base as poly
3 |
4 | from typing import TYPE_CHECKING, Dict
5 | from polymath.pmlang.antlr_generator.parser import InputStream, CommonTokenStream, PMLangParser
6 | from polymath.pmlang.antlr_generator.lexer import PMLangLexer
7 | import polymath.srdfg.serialization.mgdfgv2_pb2 as mgdfg
8 |
9 | if TYPE_CHECKING:
10 | from polymath.srdfg.template import Template
11 | from polymath.srdfg.graph_objects import Node, Edge
12 |
13 | def visualize_component(component: 'Template', filepath, verbose=True):
14 | vis_graph = Digraph(component.name)
15 | edge_count = 0
16 | for node in component._nodes:
17 | vis_graph.node(str(node.node_id), label=get_node_label(node, verbose=verbose))
18 | edge_count += len(node.in_edges)
19 | for in_edge_id in node.in_edges:
20 | in_edge = component.get_edge(in_edge_id)
21 | if in_edge.dest_id != node.node_id:
22 |             raise ValueError(f"Input edge destination id does not match "
23 |                              f"the id of its destination node: "
24 |                              f"\nEdge dest: {in_edge.edge_str}\n\nNode id: {node.node_str}")
25 |
26 | vis_graph.edge(str(in_edge.source_id), str(in_edge.dest_id), label=get_edge_label(in_edge, verbose=verbose))
27 | print(f"Edge from nodes: {edge_count}\nEdges from Edges: {len(component.edges)}")
28 | name = f"{filepath}/{component.name}"
29 | vis_graph.render(name, view=False)
30 |
31 | def get_edge_label(edge: 'Edge', verbose=False) -> str:
32 | if verbose:
33 | return edge.edge_str
34 | else:
35 | return edge.edge_name
36 |
37 | def get_node_label(node: 'Node', verbose=False) -> str:
38 | if verbose:
39 | label = f"{node.node_str}"
40 | else:
41 | label = f"{node.op_name}"
42 | return label
43 |
44 | def parse_statement_str(statement: str):
45 | chars = InputStream(statement)
46 | lexer = PMLangLexer(chars)
47 | tokens = CommonTokenStream(lexer)
48 | parser = PMLangParser(tokens)
49 | parse_stmt = parser.statement()
50 | return parse_stmt.getChild(0)
51 |
52 | def node_from_pb(node: mgdfg.Node) -> 'Node':
53 | pass
54 |
55 | def serialize_graph(components: Dict[str, mgdfg.Template]):
56 | pass
57 |
58 |
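59 | # Hedged usage sketch (not part of the original file): parse a single
60 | # PMLang statement string into its first parse-tree child. The statement
61 | # text below is an illustrative assumption, not taken from the tests.
62 | # stmt = parse_statement_str("out[i] = in[i] * w[i];")
63 | # print(type(stmt).__name__)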
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/tensor.py:
--------------------------------------------------------------------------------
1 | from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import FQDtype
2 | import numpy as np
3 | import math
4 |
5 | class Tensor(object):
6 | """
7 | Tensor class for computations
8 | n-dimensional array
9 | """
10 | def __init__(self, shape, name, data, dtype=FQDtype.FP32, trainable=False):
11 | if isinstance(shape, int):
12 | shape = tuple([shape])
13 | self.shape = shape
14 | self.dtype = dtype
15 | self.name = name
16 | self.trainable = trainable
17 | self.op = None
18 | self.output_nodes = []
19 | self.data = data
20 |
21 | self.fpga_addr = None
22 | _pad = []
23 | for i in range(len(self.shape)):
24 | _pad.append((0,0))
25 |
26 | self.fpga_pad = tuple(_pad)
27 | _padded_shape = []
28 | for i in range(len(self.shape)):
29 | _padded_shape.append(self.shape[i] + self.fpga_pad[i][0] + self.fpga_pad[i][1])
30 |
31 | def initialize_data(self, value):
32 | self.data = value
33 |
34 | def __str__(self):
35 | if isinstance(self.shape, tuple):
36 | shape_str = '[' + ','.join([str(x) for x in self.shape]) + ']'
37 | else:
38 | shape_str = '[' + str(self.shape) + ']'
39 | return '{}{} ({})'.format(self.name, shape_str, self.dtype.__str__())
40 | # return '{}{}'.format(self.name, shape_str)
41 |
42 | @property
43 | def size(self):
44 | return np.prod(self.shape)
45 |
46 | @property
47 | def fpga_shape(self):
48 | _padded_shape = []
49 | for i in range(len(self.shape)):
50 | if isinstance(self.fpga_pad, int):
51 | _padded_shape.append(self.shape[i] + self.fpga_pad*2)
52 | elif isinstance(self.fpga_pad[i], int):
53 | _padded_shape.append(self.shape[i] + self.fpga_pad[i]*2)
54 | else:
55 | _padded_shape.append(self.shape[i] + self.fpga_pad[i][0] + self.fpga_pad[i][1])
56 | return tuple(_padded_shape)
57 |
58 | @property
59 | def fpga_size(self):
60 | return np.prod(self.fpga_shape)
61 |
62 | @property
63 | def fpga_size_in_bytes(self):
64 | return self.fpga_size * self.dtype.bits / 8
65 |
66 | @property
67 | def size_in_bytes(self):
68 | return int(math.ceil(float(self.size * self.dtype.bits) / 8))
69 |
70 |
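71 | # Hedged usage sketch (not part of the original file): a 4-D activation
72 | # tensor at 16-bit fixed point. With the zero padding set up in
73 | # __init__, fpga_shape equals shape and size_in_bytes is
74 | # ceil(1*3*416*416 * 16 / 8) = 1038336.
75 | # t = Tensor((1, 3, 416, 416), 'data', None, dtype=FQDtype.FXP16)
76 | # print(t.size_in_bytes, t.fpga_shape)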
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/scalar/dtypes.py:
--------------------------------------------------------------------------------
1 | class Dtype(object):
2 | def __init__(self, op_str):
3 | self.op_str = op_str
4 | def __str__(self):
5 | return str(self.op_str)
6 | def __eq__(self, other):
7 |         if isinstance(other, type(self)):
8 | return other.bits == self.bits
9 | else: return False
10 | def __ne__(self, other):
11 | return not self.__eq__(other)
12 |
13 | class FixedPoint(Dtype):
14 | def __init__(self, bits, frac_bits):
15 | self.op_str = 'FXP{}'.format(bits)
16 | self.bits = bits
17 | self.frac_bits = frac_bits
18 | self.int_bits = self.bits - self.frac_bits
19 | def __str__(self):
20 | return '{} ({},{})'.format(super(FixedPoint, self).__str__(), self.int_bits, self.frac_bits)
21 | def __eq__(self, other):
22 | if isinstance(other, FixedPoint):
23 | return other.bits == self.bits and other.frac_bits == self.frac_bits
24 | else:
25 | return False
26 | def __ne__(self, other):
27 | result = not self.__eq__(other)
28 | return result
29 |
30 | class Log(Dtype):
31 | def __init__(self, exp_bits):
32 | self.op_str = 'Log{}'.format(exp_bits)
33 | self.bits = 2
34 | self.exp_bits = exp_bits
35 |
36 | class Binary(FixedPoint):
37 | def __init__(self):
38 | self.bits = 1
39 | self.op_str = 'Binary'
40 | self.frac_bits = 0
41 | self.int_bits = 1
42 | def __str__(self):
43 | return 'Binary'
44 |
45 | class CustomFloat(Dtype):
46 | def __init__(self, bits, exp_bits):
47 | self.bits = bits
48 | self.exp_bits = exp_bits
49 | self.op_str = 'Custom Float({},{})'.format(self.bits, self.exp_bits)
50 |
51 | class Float(Dtype):
52 | def __init__(self, bits):
53 | assert bits in (16, 32)
54 | self.bits = bits
55 | self.op_str = 'FP{}'.format(self.bits)
56 |
57 | class DTypes(object):
58 | FP32 = Float(32)
59 | FP16 = Float(16)
60 | FXP32 = FixedPoint(32,16)
61 | FXP16 = FixedPoint(16,8)
62 | FXP8 = FixedPoint(8,8)
63 | FXP4 = FixedPoint(4,4)
64 | FXP2 = FixedPoint(2,2)
65 | Bin = Binary()
66 | FXP6 = FixedPoint(6,6)
67 | Log6 = Log(6)
68 | Log4 = Log(4)
69 | FP_16_5 = CustomFloat(16, 5)
70 | FP_12_5 = CustomFloat(12, 5)
71 | FP_8_5 = CustomFloat(8, 5)
72 |
73 | FQDtype = DTypes()
74 |
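75 | # Hedged usage sketch (not part of the original file): fixed-point dtype
76 | # equality compares both the total and fractional bit widths, and the
77 | # string form shows (integer bits, fractional bits).
78 | # assert FixedPoint(16, 8) == FQDtype.FXP16
79 | # assert FixedPoint(16, 4) != FQDtype.FXP16
80 | # print(FQDtype.FXP16)  # FXP16 (8,8)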
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | serial/__pycache__/
2 | ssa.txt
3 | dfg.json
4 | *.*~
5 | meeting_0831.txt
6 | tabla.dot
7 | tabla.ps
8 | clean.sh
9 | schedule.json
10 | tabla.jpeg
11 | !description.json
12 | !*_config.json
13 | *.dot
14 | *.ipynb*
15 | .idea
16 | venv/
17 | *.pyc
18 | ~$*.ppt*
19 | polymath/archive
20 | polymath/experimental
21 | polymath/onnx_cmstack/models
22 | tests/pmlang_examples/outputs/*
23 | prof
24 | !tests/pmlang_examples/outputs/placeholder.txt
25 | *.pb
26 | *.onnx
27 | !tests/onnx_examples/*.onnx
28 | archive/
29 | *.png
30 | *.pb
31 | *.dot
32 | # Byte-compiled / optimized / DLL files
33 | __pycache__/
34 | *.py[cod]
35 | *$py.class
36 |
37 | # C extensions
38 | *.so
39 |
40 | # Distribution / packaging
41 | .Python
42 | env/
43 | build/
44 | develop-eggs/
45 | dist/
46 | downloads/
47 | eggs/
48 | .eggs/
49 | lib/
50 | lib64/
51 | parts/
52 | sdist/
53 | var/
54 | *.egg-info/
55 | .installed.cfg
56 | *.egg
57 |
58 | # PyInstaller
59 | # Usually these files are written by a python script from a template
60 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
61 | *.manifest
62 | *.spec
63 |
64 | # Installer logs
65 | pip-log.txt
66 | pip-delete-this-directory.txt
67 |
68 | # Unit test / coverage reports
69 | htmlcov/
70 | .tox/
71 | .coverage
72 | .coverage.*
73 | .cache
74 | nosetests.xml
75 | coverage.xml
76 | *,cover
77 | .hypothesis/
78 | .pytest_cache/
79 |
80 | # Translations
81 | *.mo
82 | *.pot
83 |
84 | # Django stuff:
85 | *.log
86 | local_settings.py
87 |
88 | # Flask stuff:
89 | instance/
90 | .webassets-cache
91 |
92 | # Scrapy stuff:
93 | .scrapy
94 |
95 | # Sphinx documentation
96 | docs/_build/
97 |
98 | # PyBuilder
99 | target/
100 |
101 | # IPython Notebook
102 | .ipynb_checkpoints
103 |
104 | # pyenv
105 | .python-version
106 |
107 | # celery beat schedule file
108 | celerybeat-schedule
109 |
110 | # dotenv
111 | .env
112 |
113 | # virtualenv
114 | venv/
115 | ENV/
116 |
117 | # Spyder project settings
118 | .spyderproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # Playground for experiments
124 | playground/
125 | ~*
126 |
127 | # OSX files
128 | .DS_Store
129 |
130 | # VS Code project settings
131 | .vscode/
132 |
133 | # Docker interface
134 | di.yml
135 | Dockerfile
136 |
137 | # Data for examples
138 | mldata/
139 | # TODO info
140 | TODO
141 | # Numpy serialized files
142 | *.npz
143 |
144 | benchmarks/outputs/*
145 | scratch/
146 |
147 |
148 |
--------------------------------------------------------------------------------
/polymath/srdfg/serialization/pmlang_mgdfg.py:
--------------------------------------------------------------------------------
1 | from polymath.pmlang.antlr_generator.parser import FileStream, CommonTokenStream, PMLangParser, ParseTreeWalker
2 | from polymath.pmlang.symbols import PMLangListener
3 | from polymath.pmlang.antlr_generator.lexer import PMLangLexer
4 | import polymath.srdfg.serialization.mgdfgv2_pb2 as mgdfg
5 | from typing import Dict, Tuple
6 | import polymath.srdfg.template as temp
7 | import os
8 |
9 |
10 | def parse_file(file_path: str) -> PMLangListener:
11 | input_file = FileStream(file_path)
12 | lexer = PMLangLexer(input_file)
13 | stream = CommonTokenStream(lexer)
14 | parser = PMLangParser(stream)
15 | tree = parser.pmlang()
16 | pmlang_graph = PMLangListener(file_path)
17 | walker = ParseTreeWalker()
18 | walker.walk(pmlang_graph, tree)
19 |
20 | return pmlang_graph
21 |
22 | def compile_to_pb(file_path: str, orig_listener=None):
23 | if orig_listener:
24 | listener = orig_listener
25 | else:
26 | listener = parse_file(file_path)
27 |
28 | output_dir, output_file = os.path.split(file_path)
29 | graph_name = output_file[:-3]
30 |
31 | program = mgdfg.Program(name=graph_name)
32 | for comp_name, comp in listener.components.items():
33 | program.templates[comp_name].CopyFrom(comp.serialize())
34 | new_graph = mgdfg_gen(listener.components)
35 | program.graph.CopyFrom(new_graph.serialize())
36 | return program
37 |
38 |
39 | def store_pb(dir_path: str, program: mgdfg.Program):
40 | file_path = f"{dir_path}/{program.name}.pb"
41 | with open(file_path, "wb") as program_file:
42 | program_file.write(program.SerializeToString())
43 |
44 | def load_pb(filepath: str) -> mgdfg.Program:
45 | new_program = mgdfg.Program()
46 | with open(filepath, "rb") as program_file:
47 | new_program.ParseFromString(program_file.read())
48 | return new_program
49 |
50 | def mgdfg_gen(templates: Dict[str, temp.Template]) -> temp.Node:
51 | main_node = temp.Node(0, "main")
52 | main_node.instantiate([], templates["main"], templates)
53 | return main_node
54 |
55 | def mgdfg_from_pb(file_path: str) -> Tuple[Dict[str,temp.Template], temp.Node]:
56 | program = load_pb(file_path)
57 | comp_dict = {}
58 | main_node = temp.Node(0, "main")
59 | main_node.deserialize(program.graph)
60 | for comp in program.templates:
61 | comp_dict[comp] = temp.Template(comp)
62 | comp_dict[comp].deserialize(program.templates[comp])
63 |
64 | return (comp_dict, main_node)
65 |
66 | def create_component_with_name(comp_name: str) -> temp.Template:
67 | return temp.Template(comp_name)
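68 |
69 | # Hedged usage sketch (not part of the original file): compile a PMLang
70 | # source file to protobuf and round-trip it through disk. The paths are
71 | # illustrative; linear.pm lives under tests/pmlang_examples.
72 | # program = compile_to_pb("tests/pmlang_examples/linear.pm")
73 | # store_pb("/tmp", program)
74 | # templates, main_node = mgdfg_from_pb(f"/tmp/{program.name}.pb")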
--------------------------------------------------------------------------------
/polymath/srdfg/serialization/srdfgv3.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package srdfg;
4 | import "ndarray.proto";
5 |
6 | message Node {
7 | repeated Node nodes = 1;
8 | string name = 2;
9 | string op_name = 3;
10 | message Shape {
11 | oneof value {
12 | int32 shape_const = 1;
13 | string shape_id = 2;
14 | }
15 | }
16 | repeated Shape shape = 4;
17 | repeated string dependencies = 5;
18 | repeated Attribute args = 6;
19 |   map<string, Attribute> kwargs = 7;
20 | string module = 8;
21 | int64 uuid = 9;
22 | int64 graph_id = 10;
23 | }
24 |
25 | message Attribute {
26 |
27 | enum Type {
28 | UNDEFINED = 0;
29 | // Basic types.
30 | FLOAT = 1; // float
31 | UINT8 = 2; // uint8_t
32 | INT8 = 3; // int8_t
33 | UINT16 = 4; // uint16_t
34 | INT16 = 5; // int16_t
35 | INT32 = 6; // int32_t
36 | INT64 = 7; // int64_t
37 | STRING = 8; // string
38 | BOOL = 9; // bool
39 | FLOAT16 = 10;
40 | DOUBLE = 11;
41 | UINT32 = 12;
42 | UINT64 = 13;
43 | COMPLEX64 = 14; // complex with float32 real and imaginary components
44 | COMPLEX128 = 15;
45 | NDARRAY = 16;
46 | NODE = 17;
47 | FLOATS = 18; // float
48 | UINT8S = 19; // uint8_t
49 | INT8S = 20; // int8_t
50 | UINT16S = 21; // uint16_t
51 | INT16S = 22; // int16_t
52 | INT32S = 23; // int32_t
53 | INT64S = 24; // int64_t
54 | STRINGS = 25; // string
55 | BOOLS = 26; // bool
56 | FLOAT16S = 27;
57 | DOUBLES = 28;
58 | UINT32S = 29;
59 | UINT64S = 30;
60 | COMPLEX64S = 31; // complex with float32 real and imaginary components
61 | COMPLEX128S = 32;
62 | NDARRAYS = 33;
63 | NODES = 34;
64 | DOM = 35;
65 | DOMS = 36;
66 | MAP = 37;
67 | };
68 | float f = 2;
69 | int32 i32 = 3;
70 | int64 i64 = 5;
71 | bytes s = 6;
72 | double d = 7;
73 | uint64 ui64 = 8;
74 | bool b = 9;
75 | Node n = 10;
76 | numproto.protobuf.NDArray nda = 11;
77 | Domain dom = 12;
78 | repeated float fs = 13;
79 | repeated int32 i32s = 14;
80 | repeated int64 i64s = 15;
81 | repeated bytes ss = 16;
82 | repeated double ds = 17;
83 | repeated uint64 ui64s = 18;
84 | repeated bool bs = 19;
85 | repeated Node ns = 20;
86 | repeated numproto.protobuf.NDArray ndas = 21;
87 | repeated Domain doms = 22;
88 |   map<string, Attribute> mapping = 23;
89 | Type type = 24;
90 | }
91 | message Domain {
92 | repeated Attribute domains = 1;
93 | }
--------------------------------------------------------------------------------
/polymath/srdfg/templates/data_analytics.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | from polymath.srdfg.util import squeeze_shape
3 | from numbers import Integral
4 | import numpy as np
5 | import functools
6 |
7 |
8 | class svm_classifier_train(pm.Template):
9 | def define_graph(self, x, w, y, mu, m):
10 | i = pm.index(0, (m - 1).set_name("m-1"), name="i")
11 | h = pm.sum([i], (x[i] * w[i]), name="h")
12 | c = (y*h).set_name("c")
13 | ny = (0 - y).set_name("ny")
14 | p = ((c > 1)*ny).set_name("p")
15 | g = (p * x[i]).set_name("g")
16 | w[i] = w[i] - mu * g[i]
17 |
18 | class logistic_regressor_train(pm.Template):
19 |
20 | def define_graph(self, x, w, y, mu, m):
21 | i = pm.index(0, (m - 1).set_name("m-1"), name="i")
22 | h = pm.sigmoid(pm.sum([i], (x[i] * w[i]), name="h"))
23 | d = (h - y).set_name("h-y")
24 | g = (d * x[i]).set_name("d*x")
25 | w[i] = w[i] - mu * g[i]
26 |
27 |
28 | class linear_regressor(pm.Template):
29 |
30 | def define_graph(self, x, w, y_pred, mu, m):
31 | i = pm.index(0, (m - 1).set_name("m-1"), name="i")
32 | y_pred.write(pm.sum([i], (x[i] * w[i]), name="h"))
33 |
34 |
35 | class logistic_regressor(pm.Template):
36 |
37 | def define_graph(self, x, w, y_pred, mu, m):
38 | i = pm.index(0, (m - 1).set_name("m-1"), name="i")
39 | y_pred.write(pm.sigmoid(pm.sum([i], (x[i] * w[i]), name="h")))
40 |
41 |
42 | class mc_logistic_regressor_train(pm.Template):
43 |
44 | def define_graph(self, x, w, y, y_pred, mu, m):
45 | i = pm.index(0, (m - 1).set_name("m-1"), name="i")
46 | h = pm.temp(name="h", shape=(m))
47 | h = pm.sigmoid(pm.sum([i], (x[i] * w[i]), name="h"))
48 | d = (h - y).set_name("h-y")
49 | g = (d * x[i]).set_name("d*x")
50 | w[i] = w[i] - mu * g[i]
51 |
52 | class mc_logistic_regressor(pm.Template):
53 |
54 | def define_graph(self, x, w, y_pred, mu, m):
55 | i = pm.index(0, (m - 1).set_name("m-1"), name="i")
56 | h = pm.sigmoid(pm.sum([i], (x[i] * w[i]), name="h"))
57 |
58 | class linear_regressor_train(pm.Template):
59 |
60 | def define_graph(self, x, w, y, mu, m):
61 | i = pm.index(0, (m - 1).set_name("m-1"), name="i")
62 | h = pm.sum([i], (x[i] * w[i]), name="h")
63 | d = (h - y).set_name("h-y")
64 | g = (d * x[i]).set_name("d*x")
65 | w[i] = w[i] - mu * g[i]
66 |
67 |
68 |
69 |
70 | class ppo(pm.Template):
71 | def define_graph(self, obs, action, states,
72 | gamma=0.99,
73 | clip=0.2,
74 | ent_coeff=0.01,
75 | lam=0.95,
76 | adam_eps=1e-5):
77 | pass
78 |
79 |
80 |
81 |
82 | # TODO: Add reshape operator, constant operator, gemm
83 |
84 |
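85 | # Hedged reference sketch (not part of the original file): the update
86 | # rule that linear_regressor_train expresses over index i, written in
87 | # plain numpy (already imported above). Shapes and values here are
88 | # illustrative assumptions.
89 | # x, w, y, mu = np.ones(4), np.full(4, 0.5), 1.0, 0.1
90 | # h = np.sum(x * w)          # h = sum_i x[i] * w[i]
91 | # w = w - mu * (h - y) * x   # w[i] <- w[i] - mu * g[i]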
--------------------------------------------------------------------------------
/polymath/srdfg/passes/node_mapping.py:
--------------------------------------------------------------------------------
1 | from . import is_literal, is_number
2 | from polymath.srdfg.serialization.mgdfg_pb2 import Component
3 |
4 |
5 | def map_nodes(graph, templates, mapped_components, config_map):
6 |
7 | for n in graph.sub_graph:
8 | op_cat = n.op_cat
9 | if op_cat == 'component':
10 | if n.op_type in config_map['ops'].keys():
11 | n.op_cat = 'mapped_node'
12 | mapped_components.append(n.op_type)
13 | else:
14 | map_nodes(templates[n.op_type], templates, mapped_components, config_map)
15 | elif n.op_type in mapped_components:
16 | n.op_cat = 'mapped_node'
17 |
18 |
19 | def update_node(node, context, carg_map):
20 | new = Component(name=context + node.name)
21 | inputs = []
22 | outputs = []
23 | states = []
24 | parameters = []
25 | for inp in node.input:
26 | if is_number(inp):
27 | i = str(inp)
28 | else:
29 | i = inp
30 | if is_literal(i):
31 | inputs.append(i)
32 | elif i in carg_map.keys():
33 | # inputs.append(carg_map[i])
34 | inputs.append(carg_map[i].name)
35 | else:
36 | inputs.append(context + i)
37 | new.input.extend(inputs)
38 |
39 | for o in node.output:
40 | if is_number(o):
41 | out = str(o)
42 | else:
43 | out = o
44 |
45 | if is_literal(out):
46 | outputs.append(out)
47 | elif out in carg_map.keys():
48 | # outputs.append(carg_map[out])
49 | outputs.append(carg_map[out].name)
50 | else:
51 | outputs.append(context + out)
52 |
53 | new.output.extend(outputs)
54 |
55 | for st in node.state:
56 | if is_number(st):
57 | s = str(st)
58 | else:
59 | s = st
60 |
61 | if is_literal(s):
62 | states.append(s)
63 | elif s in carg_map.keys():
64 | # states.append(carg_map[s])
65 | states.append(carg_map[s].name)
66 | else:
67 | states.append(context + s)
68 | new.state.extend(states)
69 |
70 |
71 | for para in node.parameters:
72 | if is_number(para):
73 | p = str(para)
74 | else:
75 | p = para
76 |
77 | if is_literal(p):
78 | parameters.append(p)
79 | elif p in carg_map.keys():
80 | # parameters.append(carg_map[p])
81 | parameters.append(carg_map[p].name)
82 | else:
83 | parameters.append(context + p)
84 |
85 | new.parameters.extend(parameters)
86 |
87 | for attr in node.attributes:
88 | new.attributes[attr].CopyFrom(node.attributes[attr])
89 | new.op_type = node.op_type
90 |
91 | return new
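92 |
93 | # Hedged sketch (not part of the original file): the shape of the
94 | # config_map consulted by map_nodes. The op names are illustrative
95 | # assumptions, not a real backend configuration.
96 | # config_map = {"ops": {"conv2d": {}, "dense": {}}}
97 | # map_nodes(graph, templates, mapped_components=[], config_map=config_map)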
--------------------------------------------------------------------------------
/tests/test_neuroweaver.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | from tests.neuroweaver.nn import nn_impl, nn_impl_
4 | from tests.neuroweaver.hotspot import hotspot_impl
5 | from tests.neuroweaver.pathfinder import pathfinder_impl
6 | from tests.neuroweaver.hotspot3d import impl_3D
7 | import polymath as pm
8 | from pathlib import Path
9 |
10 | HOME = Path.home()
11 | CWD = Path(f"{__file__}").parent
12 | TABLA_PATH = f"{HOME}/ACTLab/rtml/project.rtml/tablav2/benchmarks/dfgs/polymath_generated"
13 | BASE_PATH = f"{CWD}/pmlang_examples"
14 | OUTPATH = f"{BASE_PATH}/outputs"
15 |
16 | @pytest.mark.parametrize('num, latitude, longitude',[
17 | (3, 30, 90),
18 | ])
19 | def test_nn(num, latitude, longitude):
20 | # graph, inp_info, out_info, keys = nn_impl(num, latitude, longitude, coarse=True)
21 | graph, inp_info, out_info, keys = nn_impl_(num, latitude, longitude, coarse=True)
22 | shape_dict = {"num": num}
23 |
24 | tabla_path = f"{TABLA_PATH}/{graph.name}_{num}.json"
25 | tabla_ir, tabla_graph = pm.generate_tabla(graph, shape_dict, tabla_path, debug=False)
26 |
27 | # test_out = graph("max_dist2", inp_info)
28 | # print()
29 | # print(test_out)
30 | # test_out = graph("sqrtz", inp_info)
31 | # print(test_out.shape)
32 | # print(graph("sqrtz", inp_info))
33 | # print(graph("max_idx1", inp_info))
34 | # print(graph("max_idx9", inp_info))
35 | # print(test_out)
36 | # np.testing.assert_allclose(out_info[keys[0]], test_out)
37 |
38 | @pytest.mark.parametrize('rows, cols',[
39 | (10, 10),
40 | ])
41 | def test_pathfinder(rows, cols):
42 | graph = pathfinder_impl()
43 | tabla_path = f"{TABLA_PATH}/{graph.name}_{rows}_{cols}.json"
44 | shape_dict = {"rows": rows, "cols": cols}
45 | tabla_ir, tabla_graph = pm.generate_tabla(graph, shape_dict, tabla_path, debug=False)
46 |
47 | @pytest.mark.parametrize('num, latitude, longitude',[
48 | (3, 30, 90),
49 | ])
50 | def test_hotspot(num, latitude, longitude):
51 | # graph, inp_info, out_info, keys = nn_impl(num, latitude, longitude, coarse=True)
52 | graph = hotspot_impl()
53 | shape_dict = {"row": 12, "col": 12}
54 |
55 | tabla_path = f"{OUTPATH}/{graph.name}_tabla.json"
56 | tabla_ir, tabla_graph = pm.generate_tabla(graph, shape_dict, tabla_path, debug=False)
57 |
58 |
59 | @pytest.mark.parametrize('num, latitude, longitude',[
60 | (3, 30, 90),
61 | ])
62 | def test_hotspot3d(num, latitude, longitude):
63 | # graph, inp_info, out_info, keys = nn_impl(num, latitude, longitude, coarse=True)
64 | graph = impl_3D()
65 | numCols = 4
66 | numRows = 4
67 | layers = 4
68 | shape_dict = {"numRows": numRows, "numCols": numCols, "layers":layers}
69 |
70 | tabla_path = f"{OUTPATH}/{graph.name}_tabla.json"
71 | tabla_ir, tabla_graph = pm.generate_tabla(graph, shape_dict, tabla_path, debug=False)
--------------------------------------------------------------------------------
/examples/example_graphs.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | from pathlib import Path
3 | BENCH_DIR = Path(f"{Path(__file__).parent}/../benchmarks/onnx_files")
4 | CWD = Path(f"{__file__}").parent
5 | BASE_PATH = f"{CWD}/pmlang_examples"
6 | OUTPATH = f"{BASE_PATH}/outputs"
7 | ONNX_FILE_DIR = Path(f"{Path(__file__).parent}/onnx_examples")
8 |
9 |
10 | def reco(m_=3, n_=3, k_=2):
11 | with pm.Node(name="recommender") as graph:
12 | mu = pm.placeholder("mu")
13 | m = pm.placeholder("m")
14 | n = pm.placeholder("n")
15 | k = pm.placeholder("k")
16 | x1 = pm.placeholder("x1", shape=k)
17 | x2 = pm.placeholder("x2", shape=k)
18 |
19 | r1 = pm.placeholder("r1", shape=m)
20 | y1 = pm.placeholder("y1", shape=m)
21 |
22 | r2 = pm.placeholder("r2", shape=n)
23 | y2 = pm.placeholder("y2", shape=n)
24 |
25 | w1 = pm.placeholder("w1", shape=(m, k))
26 | w2 = pm.placeholder("w2", shape=(n, k))
27 | i = pm.index(0, m - 1, name="i")
28 | j = pm.index(0, n - 1, name="j")
29 | l = pm.index(0, k - 1, name="l")
30 | h1_sum = pm.sum([l], (w1[i, l] * x2[l]).set_name("w1*x2")).set_name("h1_sum")
31 | h1 = (h1_sum[i] * r1[i]).set_name("h1")
32 | h2_sum = pm.sum([l], (x1[l] * w2[j, l]).set_name("x1*w2")).set_name("h2_sum")
33 | h2 = (h2_sum[j] * r2[j]).set_name("h2")
34 | #
35 | d1 = (h1[i] - y1[i]).set_name("d1")
36 | d2 = (h2[j] - y2[j]).set_name("d2")
37 | g1 = (d1[i] * x2[l]).set_name("g1")
38 | g2 = (d2[j] * x1[l]).set_name("g2")
39 | w1_ = (w1[i, l] - g1[i, l]).set_name("w1_")
40 |         w2_ = (w2[j, l] - g2[j, l]).set_name("w2_")
41 |
42 | shape_val_pass = pm.NormalizeGraph({"m": m_, "n": n_, "k": k_})
43 | new_graph, res = shape_val_pass(graph)
44 | return new_graph
45 |
46 | def linear_reg():
47 | with pm.Node(name="linear_reg") as graph:
48 | m = pm.placeholder("m")
49 | x = pm.placeholder("x", shape=(m), type_modifier="input")
50 | y = pm.placeholder("y", type_modifier="input")
51 | w = pm.placeholder("w", shape=(m), type_modifier="state")
52 | mu = pm.parameter(name="mu", default=1.0)
53 | i = pm.index(0, (graph["m"]-1).set_name("m-1"), name="i")
54 | h = pm.sum([i], (x[i] * w[i]).set_name("x*w"), name="h")
55 | d = (h-y).set_name("h-y")
56 | g = (d*x).set_name("d*x")
57 | w_ = (w - (mu*g).set_name("mu*g")).set_name("w-mu*g")
58 |
59 | def lenet_from_onnx():
60 | filename = f"mnist-lenet.onnx"
61 | filepath = f"{BENCH_DIR}/full_dnns/{filename}"
62 | assert Path(filepath).exists()
63 | graph = pm.from_onnx(filepath)
64 | print_skip_nodes = ['write', 'output', 'var_index', 'input', 'index']
65 | for k,v in graph.nodes.items():
66 | if v.op_name not in print_skip_nodes:
67 | print(f"{k} - {v.op_name}")
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/fpga/memspace.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import mmap
3 | import os
4 | import numpy as np
5 | import array
6 | import codecs
7 |
8 | decode_hex = codecs.getdecoder("hex_codec")
9 | def to_bytes(n, length, endianess='big'):
10 | h = '%x' % n
11 | s = decode_hex(('0'*(len(h) % 2) + h).zfill(length*2))[0]
12 | return s if endianess == 'big' else s[::-1]
13 |
14 | class FPGAMemSpace(object):
15 | def __init__(self,
16 | pci_cl_ctrl_device='/dev/xdma/card0/user',
17 | c2h_dma_device='/dev/xdma/card0/c2h0',
18 | h2c_dma_device='/dev/xdma/card0/h2c0',
19 | log_level=logging.INFO):
20 | self.log = logging.getLogger('FPGA Memspace')
21 | self.log.setLevel(log_level)
22 |
23 | self.log.debug('Opening device: {}'.format(pci_cl_ctrl_device))
24 | self.pci_cl_ctrl_fd = open(pci_cl_ctrl_device, 'r+b', buffering=0)
25 | self.pci_cl_ctrl_mmap = mmap.mmap(self.pci_cl_ctrl_fd.fileno(), 32*1024, prot=mmap.PROT_READ|mmap.PROT_WRITE)
26 |
27 | self.log.debug('Opening device: {}'.format(h2c_dma_device))
28 | self.h2c_fd = os.open(h2c_dma_device, os.O_RDWR)
29 |
30 | self.log.debug('Opening device: {}'.format(c2h_dma_device))
31 | self.c2h_fd = os.open(c2h_dma_device, os.O_RDWR)
32 |
33 | self.inst_buffer_addr = 0x100000000
34 |
35 | def write(self, namespace, addr, data):
36 | assert namespace in ('pci_cl_data', 'pci_cl_ctrl', 'ddr')
37 |
38 | if namespace == 'pci_cl_ctrl':
39 | self.pci_cl_ctrl_mmap.seek(addr)
40 | self.pci_cl_ctrl_mmap.write(to_bytes(data, 4, 'little'))
41 | elif namespace == 'pci_cl_data':
42 | os.lseek(self.h2c_fd, addr+self.inst_buffer_addr, 0)
43 | os.write(self.h2c_fd, data)
44 | else:
45 | self.log.debug('Writing data {} with dtype {} to address {}'.format(
46 | data, data.dtype, addr))
47 | os.lseek(self.h2c_fd, addr, 0)
48 | os.write(self.h2c_fd, data)
49 |
50 | def read(self, namespace, addr, size=None):
51 | assert namespace in ('pci_cl_data', 'pci_cl_ctrl', 'ddr')
52 |
53 | if namespace == 'pci_cl_ctrl':
54 | self.pci_cl_ctrl_mmap.seek(addr)
55 | v = self.pci_cl_ctrl_mmap.read(4)
56 | if isinstance(v, bytes):
57 | v = v.decode('utf-8')
58 | v = '0x'+''.join([hex(ord(i))[2:].zfill(2) for i in reversed(v)])
59 | return int(v, 16)
60 | elif namespace == 'pci_cl_data':
61 | os.lseek(self.c2h_fd, addr+self.inst_buffer_addr, 0)
62 | return np.array(array.array('i', os.read(self.c2h_fd, size)), dtype=np.int32)
63 | else:
64 | self.log.debug('Reading tensor of size {} Bytes from address {}'.format(size, addr))
65 | os.lseek(self.c2h_fd, addr, 0)
66 | return os.read(self.c2h_fd, int(size))
67 |
68 |
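69 | # Hedged usage sketch (not part of the original file): to_bytes packs an
70 | # integer into a fixed-width byte string; FPGAMemSpace uses it to write
71 | # control registers as 4-byte little-endian words.
72 | # assert to_bytes(0xdeadbeef, 4, 'little') == b'\xef\xbe\xad\xde'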
--------------------------------------------------------------------------------
/tests/test_verilog_gen.py:
--------------------------------------------------------------------------------
1 | from polymath.srdfg.passes.compiler_passes import NormalizeGraph, Lower
2 | import polymath as pm
3 | import pprint
4 | import numpy as np
5 | from pathlib import Path
6 | from tests.util import count_nodes, linear, reco
7 |
8 | def test_linear_reg():
9 | m_ = 3
10 | graph, input_info, out_info, keys = linear(m=m_, coarse=True)
11 | coarse_eval = graph(keys, input_info)
12 | np.testing.assert_allclose(coarse_eval, out_info["w"])
13 |
14 |
15 |
16 | fgraph, input_info, out_info, keys = linear(m=m_, coarse=False)
17 | lower_pass = Lower({})
18 | lowered_graph = lower_pass(fgraph, {})
19 | all_vals = lowered_graph(keys, input_info)
20 | out = np.asarray(all_vals).reshape(out_info["w"].shape)
21 |
22 | np.testing.assert_allclose(out, out_info["w"])
23 | cwd = Path(f"{__file__}").parent
24 | base_path = f"{cwd}/pmlang_examples"
25 | full_path = f"{base_path}/outputs"
26 | pb_path = f"{full_path}/{graph.name}.srdfg"
27 |
28 | pm.pb_store(lowered_graph, full_path)
29 |
30 | loaded_node = pm.pb_load(pb_path)
31 | _, input_info, out_info, keys = linear(m=m_, coarse=False)
32 |
33 | loaded_res = loaded_node(keys, input_info)
34 | out = np.asarray(loaded_res).reshape(out_info["w"].shape)
35 | np.testing.assert_allclose(out, out_info["w"])
36 |
37 | def test_reco():
38 | m_ = 3
39 | n_ = 3
40 | k_ = 2
41 | shape_dict = {"m": n_, "k": k_, "n": n_}
42 | graph, input_info, out_info, keys = reco(coarse=True, **shape_dict)
43 | coarse_eval = graph(keys, input_info)
44 | np.testing.assert_allclose(coarse_eval[0], out_info["w1"])
45 | np.testing.assert_allclose(coarse_eval[1], out_info["w2"])
46 |
47 |
48 | fgraph, input_info, out_info, keys = reco(coarse=False, **shape_dict)
49 | lower_pass = Lower({})
50 | lowered_graph = lower_pass(fgraph, {})
51 | all_vals = lowered_graph(keys, input_info)
52 | w1_elems = np.prod(out_info["w1"].shape)
53 | w2_elems = np.prod(out_info["w2"].shape)
54 | out1 = np.asarray(list(all_vals[0:w1_elems])).reshape(out_info["w1"].shape)
55 | out2 = np.asarray(list(all_vals[w1_elems:])).reshape(out_info["w2"].shape)
56 |
57 | np.testing.assert_allclose(out1, out_info["w1"])
58 | np.testing.assert_allclose(out2, out_info["w2"])
59 | cwd = Path(f"{__file__}").parent
60 | base_path = f"{cwd}/pmlang_examples"
61 | full_path = f"{base_path}/outputs"
62 | pb_path = f"{full_path}/{graph.name}.srdfg"
63 |
64 | pm.pb_store(lowered_graph, full_path)
65 |
66 | loaded_node = pm.pb_load(pb_path)
67 | _, input_info, out_info, keys = reco(coarse=False, **shape_dict)
68 |
69 | loaded_res = loaded_node(keys, input_info)
70 | lres1 = np.asarray(list(loaded_res[0:w1_elems])).reshape(out_info["w1"].shape)
71 | lres2 = np.asarray(list(loaded_res[w1_elems:])).reshape(out_info["w2"].shape)
72 | np.testing.assert_allclose(lres1, out_info["w1"])
73 | np.testing.assert_allclose(lres2, out_info["w2"])
74 |
75 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/scalar/ops.py:
--------------------------------------------------------------------------------
1 | from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import Dtype
2 |
3 | class ScalarOp(object):
4 | def __init__(self, op_str, dtype):
5 | self.op_str = op_str
6 | self.dtype = dtype
7 | def __str__(self):
8 | if isinstance(self.dtype, Dtype):
9 | return '{}({})'.format(self.op_str, self.dtype.__str__())
10 | else:
11 | ret = str(self.op_str)
12 | ret += '('
13 | ret += ','.join([x.__str__() for x in self.dtype])
14 | ret += ')'
15 | return ret
16 |
17 |
18 | class ScalarOpTypes(object):
19 | def __init__(self):
20 | self.MulOp = {}
21 | self.MacOp = {}
22 | self.SqrOp = {}
23 | self.CmpOp = {}
24 | self.AddOp = {}
25 | self.SubOp = {}
26 | self.RshiftOp = {}
27 | def MUL(self, dtypes):
28 | assert len(dtypes) == 2
29 | dtype_str = tuple(d.__str__() for d in dtypes)
30 | if dtype_str not in self.MulOp.keys():
31 | self.MulOp[dtype_str] = ScalarOp('Multiply', dtypes)
32 | return self.MulOp[dtype_str]
33 |
34 | def MAC(self, dtypes):
35 | assert len(dtypes) == 3
36 | dtype_str = tuple(d.__str__() for d in dtypes)
37 | if dtype_str not in self.MacOp.keys():
38 | self.MacOp[dtype_str] = ScalarOp('Multiply-Accumulate', dtypes)
39 | return self.MacOp[dtype_str]
40 |
41 | def SQR(self, dtypes):
42 | assert isinstance(dtypes, Dtype)
43 |
44 | dtype_str = dtypes.__str__()
45 |
46 | if dtype_str not in self.SqrOp.keys():
47 | self.SqrOp[dtype_str] = ScalarOp('Square', dtypes)
48 | return self.SqrOp[dtype_str]
49 | def CMP(self, dtypes):
50 | assert isinstance(dtypes, Dtype), 'Got Dtypes: {}'.format(dtypes)
51 | dtype_str = dtypes.__str__()
52 |
53 | if dtype_str not in self.CmpOp.keys():
54 | self.CmpOp[dtype_str] = ScalarOp('Compare', dtypes)
55 | return self.CmpOp[dtype_str]
56 |
57 | def ADD(self, dtypes):
58 | assert len(dtypes) == 2
59 | dtype_str = tuple(d.__str__() for d in dtypes)
60 |
61 | if dtype_str not in self.AddOp.keys():
62 | self.AddOp[dtype_str] = ScalarOp('Addition', dtypes)
63 | return self.AddOp[dtype_str]
64 | def SUB(self, dtypes):
65 | assert len(dtypes) == 2
66 | dtype_str = tuple(d.__str__() for d in dtypes)
67 |
68 | if dtype_str not in self.SubOp.keys():
69 | self.SubOp[dtype_str] = ScalarOp('Subtract', dtypes)
70 | return self.SubOp[dtype_str]
71 | def RSHIFT(self, dtypes):
72 | assert isinstance(dtypes, Dtype), 'Got Dtypes: {}'.format(dtypes)
73 | dtype_str = dtypes.__str__()
74 |
75 | if dtype_str not in self.RshiftOp.keys():
76 | self.RshiftOp[dtype_str] = ScalarOp('Rshift', dtypes)
77 | return self.RshiftOp[dtype_str]
78 |
79 |
80 | Ops = ScalarOpTypes()
81 |
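82 | # Hedged usage sketch (not part of the original file): Ops memoizes one
83 | # ScalarOp per dtype signature, so repeated lookups return the same op.
84 | # from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import FQDtype
85 | # mul = Ops.MUL((FQDtype.FXP16, FQDtype.FXP16))
86 | # assert mul is Ops.MUL((FQDtype.FXP16, FQDtype.FXP16))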
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/benchmarks/test.py:
--------------------------------------------------------------------------------
1 | from polymath.codegen.dnnweavergen.dnnweaver2.graph import Graph, get_default_graph
2 |
3 | from polymath.codegen.dnnweavergen.dnnweaver2.tensorOps.cnn import conv2D, maxPool, flatten, matmul, addBias, batch_norm, reorg, concat, leakyReLU
4 | from polymath.codegen.dnnweavergen.dnnweaver2 import get_tensor
5 | import logging
6 | from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import FQDtype, FixedPoint
7 |
8 |
9 |
10 |
11 | def yolo_convolution(tensor_in, filters=32, kernel_size=3,
12 | batch_normalize=True, act='leakyReLU',
13 | c_dtype=None, w_dtype=None,
14 | s_dtype=None, bn_dtype=None):
15 |
16 | input_channels = tensor_in.shape[-1]
17 |
18 | weights = get_tensor(shape=(filters, kernel_size, kernel_size, input_channels),
19 | name='weights',
20 | dtype=w_dtype)
21 | biases = get_tensor(shape=(filters),
22 | name='biases',
23 | dtype=FixedPoint(32,w_dtype.frac_bits + tensor_in.dtype.frac_bits))
24 | conv = conv2D(tensor_in, weights, biases, pad='SAME', dtype=c_dtype)
25 |
26 | if batch_normalize:
27 | with get_default_graph().name_scope('batch_norm'):
28 | mean = get_tensor(shape=(filters), name='mean', dtype=FixedPoint(16,c_dtype.frac_bits))
29 | scale = get_tensor(shape=(filters), name='scale', dtype=s_dtype)
30 | bn = batch_norm(conv, mean=mean, scale=scale, dtype=bn_dtype)
31 | else:
32 | bn = conv
33 |
34 | if act == 'leakyReLU':
35 | with get_default_graph().name_scope(act):
36 | act = leakyReLU(bn, dtype=bn.dtype)
37 | elif act == 'linear':
38 | with get_default_graph().name_scope(act):
39 | act = bn
40 | else:
41 | logging.error('Unknown activation type {}'.format(act))
42 |
43 | return act
44 |
45 |
46 | def get_graph(train=False):
47 | g = Graph('YOLOv2-Test: 16-bit', dataset='imagenet', log_level=logging.INFO)
48 | batch_size = 1
49 |
50 | with g.as_default():
51 |
52 | with g.name_scope('inputs'):
53 | # Input dimensions are (Batch_size, Height, Width, Channels)
54 | i = get_tensor(shape=(batch_size,28,28,1), name='data', dtype=FQDtype.FXP16, trainable=False)
55 |
56 | with g.name_scope('conv0'):
57 | # Weight dimensions are (Output Channels, Kernel Height, Kernel Width, Input Channels)
58 | weights = get_tensor(shape=(20, 5, 5, 1),
59 | name='weights',
60 | dtype=FixedPoint(16,12))
61 | # Bias dimensions are (Output Channels,)
62 | biases = get_tensor(shape=(20),
63 | name='biases',
64 | dtype=FixedPoint(32,20))
65 | # Intermediate data dimensions are (Batch_size, Height, Width, Channels)
66 | conv = conv2D(i, weights, biases, pad='VALID', dtype=FixedPoint(16,12))
67 |
68 | return g
69 |
70 |
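71 | # Hedged usage sketch (not part of the original file): build the test
72 | # graph and list its ops. Assumes the Graph class exposes its nodes via
73 | # an op_registry dict, as in upstream dnnweaver2.
74 | # g = get_graph()
75 | # for name, op in g.op_registry.items():
76 | #     print(name, op.op_type)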
--------------------------------------------------------------------------------
/tests/test_dnns.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | import numpy as np
3 | from polymath.srdfg.from_onnx.converter import get_value_info_shape
4 | from .util import np_nms, onnx_nms, torch_nms, t_torch_nms
5 | import pytest
6 | from pathlib import Path
7 |
8 | BENCH_DIR = Path(f"{Path(__file__).parent}/../benchmarks/onnx_files")
9 | CWD = Path(f"{__file__}").parent
10 | BASE_PATH = f"{CWD}/pmlang_examples"
11 | OUTPATH = f"{BASE_PATH}/outputs"
12 | ONNX_FILE_DIR = Path(f"{Path(__file__).parent}/onnx_examples")
13 | def test_lenet():
14 | pass
15 | # @pytest.mark.parametrize('max_output_per_class, iou_threshold, score_threshold, center_point_box',[
16 | # (10, 0.5, 0.0, 0)
17 | # ])
18 | # def test_nms(max_output_per_class, iou_threshold, score_threshold, center_point_box):
19 | # # boxes = np.array([[
20 | # # [1.0, 1.0, 0.0, 0.0],
21 | # # [0.0, 0.1, 1.0, 1.1],
22 | # # [0.0, 0.9, 1.0, -0.1],
23 | # # [0.0, 10.0, 1.0, 11.0],
24 | # # [1.0, 10.1, 0.0, 11.1],
25 | # # [1.0, 101.0, 0.0, 100.0]
26 | # # ]]).astype(np.float32)
27 | # # scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
28 | # boxes = np.array([[
29 | # [1.0, 1.0, 0.0, 0.0],
30 | # [0.0, 0.1, 1.0, 1.1],
31 | # [0.0, 0.9, 1.0, -0.1],
32 | # [0.0, 10.0, 1.0, 11.0],
33 | # [1.0, 10.1, 0.0, 11.1],
34 | # [1.0, 101.0, 0.0, 100.0]
35 | # ]]).astype(np.float32)
36 | # scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
37 | # onnx_res, valid_res = onnx_nms(boxes, scores, max_output_per_class, iou_threshold, score_threshold)
38 | # # test_res = valid_res[:, 2]
39 | #
40 | # np_res = np_nms(boxes, scores, max_output_per_class, iou_threshold, score_threshold)
41 | # torch_nms(boxes, scores, max_output_per_class, iou_threshold, score_threshold)
42 | # # onnx_nms(boxes, scores, max_output_per_class, iou_threshold, score_threshold)
43 | #
44 | # def test_mrcnn_backbone():
45 | # filename = f"backbone_mrcnn.onnx"
46 | # filepath = f"{BENCH_DIR}/full_dnns/mask_rcnn/{filename}"
47 | # assert Path(filepath).exists()
48 | # graph = pm.from_onnx(filepath)
49 | #
50 | # def test_mrcnn_ops():
51 | # # filename = f"mask_rcnn/backbone_mrcnn.onnx"
52 | # filename = f"lenet.onnx"
53 | # filepath = f"{BENCH_DIR}/full_dnns/{filename}"
54 | # assert Path(filepath).exists()
55 | #
56 | # import onnx
57 | # model = onnx.load(filepath)
58 | # onnx.checker.check_model(model)
59 | # graph = onnx.shape_inference.infer_shapes(model).graph
60 | #
61 | # val_info = {}
62 | #
63 | # for v in graph.value_info:
64 | # # print(v)
65 | # val_info[v.name] = tuple([dim.dim_value for dim in v.type.tensor_type.shape.dim])
66 | #
67 | # # val_info[v.name] = get_value_info_shape(v)
68 | # # print(val_info)
69 | # for n in graph.node:
70 | # if n.op_type == "Pad":
71 | # print(f"Input shape: {val_info[n.input[0]]}\n"
72 | # f"Output shape: {val_info[n.output[0]]}\n")
73 | # # print(n.input[0])
74 | # # print(n.output[0])
75 | # # print(n.name)
76 | # # print(f"\n")
77 | #
78 | #
79 |
80 |
81 |
82 |
83 |
--------------------------------------------------------------------------------
/tests/pmlang_examples/lenet.pm:
--------------------------------------------------------------------------------
1 | avg_pool2d(input float in[n][ic][h][w], output float out[n][ic][oh][ow], param int strides=2, param int pool_size=2, param int padding=0){
2 |
3 | index b[0:n-1], c[0:ic-1], y[0:oh-1], x[0:ow-1];
4 | index m[0:pool_size-1], k[0:pool_size-1];
5 | index y_pad[padding:oh + padding -1], x_pad[padding:ow + padding -1];
6 |
7 | padded_input[b][c][y][x] = 0;
8 | padded_input[b][c][y_pad][x_pad] = in[b][c][y_pad - padding][x_pad - padding];
9 | out[b][c][y][x] = (1/(pool_size^2))*sum[m][k](padded_input[b][c][strides*y + m][strides*x + k]);
10 |
11 | }
12 |
13 | batch_flatten(input float in[k][m][n][p], output float out[k][l]) {
14 | index i[0:l-1], j[0:k-1];
15 | second[i] = floor(i/m)%n;
16 |     third[i] = floor(i/(m*n));
17 | //out[j][i] = in[j][i%m][floor(i/m)%n][third[i]];
18 | out[j][i] = in[j][i%m][second[i]][third[i]];
19 | }
20 | dense(input float in[n][m],
21 | state float weights[m][p],
22 | output float out[n][p]){
23 | float a[n][p];
24 |
25 | index i[0:n-1], j[0:p-1], k[0:m-1];
26 | // out[i][j] = sum[k](in[i][k]*w[k][j]) + b[j];
27 | out[i][j] = sum[k](in[i][k]*weights[k][j]);
28 | }
29 | sigmoid(input float in[n][m], output float out[n][m]){
30 | index i[0:n-1], j[0:m-1];
31 |
32 | out[i][j] = 1.0 / (1.0 + e()^(-in[i][j]));
33 | }
34 |
35 | relu(input float in[n][m], output float out[n][m]) {
36 | index i[0:n-1], j[0:m-1];
37 | out[i][j] = in[i][j] > 0 ? 1.0: 0.0;
38 |
39 | }
40 | softmax(input float y[n][m], output float out[m]){
41 | index i[0:m-1], j[0:m-1];
42 |
43 | out[i] = e()^(y[0][i])/sum[j](e()^(y[0][j]));
44 |
45 | }
46 | store_model(input float model[m], param str type="csv", param str model_path="model.txt"){
47 |
48 | fwrite(model, model_path, type);
49 | }
50 |
51 | conv2d(input float in[n][ic][h][w], state float kernels[oc][ic][kh][kw],
52 | output float result[n][oc][oh][ow],param int padding=0, param int strides=1){
53 |
54 | //Compute padding needed for the image
55 | index b[0:n-1], c[0:oc-1], y[0:oh-1], x[0:ow-1];
56 | index dy[0:kh-1], dx[0:kw-1], k[0:ic-1];
57 | index y_pad[padding:oh + padding -1], x_pad[padding:ow + padding -1];
58 |
59 | padded_input[b][k][y][x] = 0;
60 | padded_input[b][k][y_pad][x_pad] = in[b][k][y_pad - padding][x_pad - padding];
61 |
62 | result[b][c][y][x] = sum[dy][dx][k](padded_input[b][k][strides*y + dy][strides*x + dx]*kernels[c][k][dy][dx]);
63 | }
64 |
65 |
66 | main(input float data[1][1][32][32], output float y_pred[10]){
67 | float c1_out[1][6][28][28], s2_out[1][6][14][14], c3_out[1][16][10][10], s4_out[1][16][5][5],s4_batch_flattened[1][400],c5_out[1][120], c6_out[1][84], c7_out[1][10];
68 | float f1_weight[6][1][5][5], f2_weight[16][6][5][5], f3_weight[120][400], f4_weight[84][120], f5_weight[10][84];
69 | float b4[120], b5[10];
70 | // read_image(image, y);
71 | conv2d(data, f1_weight, c1_out);
72 | avg_pool2d(c1_out, s2_out);
73 | conv2d(s2_out, f2_weight, c3_out);
74 | avg_pool2d(c3_out, s4_out);
75 | batch_flatten(s4_out,s4_batch_flattened);
76 | dense(s4_batch_flattened,f3_weight,c5_out);
77 | dense(c5_out,f4_weight, c6_out);
78 | dense(c6_out,f5_weight, c7_out);
79 | softmax(c7_out, y_pred);
80 | // store_model(b5);
81 |
82 | }
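83 |
84 | // Hedged worked example (not part of the original file): batch_flatten
85 | // maps flat index i to (i%m, floor(i/m)%n, floor(i/(m*n))). For
86 | // s4_out[1][16][5][5] flattened to [1][400] (m=16, n=5, p=5), element
87 | // i=137 reads in[j][9][3][1], since 9 + 16*3 + 80*1 = 137.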
--------------------------------------------------------------------------------
/tests/pmlang_examples/backpropagation.pm:
--------------------------------------------------------------------------------
1 | // read_image(output float image[w][h][channels], output float y[m], param str path="data.txt", param str type="csv")
2 | // {
3 | // index i[0:w-1], j[0:h-1], k[0:channels-1];
4 | //
5 | // lines = fread(path, type);
6 | // image[i][j][k] = float(lines[i + j*w + channels*w*h]);
7 | // val = float(lines[m]);
8 | // y[j] = j == val ? 1.0 : 0.0;
9 | // }
10 |
11 |
12 | avg_pool2d(input float in[n][ic][h][w], output float out[n][ic][oh][ow], param int strides=2, param int pool_size=2, param int padding=0){
13 |
14 | index b[0:n-1], c[0:ic-1], y[0:oh-1], x[0:ow-1];
15 | index m[0:pool_size-1], i[0:pool_size-1];
16 | index y_pad[padding:oh + padding -1], x_pad[padding:ow + padding -1];
17 |
18 | padded_input[b][c][y][x] = 0;
19 | padded_input[b][c][y_pad][x_pad] = in[b][c][y_pad - padding][x_pad - padding];
20 |     out[b][c][y][x] = (1/(pool_size^2))*sum[m][i](padded_input[b][c][strides*y + m][strides*x + i]);
21 |
22 | }
23 |
24 | batch_flatten(input float in[k][m][n][p], output float out[k][l]) {
25 | index i[0:l-1], j[0:k-1];
26 |     out[j][i] = in[j][i%m][floor(i/m)%n][floor(i/(m*n))];
27 | }
28 | dense(input float in[n][m],
29 | state float w[m][p],
30 | output float out[n][p]){
31 | float a[n][p];
32 |
33 | index i[0:n-1], j[0:p-1], k[0:m-1];
34 | // out[i][j] = sum[k](in[i][k]*w[k][j]) + b[j];
35 | out[i][j] = sum[k](in[i][k]*w[k][j]);
36 | }
37 | sigmoid(input float in[n][m], output float out[n][m]){
38 | index i[0:n-1], j[0:m-1];
39 |
40 | out[i][j] = 1.0 / (1.0 + e()^(-in[i][j]));
41 | }
42 |
43 | relu(input float in[n][m], output float out[n][m]) {
44 | index i[0:n-1], j[0:m-1];
45 | out[i][j] = in[i][j] > 0 ? 1.0: 0.0;
46 |
47 | }
48 | softmax(input float y[n][m], output float out[m]){
49 | index i[0:m-1], j[0:m-1];
50 |
51 | out[i] = e()^(y[0][i])/sum[j](e()^(y[0][j]));
52 |
53 | }
54 | store_model(input float model[m], param str type="csv", param str model_path="model.txt"){
55 |
56 | fwrite(model, model_path, type);
57 | }
58 |
59 | conv2d(input float x[n][ic][h][w], state float kernels[oc][ic][kh][kw],
60 | output float result[n][oc][oh][ow], param int padding=0, param int strides=1){
61 |
62 | //Compute padding needed for the image
63 | index b[0:n-1], c[0:oc-1], y[0:oh-1], i[0:ow-1];
64 | index dy[0:kh-1], dx[0:kw-1], k[0:ic-1];
65 | index y_pad[padding:oh + padding -1], x_pad[padding:ow + padding -1];
66 |
67 | padded_input[b][k][y][i] = 0;
68 |     padded_input[b][k][y_pad][x_pad] = x[b][k][y_pad - padding][x_pad - padding];
69 |
70 |     result[b][c][y][i] = sum[dy][dx][k](padded_input[b][k][strides*y + dy][strides*i + dx]*kernels[c][k][dy][dx]);
71 | }
72 | main(){
73 | float image[1][1][32][32], y[10], y_pred[10], c1_out[1][6][28][28], s2_out[1][6][14][14], c3_out[1][16][10][10], s4_out[1][16][5][5],s4_batch_flattened[1][400],c5_out[1][120], c6_out[1][84], c7_out[1][10];
74 | float f1[6][1][5][5], f2[16][6][5][5], f3[120][400], f4[84][120], f5[10][84];
75 | float b4[120], b5[10];
76 | test = 0;
77 | // read_image(image, y);
78 | conv2d(image, f1, c1_out, test);
79 | avg_pool2d(c1_out, s2_out);
80 | conv2d(s2_out, f2, c3_out);
81 | avg_pool2d(c3_out, s4_out);
82 | batch_flatten(s4_out,s4_batch_flattened);
83 | dense(s4_batch_flattened,f3,c5_out);
84 | dense(c5_out,f4, c6_out);
85 | dense(c6_out,f5, c7_out);
86 | softmax(c7_out, y_pred);
87 | // store_model(b5);
88 |
89 | }
--------------------------------------------------------------------------------
/tests/old/test_serialization.py:
--------------------------------------------------------------------------------
1 | #
2 | # from polymath.mgdfg.serialization.pmlang_mgdfg import mgdfg_gen, parse_file, compile_to_pb, store_pb, load_pb
3 | #
4 | # def test_symbols_to_mgdfg():
5 | # file = "recommender.pm"
6 | # base_path = f"./pmlang_examples"
7 | # full_path = f"./pmlang_examples/{file}"
8 | #
9 | # pmlang_graph = parse_file(full_path)
10 | #
11 | # lr_comp = pmlang_graph.components["rec_model"]
12 | #
13 | # for var_name, var in lr_comp.symbols.items():
14 | # var.serialize()
15 | #
16 | # def test_node_to_mgdfg():
17 | # file = "recommender.pm"
18 | # base_path = f"./pmlang_examples"
19 | # full_path = f"./pmlang_examples/{file}"
20 | # pmlang_graph = parse_file(full_path)
21 | #
22 | # lr_comp = pmlang_graph.components["rec_model"]
23 | #
24 | # for node in lr_comp.nodes:
25 | # node.serialize()
26 | #
27 | # def test_edge_to_mgdfg():
28 | # file = "recommender.pm"
29 | # base_path = f"./pmlang_examples"
30 | # full_path = f"./pmlang_examples/{file}"
31 | #
32 | # pmlang_graph = parse_file(full_path)
33 | #
34 | # lr_comp = pmlang_graph.components["rec_model"]
35 | #
36 | # for edge in lr_comp.edges:
37 | # edge.serialize()
38 | #
39 | #
40 | # def test_expr_to_mgdfg():
41 | # file = "recommender.pm"
42 | # base_path = f"./pmlang_examples"
43 | # full_path = f"./pmlang_examples/{file}"
44 | # pmlang_graph = parse_file(full_path)
45 | #
46 | # lr_comp = pmlang_graph.components["rec_model"]
47 | #
48 | # for expr in lr_comp.expressions:
49 | # expr.serialize()
50 | #
51 | # def test_comp_to_mgdfg():
52 | # file = "recommender.pm"
53 | # base_path = f"./pmlang_examples"
54 | # full_path = f"./pmlang_examples/{file}"
55 | # pmlang_graph = parse_file(full_path)
56 | # lr_comp = pmlang_graph.components["main"]
57 | # test_pb = lr_comp.serialize()
58 | #
59 | #
60 | # def test_serialization_linear():
61 | # file = "linear.pm"
62 | # base_path = f"./pmlang_examples"
63 | # full_path = f"./pmlang_examples/{file}"
64 | # pb_object = compile_to_pb(full_path)
65 | #
66 | # def test_serialization_backprop():
67 | # file = "backpropagation.pm"
68 | # base_path = f"./pmlang_examples"
69 | # full_path = f"./pmlang_examples/{file}"
70 | # pb_object = compile_to_pb(full_path)
71 | #
72 | # def test_serialization_logistic():
73 | # file = "logistic.pm"
74 | # base_path = f"./pmlang_examples"
75 | # full_path = f"./pmlang_examples/{file}"
76 | # pb_object = compile_to_pb(full_path)
77 | #
78 | #
79 | # def test_serialization_recommender():
80 | # file = "recommender.pm"
81 | # base_path = f"./pmlang_examples"
82 | # full_path = f"./pmlang_examples/{file}"
83 | # pb_object = compile_to_pb(full_path)
84 | #
85 | # def test_serialization_lenet():
86 | # file = "lenet.pm"
87 | # base_path = f"./pmlang_examples"
88 | # full_path = f"./pmlang_examples/{file}"
89 | # pb_object = compile_to_pb(full_path)
90 | #
91 | #
92 | # def test_serialization_yolo():
93 | # file = "yolodnn.pm"
94 | # base_path = f"./pmlang_examples"
95 | # full_path = f"./pmlang_examples/{file}"
96 | # pb_object = compile_to_pb(full_path)
97 | #
98 | # def test_serialization_resnet():
99 | # file = "resnet18.pm"
100 | # base_path = f"./pmlang_examples"
101 | # full_path = f"./pmlang_examples/{file}"
102 | # pb_object = compile_to_pb(full_path)
103 | #
104 | #
105 | #
106 | #
107 | #
108 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/tensorOps/NodeOp.py:
--------------------------------------------------------------------------------
1 | import abc
2 | from polymath.codegen.dnnweavergen.dnnweaver2.tensor import Tensor
3 | from polymath.codegen.dnnweavergen.dnnweaver2.graph import get_default_graph
4 | from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import FQDtype
5 |
6 | class NodeOp(object):
7 | __metaclass__ = abc.ABCMeta
8 |     def __init__(self, node_name, input_tensors=None, dtype=None):
9 | self.graph = get_default_graph()
10 | self.op_type = self._get_op_type()
11 | self.name = self.graph.get_op_name(node_name, self.op_type)
12 |
13 |         self.dtype = dtype if dtype is not None else self._get_output_dtype()
14 |
15 |         if isinstance(input_tensors, Tensor):
16 | input_tensors = tuple([input_tensors])
17 | else:
18 | it = []
19 | for _it in input_tensors:
20 | if isinstance(_it, tuple):
21 | for __it in _it:
22 | it.append(__it)
23 | else:
24 | it.append(_it)
25 | input_tensors = tuple(it)
26 |
27 | # input_str = ','.join([x.__str__() for x in input_tensors])
28 | # print('## Creating op with name {} and inputs {}'.format(node_name, input_str))
29 |
30 | self.input_tensors = input_tensors
31 | self.output_tensors = self._create_output_tensors(self.name)
32 |
33 | self.input_loss = [None]*len(input_tensors)
34 |
35 | self.graph.create_node(self)
36 |
37 | self.incoming_gradients = None
38 |
39 | @abc.abstractmethod
40 | def _get_output_shape(self):
41 | pass
42 |
43 | @abc.abstractmethod
44 | def _get_output_dtype(self):
45 | pass
46 |
47 | def _create_output_tensors(self, name):
48 | out_name = name
49 | t = self.graph.tensor(self._get_output_shape(), out_name, dtype=self.dtype, trainable=False)
50 | t.op = self
51 | return t
52 |
53 | def _get_op_type(self):
54 | return self.__class__.__name__
55 |
56 |     def _autograd(self, x, y, grad_dtype=None):
57 | raise NotImplementedError('Backprop for class {} not implemented'.format(self.__class__.__name__))
58 |
59 | def _get_incoming_gradients(self, y, grad_dtype=FQDtype.FP32):
60 | if self.incoming_gradients is None:
61 | incoming_gradients = [op._autograd(self.output_tensors, y, grad_dtype=grad_dtype) for op in self.output_tensors.output_nodes if not isinstance(op, GradOp)]
62 | if len(incoming_gradients) > 1:
63 | op = AddGrad(incoming_gradients, self.name+'-addGrad', dtype=grad_dtype)
64 | incoming_gradients = [op.output_tensors]
65 | assert len(incoming_gradients) == 1, ' '.join([x.__str__() for x in incoming_gradients])
66 | self.incoming_gradients = tuple(incoming_gradients)
67 | return self.incoming_gradients
68 | else:
69 | return self.incoming_gradients
70 |
71 | @abc.abstractmethod
72 | def get_ops(self):
73 | pass
74 |
75 | class GradOp(NodeOp):
76 | def __init__(self, node_name, dtype=None, input_tensors=None):
77 | if dtype is None:
78 | dtype = get_default_graph().grad_dtype
79 |
80 |         super(GradOp, self).__init__(node_name, input_tensors=input_tensors, dtype=dtype)
81 |
82 | def _autograd(self, x, y, grad_dtype):
83 | raise ValueError('Cannot backpropagate using GradOp {}'.format(self.__class__.__name__))
84 |
85 | class AddGrad(GradOp):
86 | def __init__(self, data, node_name, dtype=None):
87 | self.data = data
88 | input_tensors = data
89 | self.dtype=dtype
90 | super(AddGrad, self).__init__(node_name=node_name, input_tensors=input_tensors, dtype=dtype)
91 |
92 | def _get_output_shape(self):
93 | return self.data[0].shape
94 |
95 | def get_ops(self):
96 | return {}
97 |
--------------------------------------------------------------------------------
/polymath/codegen/dnnweavergen/dnnweaver2/simulator/accelerator.py:
--------------------------------------------------------------------------------
1 | from polymath.codegen.dnnweavergen.dnnweaver2.utils.utils import ceil_a_by_b, log2
2 | from polymath.codegen.dnnweavergen.dnnweaver2.simulator.stats import Stats
3 |
4 | class Accelerator(object):
5 | def __init__(self, N, M, prec, sram, mem_if_width, frequency):
6 | """
7 | accelerator object
8 | """
9 | self.N = N
10 | self.M = M
11 | self.sram = sram
12 | self.mem_if_width = mem_if_width
13 | self.frequency = frequency
14 | self.prec = prec
15 |
16 | def get_mem_read_cycles(self, dst, size):
17 |         """
18 |         Read instruction
19 |         args:
20 |             dst: destination address
21 |             size: size of data in bits
22 |         Returns the number of memory-interface cycles for the transfer.
23 |         """
24 | return ceil_a_by_b(size, self.mem_if_width)
25 |
26 | def get_mem_write_cycles(self, src, size):
27 |         """
28 |         Write instruction
29 |         args:
30 |             src: source address
31 |             size: size of data in bits
32 |         Returns the number of memory-interface cycles for the transfer.
33 |         """
34 | return ceil_a_by_b(size, self.mem_if_width)
35 |
36 |
37 | def get_compute_stats(self, ic, oc, ow, oh, b, kw, kh, iprec, wprec, im2col=False):
38 |         """
39 |         Compute instruction
40 |         args:
41 |             ic: Input Channels
42 |             oc: Output Channels
43 |             ow: Output Width
44 |             oh: Output Height
45 |             kw: Kernel Width
46 |             kh: Kernel Height
47 |             b: Batch Size
48 |             iprec, wprec: input and weight precision in bits
49 |             im2col: boolean. If true, we assume the CPU does im2col; otherwise we do convolutions channel-wise
50 |         """
51 | compute_stats = Stats()
52 | compute_stats.total_cycles = self.get_compute_cycles(ic, oc, ow, oh,
53 | b, kw, kh,
54 | iprec,
55 | wprec,
56 | im2col)
57 | return compute_stats
58 |
59 |
60 | def get_compute_cycles(self, ic, oc, ow, oh, b, kw, kh, iprec, wprec, im2col=False):
61 |         """
62 |         Compute instruction
63 |         args:
64 |             ic: Input Channels
65 |             oc: Output Channels
66 |             ow: Output Width
67 |             oh: Output Height
68 |             kw: Kernel Width
69 |             kh: Kernel Height
70 |             b: Batch Size
71 |             iprec, wprec: input and weight precision in bits
72 |             im2col: boolean. If true, we assume the CPU does im2col; otherwise we do convolutions channel-wise
73 |         """
74 | _oc = ceil_a_by_b(oc, self.M)
75 | _ic = ceil_a_by_b(ic, self.N)
76 |
77 | loops = (b, _oc, oh, ow, kh, kw, _ic)
78 | loops = sorted(loops, reverse=True)
79 |
80 | overhead = 2
81 | cycles = 1
82 | for it in loops:
83 | cycles = overhead + it * cycles
84 |
85 | return cycles
86 |
87 | def __str__(self):
88 | ret = ''
89 | ret += 'Accelerator object'
90 | ret += '\n'
91 | ret += '\tPrecision: {}'.format(self.prec)
92 | ret += '\n'
93 | ret += '\tSystolic array size: {} -rows x {} -columns'.format(
94 | self.N,
95 | self.M)
96 |
97 | ret += '\n'
98 | ret += '\tIBUF size: {:>10,} Bytes'.format(self.sram['ibuf']//8)
99 | ret += '\n'
100 | ret += '\tWBUF size: {:>10,} Bytes'.format(self.sram['wbuf']//8)
101 | ret += '\n'
102 | ret += '\tOBUF size: {:>10,} Bytes'.format(self.sram['obuf']//8)
103 | ret += '\n'
104 | ret += '\tBBUF size: {:>10,} Bytes'.format(self.sram['bbuf']//8)
105 | ret += '\n'
106 | ret += 'Double buffering enabled. Sizes of SRAM are halved'
107 | return ret
108 |
--------------------------------------------------------------------------------
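
The loop-nest model in get_compute_cycles above folds a fixed per-level overhead into a product over the sorted loop trip counts. A minimal standalone sketch of the same recurrence (the array dimensions N=16 and M=16 are illustrative; overhead=2 mirrors the constant in the class):

    from math import ceil

    def compute_cycles(ic, oc, ow, oh, b, kw, kh, N=16, M=16, overhead=2):
        # Channels are tiled across the N-row x M-column systolic array.
        loops = sorted((b, ceil(oc / M), oh, ow, kh, kw, ceil(ic / N)), reverse=True)
        cycles = 1
        for trip_count in loops:
            # Each loop level multiplies the inner cycle count and adds
            # a fixed per-iteration overhead.
            cycles = overhead + trip_count * cycles
        return cycles

    # e.g. a 3x3 convolution, 64 -> 64 channels, 56x56 output, batch 1
    print(compute_cycles(ic=64, oc=64, ow=56, oh=56, b=1, kw=3, kh=3))
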
/polymath/run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #python3 polymath_entry.py visualize --input test/pmlang_examples/test_example.pb
3 |
4 | #python3 polymath_entry.py cmlang --input test/pmlang_examples/test_example.cm
5 | #python3 polymath_entry.py c --input test/pmlang_examples/test_example.pb
6 | #python3 polymath_entry.py tabla --input test/pmlang_examples/test_example.pb
7 | #make clean -C test/pmlang_examples/test_example_ccode/
8 | #make -C test/pmlang_examples/test_example_ccode/
9 |
10 | #python3 polymath_entry.py onnx --input test/onnx_examples/mnist/model.onnx
11 | #python3 polymath_entry.py visualize --input test/onnx_examples/mnist/model.pb
12 |
13 | ##### TABLA HW TESTS ###################
14 | #python3 polymath_entry.py cmlang --input test/pmlang_examples/test_example.cm
15 | #python3 polymath_entry.py instructions --input test/pmlang_examples/test_example.pb
16 |
17 | python3 polymath_entry.py cmlang --input test/tabla_benchmarks/linear_regression/linear.cm
18 | #python3 polymath_entry.py c --input test/tabla_benchmarks/linear_regression/linear.pb
19 |
20 |
21 | #python3 polymath_entry.py tabla --input test/tabla_benchmarks/linear_regression/linear.pb
22 | #make clean -C test/tabla_benchmarks/linear_regression/linear_ccode/
23 | #make -C test/tabla_benchmarks/linear_regression/linear_ccode/
24 |
25 | python3 polymath_entry.py cmlang --input test/tabla_benchmarks/recommender/recommender.cm
26 | #python3 polymath_entry.py visualize --input test/tabla_benchmarks/recommender/recommender.pb
27 | #python3 polymath_entry.py c --input test/tabla_benchmarks/recommender/recommender.pb
28 | #python3 polymath_entry.py tabla --input test/tabla_benchmarks/recommender/recommender.pb
29 |
30 | python3 polymath_entry.py cmlang --input test/tabla_benchmarks/classification/classification.cm
31 | #python3 polymath_entry.py c --input test/tabla_benchmarks/classification/classification.pb
32 | #python3 polymath_entry.py tabla --input test/tabla_benchmarks/classification/classification.pb
33 |
34 | python3 polymath_entry.py cmlang --input test/examples/kmeans_paper.cm
35 | #python3 polymath_entry.py visualize --input test/pmlang_examples/test_example.pb
36 | #python3 polymath_entry.py c --input test/pmlang_examples/kmeans_paper.pb
37 | #make clean -C test/pmlang_examples/test_example_ccode/
38 | #make -C test/pmlang_examples/test_example_ccode/
39 |
40 | ##### TVM HW TESTS ###################
41 |
42 | #python3 polymath_entry.py cmlang --input test/tvm_benchmarks/lenet/lenet.cm
43 | #python3 polymath_entry.py c --input test/tvm_benchmarks/lenet/lenet.pb
44 | #python3 polymath_entry.py translate --input test/tvm_benchmarks/lenet/lenet.pb --tconfig codegen/tvmgen/tvm_config.json
45 |
46 |
47 | #python3 polymath_entry.py cmlang --input test/tvm_benchmarks/resnet18/resnet18.cm
48 | #python3 polymath_entry.py translate --input test/tvm_benchmarks/resnet18/resnet18.pb --tconfig codegen/tvmgen/tvm_config.json
49 |
50 | #python3 polymath_entry.py cmlang --input test/tvm_benchmarks/yolodnn/yolodnn.cm
51 | #python3 polymath_entry.py c --input test/tvm_benchmarks/yolodnn/yolodnn.pb
52 | #python3 polymath_entry.py tvm --input test/tvm_benchmarks/yolodnn/yolodnn.pb
53 |
54 | #python3 polymath_entry.py cmlang --input test/tvm_benchmarks/yolodnn/yolodnn.cm
55 | #python3 polymath_entry.py translate --input test/tvm_benchmarks/yolodnn/yolodnn.pb --tconfig codegen/tvmgen/tvm_config.json
56 |
57 |
58 | #python3 polymath_entry.py cmlang --input test/tvm_benchmarks/yolodnn/yolodnn_dnnweaver.cm
59 | #python3 polymath_entry.py translate --input test/tvm_benchmarks/yolodnn/yolodnn_dnnweaver.pb --tconfig codegen/dnnweavergen/dnnweaver_config.json
60 | #python3 polymath_entry.py instructions --input test/tvm_benchmarks/yolodnn/yolodnn_dnnweaver.pb
61 |
62 |
63 |
64 | ### Code Gen Tests #########
65 |
66 | python3 polymath_entry.py cmlang --input test/tabla_benchmarks/linear_regression/linear_test.cm
67 | python3 polymath_entry.py c --input test/tabla_benchmarks/linear_regression/linear_test.pb
68 |
69 |
70 | #python3 polymath_entry.py cmlang --input test/tvm_benchmarks/lenet/lenet.cm
71 | #python3 polymath_entry.py c --input test/tvm_benchmarks/lenet/lenet.pb
--------------------------------------------------------------------------------
/polymath/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division, absolute_import
2 |
3 | DEFAULT_SHAPES = [(1,), (1,)]
4 | UNSET_SHAPE = tuple([])
5 | SCALAR_IDX = (0,)
6 | # TODO: Need to add all func operators here from base class
7 | from polymath.srdfg.domain import Domain
8 | from polymath.srdfg.base import Node, nodeop, func_op, contains,\
9 | import_, control_dependencies, pow_, EvaluationError, Graph, int_, \
10 | mul, sub, add, call, var_index
11 | from polymath.srdfg.nodes import variable, predicate, assert_, str_format, identity, lazy_constant, try_,\
12 | placeholder, temp, parameter, slice_op, input, state, output, write
13 | from polymath.srdfg.index import index, index_op
14 | from polymath.srdfg.group_nodes import GroupNode, sum, prod, max, min, argmin, argmax, bitreverse
15 | from polymath.srdfg.nonlinear import NonLinear, sigmoid, log2, log10, exp, abs, sqrt, ceil, \
16 | floor, cast, tanh, square, log, rsqrt, clip, logical_not, logical_or
17 | from polymath.srdfg.template import Template
18 | from polymath.srdfg.transformations import Transformation, unsqueeze, squeeze, flatten, gather, \
19 | reshape, gather_elements, transpose, pad, flip
20 | from polymath.srdfg.util import Profiler, visualize, lower_graph, is_iterable
21 | from polymath.srdfg.serialization.serialize import pb_store, pb_load
22 |
23 | from polymath.srdfg.templates.data_analytics import linear_regressor_train,\
24 | svm_classifier_train, logistic_regressor_train, logistic_regressor
25 |
26 | from polymath.srdfg.templates.dnn import conv_bias, depthwise_conv, depthwise_conv_bias, dense, relu, avg_pool2d,\
27 | batch_flatten, softmax, relu1d, dense_sigmoid, batch_norm,\
28 | global_avg_pool, conv, max_pool, dropout, leaky_relu, avg_pool, lrn, \
29 | elem_tanh, elem_sigmoid, elem_cast, conv_transpose, cross_entropy_loss, log_softmax, \
30 | nll_loss, conv_transpose_bias, elem_floor, elem_ceil, elem_clip, elem_exp, topk,\
31 | split, elem_if, elem_sqrt, elem_log, roi_align, elem_where, scatter_elements, \
32 | loop, nms, concat, one_hot, gelu, bias_add
33 |
34 | from polymath.srdfg.templates.fused_dnn import conv_bias_relu,\
35 | conv_bias_relu_max_pool, \
36 | conv_bias_add_relu,\
37 | conv_bias_add_relu_global_avg_pool
38 |
39 | from polymath.srdfg.templates.optimizers import sgd
40 | from polymath.srdfg.templates.gradient_defs import gemm_grad, gemm_grad_no_bias, conv_grad, conv_grad_no_bias, \
41 | flatten_grad, elem_add_grad, relu_grad, batchnorm_grad, global_average_pool_grad, max_pool_grad,\
42 | cross_entropy_loss_grad, average_pool_grad, elem_tanh_grad
43 |
44 | from polymath.srdfg.templates.gradient_defs import AUTODIFF_OPS
45 |
46 | from polymath.srdfg.templates.math import elem_mul, elem_sub, elem_div, reduce_sum, matmul, gemm, \
47 | elem_add, elem_greater, lvmatmul, rvmatmul, gemm_no_bias, reduce_min, reduce_max, elem_min, elem_max,\
48 | elem_less, elem_not, elem_or, elem_and, elem_nonzero, reduce_prod, elem_equal, mean_var, reduce_mean,\
49 | elem_pow, reciprocal
50 |
51 | from polymath.srdfg.templates.tensor_transformations import coarse_flatten, elem_gather, tensor_transpose, onnx_reshape, \
52 | onnx_squeeze, onnx_identity, onnx_resize, \
53 | onnx_unsqueeze, tensor_pad, tensor_flip, tensor_reshape, tensor_squeeze, resize
54 |
55 | from polymath.srdfg.from_onnx.converter import from_onnx, get_attributes, get_value_info_shape, ONNX_OP_NAMES
56 | from polymath.srdfg.from_pytorch.converter import from_pytorch, get_attributes, get_value_info_shape, PYTORCH_OP_NAMES
57 | DNN_TRAINING_OPS = AUTODIFF_OPS + ONNX_OP_NAMES
58 |
59 | from polymath.srdfg.passes import register_pass, Pass
60 | from polymath.srdfg.passes.dnn_passes import UpdateBatchSize, CollectDNNShapes, RenameMultiDimOps, UpdateLayout, \
61 | FuseOps, SplitOps
62 | from polymath.srdfg.passes.compiler_passes import NormalizeGraph, Lower, CountNodes, CountOpTypes
63 | from polymath.srdfg.passes.autodiff import AutoDiffGraph, create_training_graph
64 | from polymath.codegen.tabla.tabla_translate import generate_tabla
65 |
66 | from polymath.tools.srdfg_helpers import print_graph_ops
67 |
--------------------------------------------------------------------------------
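
Everything above is re-exported so downstream code can drive the whole API through a single import. A minimal sketch, patterned on the serialization tests later in this dump (the graph name and values are illustrative):

    import numpy as np
    import polymath as pm

    # Build a tiny dot-product graph symbolically, then evaluate it.
    with pm.Node(name="dot") as graph:
        m = pm.placeholder("m")
        x = pm.placeholder("x", shape=(m))
        w = pm.placeholder("w", shape=(m))
        i = pm.index(0, (m - 1), name="i")
        h = pm.sum([i], (x[i] * w[i]), name="h")

    print(graph("h", {"x": np.arange(3), "w": np.ones(3)}))  # 0 + 1 + 2 = 3.0
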
/polymath/polymath_entry.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | import argparse
3 | import os
4 | from typing import Text
5 | import sys
6 |
7 | project_root = os.getcwd().rsplit("/", 1)[0]
8 | sys.path.insert(0, project_root)
9 |
10 | from antlr4 import FileStream, CommonTokenStream, ParseTreeWalker
11 | from polymath.srdfg.instructions.ir import AxelVM
12 | from polymath.pmlang.antlr_generator.lexer import PMLangLexer
13 | from polymath.pmlang.antlr_generator.parser import PMLangParser
14 | from polymath.pmlang.symbols_old import PMLangListener
15 | from polymath.srdfg.serialization import load_store
16 | from polymath.srdfg import visualize
17 | from polymath.srdfg.onnx_mgdfg.onnx_polymath import ONNXCMStack
18 | from polymath.codegen.loopygen.generator import LoopyGen
19 | from polymath.codegen.tabla.tabla_translate import TablaTranslation
20 | from polymath.codegen.translator import Translator
21 |
22 |
23 | def serialize_pmlang(pmlang_file, output_cmstack, viz=False):
24 | input_file = FileStream(pmlang_file)
25 | lexer = PMLangLexer(input_file)
26 | stream = CommonTokenStream(lexer)
27 | parser = PMLangParser(stream)
28 | tree = parser.pmlang()
29 |
30 | pmlang_graph = PMLangListener(pmlang_file, output_cmstack)
31 | walker = ParseTreeWalker()
32 | walker.walk(pmlang_graph, tree)
33 | output_dir, output_file = os.path.split(pmlang_file)
34 |
35 | outputfile = output_dir + '/' + output_file[:-3] + '.pb'
36 |
37 | load_store.save_program(pmlang_graph.program, outputfile)
38 |
39 |
40 | def generate_axelvm(input_cmstack, output_axelvm, viz=False):
41 | avm = AxelVM(input_cmstack)
42 | avm.generate_axelvm()
43 |
44 | def serialize_onnx(input_proto, output_cmstack, viz=False):
45 | converter = ONNXCMStack(input_proto)
46 | converter.run()
47 |
48 | def visualize_graph(fname):
49 | visualize.visualize_program(fname, rankdir='TB')
50 |
51 | def genccode(input_proto):
52 | code = LoopyGen(input_proto)
53 |
54 | def gentabla(input_proto):
55 | code = TablaTranslation(input_proto)
56 |
57 | def translate(input_proto, config):
58 | code = Translator(input_proto, config, ['map_nodes', 'flatten'])
59 |
60 | def main():
61 | parser = argparse.ArgumentParser(description="PolyMath compilation framework")
62 | parser.add_argument("action",
63 | type=Text,
64 |                         help="The compilation action to perform: 'pmlang' and 'onnx' generate a "
65 |                              "serialized CMStack graph from a PMLang file or an ONNX protobuf file, "
66 |                              "'instructions' generates instruction code from a CMStack file, 'visualize' "
67 |                              "renders a graph, and 'c', 'tabla', and 'translate' generate target code.",
68 | choices=["pmlang", "onnx", "instructions", "visualize", "c", "tabla", "translate"])
69 | parser.add_argument("--input",
70 | type=Text, required=True,
71 | help="The input pmlang, onnx protobuf, or cmstack protobuf file")
72 | parser.add_argument("--tconfig",
73 | type=Text, required=False,
74 | help="The configuration file for translation")
75 |
76 | parser.add_argument("--output",
77 | type=Text, required=False,
78 | help="The output cmstack protobuf filename or "
79 | "instructions filename")
80 |
81 | args = parser.parse_args()
82 |
83 | if args.action == 'pmlang':
84 | serialize_pmlang(args.input, args.output)
85 | elif args.action == 'onnx':
86 | serialize_onnx(args.input, args.output)
87 | elif args.action == 'instructions':
88 | generate_axelvm(args.input, args.output)
89 | elif args.action == 'visualize':
90 | visualize_graph(args.input)
91 | elif args.action == 'c':
92 | genccode(args.input)
93 | elif args.action == 'tabla':
94 | gentabla(args.input)
95 | elif args.action == 'translate':
96 | translate(args.input, args.tconfig)
97 |
98 |
99 | if __name__ == '__main__':
100 | main()
101 |
--------------------------------------------------------------------------------
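
Each action choice above dispatches to one of the helper functions; for example, rendering a serialized graph (a command taken verbatim from run.sh earlier in this dump):

    python3 polymath_entry.py visualize --input test/pmlang_examples/test_example.pb
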
/polymath/pmlang/mapping.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 | from __future__ import unicode_literals
5 |
6 | import logging
7 |
8 | import numpy as np # type: ignore
9 | from polymath.srdfg.serialization.mgdfg_pb2 import Tensor
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 | TENSOR_TYPE_TO_NP_TYPE = {
14 | int(Tensor.FLOAT): np.dtype('float32'),
15 | int(Tensor.UINT8): np.dtype('uint8'),
16 | int(Tensor.INT8): np.dtype('int8'),
17 | int(Tensor.UINT16): np.dtype('uint16'),
18 | int(Tensor.INT16): np.dtype('int16'),
19 | int(Tensor.INT32): np.dtype('int32'),
20 | int(Tensor.INT64): np.dtype('int64'),
21 | int(Tensor.BOOL): np.dtype('bool'),
22 | int(Tensor.FLOAT16): np.dtype('float16'),
23 | int(Tensor.DOUBLE): np.dtype('float64'),
24 | int(Tensor.COMPLEX64): np.dtype('complex64'),
25 | int(Tensor.COMPLEX128): np.dtype('complex128'),
26 | int(Tensor.UINT32): np.dtype('uint32'),
27 | int(Tensor.UINT64): np.dtype('uint64'),
28 |     int(Tensor.STRING): np.dtype(object)
29 | }
30 |
31 | NP_TYPE_TO_TENSOR_TYPE = {v: k for k, v in TENSOR_TYPE_TO_NP_TYPE.items()}
32 |
33 | TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE = {
34 | int(Tensor.FLOAT): int(Tensor.FLOAT),
35 | int(Tensor.UINT8): int(Tensor.INT32),
36 | int(Tensor.INT8): int(Tensor.INT32),
37 | int(Tensor.UINT16): int(Tensor.INT32),
38 | int(Tensor.INT16): int(Tensor.INT32),
39 | int(Tensor.INT32): int(Tensor.INT32),
40 | int(Tensor.INT64): int(Tensor.INT64),
41 | int(Tensor.BOOL): int(Tensor.INT32),
42 | int(Tensor.FLOAT16): int(Tensor.UINT16),
43 | int(Tensor.BFLOAT16): int(Tensor.UINT16),
44 | int(Tensor.DOUBLE): int(Tensor.DOUBLE),
45 | int(Tensor.COMPLEX64): int(Tensor.FLOAT),
46 | int(Tensor.COMPLEX128): int(Tensor.DOUBLE),
47 | int(Tensor.UINT32): int(Tensor.UINT32),
48 | int(Tensor.UINT64): int(Tensor.UINT64),
49 | int(Tensor.STRING): int(Tensor.STRING),
50 | }
51 |
52 | STORAGE_TENSOR_TYPE_TO_FIELD = {
53 | int(Tensor.FLOAT): 'float_data',
54 | int(Tensor.INT32): 'int32_data',
55 | int(Tensor.INT64): 'int64_data',
56 | int(Tensor.UINT16): 'int32_data',
57 | int(Tensor.DOUBLE): 'double_data',
58 | int(Tensor.COMPLEX64): 'float_data',
59 | int(Tensor.COMPLEX128): 'double_data',
60 | int(Tensor.UINT32): 'uint64_data',
61 | int(Tensor.UINT64): 'uint64_data',
62 | int(Tensor.STRING): 'string_data',
63 | int(Tensor.BOOL): 'int32_data',
64 | }
65 |
66 | STRING_TEXT_TO_TENSOR_TYPE = {
67 | 'float' : int(Tensor.FLOAT),
68 | 'int' : int(Tensor.INT32),
69 | 'complex' : int(Tensor.COMPLEX64),
70 | 'str' : int(Tensor.STRING),
71 | 'bool' : int(Tensor.BOOL),
72 | }
73 |
74 | STRING_TEXT_TO_BINEXP = {"*": "mul",
75 | "/": "div",
76 | "+": "add",
77 | "-": "sub",
78 | "<": "tlt",
79 | ">": "tgt",
80 | "<=": "tlte",
81 | ">=": "tgte",
82 | "==": "teq",
83 | "!=": "tne",
84 | "^": "exp",
85 | "%": "mod"
86 | }
87 |
88 | STRING_TEXT_TO_UNEXP = {"+" : "mov",
89 | "-": "neg"}
90 |
91 | STRING_TEXT_TO_FUNCTION = {"pi": "mov",
92 | "log": "log",
93 | "log2": "log2",
94 | "float": "cast",
95 | "int": "cast",
96 | "bin": "cast",
97 | "ceiling": "ceil",
98 | "floor": "floor",
99 | "e": "mov",
100 | "fread": "fread",
101 | "fwrite": "fwrite",
102 | "sigmoid" : "sigmoid"}
103 | STRING_FUNCTION_TO_STRING_TYPE = {"pi": 'float',
104 | "log": 'float',
105 | "log2": 'float',
106 | "float": 'float',
107 | "int": 'int',
108 | "bin": 'int',
109 | "random": 'float',
110 | "ceiling": 'float',
111 | "floor": 'float',
112 | "e": 'float',
113 | "fread": 'str',
114 | "fwrite": 'str',
115 | "sigmoid" : "float"}
--------------------------------------------------------------------------------
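
TENSOR_TYPE_TO_NP_TYPE and its inversion NP_TYPE_TO_TENSOR_TYPE are exact inverses for every dtype they share, which the following sanity-check sketch exercises (assuming the generated mgdfg_pb2 module is importable, as in the file above):

    from polymath.srdfg.serialization.mgdfg_pb2 import Tensor
    from polymath.pmlang.mapping import TENSOR_TYPE_TO_NP_TYPE, NP_TYPE_TO_TENSOR_TYPE

    # Round-trip: protobuf enum -> numpy dtype -> protobuf enum
    for enum_val, np_dtype in TENSOR_TYPE_TO_NP_TYPE.items():
        assert NP_TYPE_TO_TENSOR_TYPE[np_dtype] == enum_val

    print(TENSOR_TYPE_TO_NP_TYPE[int(Tensor.FLOAT)])  # float32
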
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | # Note: To use the 'upload' functionality of this file, you must:
5 | # $ pipenv install twine --dev
6 |
7 | import io
8 | import os
9 | import sys
10 | from shutil import rmtree
11 | import json
12 |
13 | from setuptools import find_packages, setup, Command
14 | # NOTE: long_description is loaded from README.md below, with a fallback to
15 | # DESCRIPTION if the file is missing.
16 |
17 | with open('description.json') as fp:
18 | kwargs = json.load(fp)
19 | # Package meta-data.
20 | NAME = kwargs['name']
21 | DESCRIPTION = kwargs['description']
22 | URL = kwargs['url']
23 | EMAIL = kwargs['author_email']
24 | AUTHOR = kwargs['author']
25 | REQUIRES_PYTHON = kwargs['python_requires']
26 | VERSION = kwargs['version']
27 |
28 | # What packages are required for this module to be executed?
29 | REQUIRED = [
30 | 'numpy', 'pathlib', 'pytools', 'graphviz', 'numproto',
31 | 'six'
32 | ]
33 |
34 | # What packages are optional?
35 | EXTRAS = {
36 | # 'fancy feature': ['django'],
37 | }
38 |
39 | # The rest you shouldn't have to touch too much :)
40 | # ------------------------------------------------
41 | # Except, perhaps the License and Trove Classifiers!
42 | # If you do change the License, remember to change the Trove Classifier for that!
43 |
44 | here = os.path.abspath(os.path.dirname(__file__))
45 |
46 | # Import the README and use it as the long-description.
47 | # Note: this will only work if 'README.md' is present in your MANIFEST.in file!
48 | try:
49 | with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
50 | long_description = '\n' + f.read()
51 | except FileNotFoundError:
52 | long_description = DESCRIPTION
53 |
54 | # Load the package's __version__.py module as a dictionary.
55 | about = {}
56 | if not VERSION:
57 | project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
58 | with open(os.path.join(here, project_slug, '__version__.py')) as f:
59 | exec(f.read(), about)
60 | else:
61 | about['__version__'] = VERSION
62 |
63 |
64 | class UploadCommand(Command):
65 | """Support setup.py upload."""
66 |
67 | description = 'Build and publish the package.'
68 | user_options = []
69 |
70 | @staticmethod
71 | def status(s):
72 | """Prints things in bold."""
73 | print('\033[1m{0}\033[0m'.format(s))
74 |
75 | def initialize_options(self):
76 | pass
77 |
78 | def finalize_options(self):
79 | pass
80 |
81 | def run(self):
82 | try:
83 | self.status('Removing previous builds…')
84 | rmtree(os.path.join(here, 'dist'))
85 | except OSError:
86 | pass
87 |
88 | self.status('Building Source and Wheel (universal) distribution…')
89 | os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
90 |
91 | self.status('Uploading the package to PyPI via Twine…')
92 | os.system('twine upload dist/*')
93 |
94 | self.status('Pushing git tags…')
95 | os.system('git tag v{0}'.format(about['__version__']))
96 | os.system('git push --tags')
97 |
98 | sys.exit()
99 |
100 |
101 | # Where the magic happens:
102 | setup(
103 | name=NAME,
104 | version=about['__version__'],
105 | description=DESCRIPTION,
106 | long_description=long_description,
107 | long_description_content_type='text/markdown',
108 | author=AUTHOR,
109 | author_email=EMAIL,
110 | python_requires=REQUIRES_PYTHON,
111 | url=URL,
112 | packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
113 | # If your package is a single module, use this instead of 'packages':
114 | # py_modules=['polymath'],
115 |
116 | # entry_points={
117 | # 'console_scripts': ['mycli=mymodule:cli'],
118 | # },
119 | install_requires=REQUIRED,
120 | extras_require=EXTRAS,
121 | include_package_data=True,
122 | license='MIT',
123 | classifiers=[
124 | # Trove classifiers
125 | # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
126 | 'License :: OSI Approved :: MIT License',
127 | 'Programming Language :: Python',
128 | 'Programming Language :: Python :: 3',
129 | 'Programming Language :: Python :: 3.6',
130 | 'Programming Language :: Python :: Implementation :: CPython',
131 | 'Programming Language :: Python :: Implementation :: PyPy'
132 | ],
133 | # $ setup.py publish support.
134 | cmdclass={
135 | 'upload': UploadCommand,
136 | },
137 | )
138 |
--------------------------------------------------------------------------------
/tests/reference_implementations/ppo.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.signal
3 | from gym.spaces import Box, Discrete
4 |
5 | import torch
6 | import torch.nn as nn
7 | from torch.distributions.normal import Normal
8 | from torch.distributions.categorical import Categorical
9 |
10 |
11 | def combined_shape(length, shape=None):
12 | if shape is None:
13 | return (length,)
14 | return (length, shape) if np.isscalar(shape) else (length, *shape)
15 |
16 |
17 | def mlp(sizes, activation, output_activation=nn.Identity):
18 | layers = []
19 | for j in range(len(sizes) - 1):
20 | act = activation if j < len(sizes) - 2 else output_activation
21 | layers += [nn.Linear(sizes[j], sizes[j + 1]), act()]
22 | return nn.Sequential(*layers)
23 |
24 |
25 | def count_vars(module):
26 | return sum([np.prod(p.shape) for p in module.parameters()])
27 |
28 |
29 | def discount_cumsum(x, discount):
30 | """
31 | magic from rllab for computing discounted cumulative sums of vectors.
32 |
33 | input:
34 | vector x,
35 | [x0,
36 | x1,
37 | x2]
38 |
39 | output:
40 | [x0 + discount * x1 + discount^2 * x2,
41 | x1 + discount * x2,
42 | x2]
43 | """
44 | return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
45 |
46 |
47 | class Actor(nn.Module):
48 |
49 | def _distribution(self, obs):
50 | raise NotImplementedError
51 |
52 | def _log_prob_from_distribution(self, pi, act):
53 | raise NotImplementedError
54 |
55 | def forward(self, obs, act=None):
56 | # Produce action distributions for given observations, and
57 | # optionally compute the log likelihood of given actions under
58 | # those distributions.
59 | pi = self._distribution(obs)
60 | logp_a = None
61 | if act is not None:
62 | logp_a = self._log_prob_from_distribution(pi, act)
63 | return pi, logp_a
64 |
65 |
66 | class MLPCategoricalActor(Actor):
67 |
68 | def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
69 | super().__init__()
70 | self.logits_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)
71 |
72 | def _distribution(self, obs):
73 | logits = self.logits_net(obs)
74 | return Categorical(logits=logits)
75 |
76 | def _log_prob_from_distribution(self, pi, act):
77 | return pi.log_prob(act)
78 |
79 |
80 | class MLPGaussianActor(Actor):
81 |
82 | def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
83 | super().__init__()
84 | log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
85 | self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))
86 | self.mu_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)
87 |
88 | def _distribution(self, obs):
89 | mu = self.mu_net(obs)
90 | std = torch.exp(self.log_std)
91 | return Normal(mu, std)
92 |
93 | def _log_prob_from_distribution(self, pi, act):
94 | return pi.log_prob(act).sum(axis=-1) # Last axis sum needed for Torch Normal distribution
95 |
96 |
97 | class MLPCritic(nn.Module):
98 |
99 | def __init__(self, obs_dim, hidden_sizes, activation):
100 | super().__init__()
101 | self.v_net = mlp([obs_dim] + list(hidden_sizes) + [1], activation)
102 |
103 | def forward(self, obs):
104 | return torch.squeeze(self.v_net(obs), -1) # Critical to ensure v has right shape.
105 |
106 |
107 | class MLPActorCritic(nn.Module):
108 |
109 | def __init__(self, observation_space, action_space,
110 | hidden_sizes=(64, 64), activation=nn.Tanh):
111 | super().__init__()
112 |
113 | obs_dim = observation_space.shape[0]
114 |
115 | # policy builder depends on action space
116 | if isinstance(action_space, Box):
117 | self.pi = MLPGaussianActor(obs_dim, action_space.shape[0], hidden_sizes, activation)
118 | elif isinstance(action_space, Discrete):
119 | self.pi = MLPCategoricalActor(obs_dim, action_space.n, hidden_sizes, activation)
120 |
121 | # build value function
122 | self.v = MLPCritic(obs_dim, hidden_sizes, activation)
123 |
124 | def step(self, obs):
125 | with torch.no_grad():
126 | pi = self.pi._distribution(obs)
127 | a = pi.sample()
128 | logp_a = self.pi._log_prob_from_distribution(pi, a)
129 | v = self.v(obs)
130 | return a.numpy(), v.numpy(), logp_a.numpy()
131 |
132 | def act(self, obs):
133 | return self.step(obs)[0]
--------------------------------------------------------------------------------
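
The lfilter trick in discount_cumsum computes every suffix-discounted return in a single vectorized pass. A quick check against the direct O(n^2) definition:

    import numpy as np
    import scipy.signal

    def discount_cumsum(x, discount):
        return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

    x = np.array([1.0, 2.0, 3.0])
    gamma = 0.9
    direct = np.array([sum(gamma ** k * x[t + k] for k in range(len(x) - t))
                       for t in range(len(x))])
    np.testing.assert_allclose(discount_cumsum(x, gamma), direct)  # [5.23, 4.7, 3.0]
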
/tests/old/test_pmlang.py:
--------------------------------------------------------------------------------
1 | # from polymath.pmlang.antlr_generator.parser import FileStream, CommonTokenStream, PMLangParser, ParseTreeWalker
2 | # from polymath.pmlang.antlr_generator.lexer import PMLangLexer
3 | # from polymath.pmlang.symbols import PMLangListener
4 | # from polymath.mgdfg.template_utils import visualize_component, parse_statement_str
5 | # from polymath.mgdfg.serialization.pmlang_mgdfg import parse_file
6 | #
7 | # import os
8 | #
9 | # def test_temp():
10 | # class TempAssign(object):
11 | # def __init__(self):
12 | # self.test_val = 1
13 | #
14 | # class TempChild(object):
15 | # def __init__(self, parent):
16 | # self.parent = parent
17 | # temp_assign = TempAssign()
18 | # temp_child = TempChild(temp_assign)
19 | # print(f"Parent: {temp_assign.test_val}\tChild: {temp_child.parent.test_val}")
20 | # temp_child.parent.test_val = 2
21 | # print(f"Parent: {temp_assign.test_val}\tChild: {temp_child.parent.test_val}")
22 | #
23 | # def test_backprop():
24 | # file = "backpropagation.pm"
25 | # base_path = f"./pmlang_examples"
26 | # full_path = f"./pmlang_examples/{file}"
27 | # pmlang_graph = parse_file(full_path)
28 | # ext_full_path = os.path.abspath(base_path) + "/outputs"
29 | # visualize_component(pmlang_graph.components["avg_pool2d"], ext_full_path)
30 | #
31 | #
32 | # def test_linear():
33 | # file = "linear.pm"
34 | # base_path = f"./pmlang_examples"
35 | # full_path = f"./pmlang_examples/{file}"
36 | #
37 | # pmlang_graph = parse_file(full_path)
38 | # ext_full_path = os.path.abspath(base_path) + "/outputs"
39 | # visualize_component(pmlang_graph.components["linear_regression"], ext_full_path)
40 | #
41 | # def test_logistic():
42 | # file = "logistic.pm"
43 | # base_path = f"./pmlang_examples"
44 | # full_path = f"./pmlang_examples/{file}"
45 | # pmlang_graph = parse_file(full_path)
46 | #
47 | # ext_full_path = os.path.abspath(base_path) + "/outputs"
48 | # visualize_component(pmlang_graph.components["main"], ext_full_path)
49 | #
50 | # def test_recommender():
51 | # file = "recommender.pm"
52 | # base_path = f"./pmlang_examples"
53 | # full_path = f"./pmlang_examples/{file}"
54 | # pmlang_graph = parse_file(full_path)
55 | #
56 | # ext_full_path = os.path.abspath(base_path) + "/outputs"
57 | # visualize_component(pmlang_graph.components["rec_model"], ext_full_path)
58 | # # load_store.save_program(pmlang_graph.program, output_mgdfg)
59 | #
60 | # def test_lenet():
61 | # file = "lenet.pm"
62 | # base_path = f"./pmlang_examples"
63 | # full_path = f"./pmlang_examples/{file}"
64 | # pmlang_graph = parse_file(full_path)
65 | #
66 | # ext_full_path = os.path.abspath(base_path) + "/outputs"
67 | # visualize_component(pmlang_graph.components["batch_flatten"], ext_full_path)
68 | #
69 | # def test_yolo():
70 | # file = "yolodnn.pm"
71 | # base_path = f"./pmlang_examples"
72 | # full_path = f"./pmlang_examples/{file}"
73 | # ext_full_path = os.path.abspath(base_path) + "/outputs"
74 | # pmlang_graph = parse_file(full_path)
75 | # visualize_component(pmlang_graph.components["batch_norm"], ext_full_path)
76 | #
77 | #
78 | #
79 | # def test_resnet():
80 | # file = "resnet18.pm"
81 | # base_path = f"./pmlang_examples"
82 | # full_path = f"./pmlang_examples/{file}"
83 | # pmlang_graph = parse_file(full_path)
84 | #
85 | #
86 | # def test_statement_str_parse():
87 | # statements = {"h = sum[i](w[i] * x[i]);": "assignment",
88 | # "w[i] = w[i] - mu*g[i];": "assignment",
89 | # "out[i][j] = 1.0 / (1.0 + e()^(-in[i][j]));": "assignment",
90 | # "out[i][j][y][x] = in[i][j][y][x] > 0 ? in[i][j][y][x] : in[i][j][y][x]*alpha;": "assignment",
91 | # "result[b][c][y][i] = sum[dy][dx][ic](padded_input[b][k][strides*i + dx][strides*y + dy]*kernels[c][k][dy][dx]);": "assignment",
92 | # "add_bias(conv1_out, c1_bias,c1_bias_out);": "expression",
93 | # "float conv1_weight[oc][ic][kernel_size][kernel_size], conv1_out[n][oc][oh][ow];": "declaration",
94 | # "h = sigmoid(sum[i](w[i] * x[i]));": "assignment"
95 | # }
96 | #
97 | # for stat, stat_type in statements.items():
98 | # ast_obj = parse_statement_str(stat)
99 | # if stat_type == "assignment":
100 | # assert isinstance(ast_obj, PMLangParser.Assignment_statementContext)
101 | # elif stat_type == "declaration":
102 | # assert isinstance(ast_obj, PMLangParser.Declaration_statementContext)
103 | # elif stat_type == "expression":
104 | # assert isinstance(ast_obj, PMLangParser.Expression_statementContext)
105 | #
106 | #
107 |
--------------------------------------------------------------------------------
/tests/test_serialization.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import polymath as pm
3 | import numpy as np
4 | import pytest
5 | from .util import logistic, linear, reco, svm, compare_tabla_dfg, set_shape_and_lower,\
6 | unwound_fft, backprop, conv, lenet
7 | import pickle
8 |
9 | CWD = Path(f"{__file__}").parent
10 | BASE_PATH = f"{CWD}/pmlang_examples"
11 | OUTPATH = f"{BASE_PATH}/outputs"
12 |
13 | def test_linear_serialize():
14 |
15 | with pm.Node(name="linear_reg") as graph:
16 | m = pm.placeholder("m")
17 | x_ = pm.placeholder("x", shape=(m))
18 | y_ = pm.placeholder("y")
19 | w_ = pm.placeholder("w", shape=(m))
20 | mu = pm.parameter(name="mu", default=1.0)
21 | i = pm.index(0, (m-1).set_name("m-1"), name="i")
22 | h = pm.sum([i], (x_[i] * w_[i]).set_name("x*w"), name="h")
23 | d = (h-y_).set_name("h-y")
24 | g = (d*x_[i]).set_name("d*x")
25 | w_ = ((w_[i]) - (mu*g[i])).set_name("w_out")
26 | x = np.random.randint(1, 5, 5)
27 | y = np.random.randint(1, 5, 1)[0]
28 | w = np.random.randint(1, 5, 5)
29 | graph_res = graph("w_out", {"x": x, "y": y, "w": w})
30 | actual_res = w - ((np.sum(x*w) - y)*x)*1.0
31 | np.testing.assert_allclose(graph_res, actual_res)
32 | cwd = Path(f"{__file__}").parent
33 | base_path = f"{cwd}/pmlang_examples"
34 | full_path = f"{base_path}/outputs"
35 | pm.pb_store(graph, full_path)
36 |
37 | def test_linear_deserialize():
38 |
39 | graph_name = "linear_reg1"
40 | with pm.Node(name=graph_name) as graph:
41 | m = pm.placeholder("m")
42 | x_ = pm.placeholder("x", shape=(m))
43 | y_ = pm.placeholder("y")
44 | w_ = pm.placeholder("w", shape=(m))
45 | mu = pm.parameter(name="mu", default=1.0)
46 | i = pm.index(0, (m-1).set_name("m-1"), name="i")
47 | h = pm.sum([i], (x_[i] * w_[i]).set_name("x*w"), name="h")
48 | d = (h-y_).set_name("h-y")
49 | g = (d*x_[i]).set_name("d*x")
50 | mug = (mu*g[i]).set_name("mu*g[i]")
51 | w_ = ((w_[i]) - mug).set_name("w_out")
52 | x = np.random.randint(0, 10, 10)
53 | y = np.random.randint(0, 10, 1)[0]
54 | w = np.random.randint(0, 10, 10)
55 |
56 | graph_res = graph("w_out", {"x": x, "y": y, "w": w})
57 | actual_res = w - ((np.sum(x*w) - y)*x)*1.0
58 |
59 | np.testing.assert_allclose(graph_res, actual_res)
60 | cwd = Path(f"{__file__}").parent
61 | base_path = f"{cwd}/pmlang_examples"
62 | full_path = f"{base_path}/outputs"
63 | pb_path = f"{full_path}/{graph_name}.srdfg"
64 | pm.pb_store(graph, full_path)
65 | node = pm.pb_load(pb_path)
66 | new_graph_res = node("w_out", {"x": x, "y": y, "w": w})
67 | np.testing.assert_allclose(graph_res, new_graph_res)
68 | np.testing.assert_allclose(actual_res, new_graph_res)
69 |
70 | # TODO: Figure out why hashed nodes are broken
71 | # assert (node.func_hash()) == (graph.func_hash())
72 |
73 | @pytest.mark.parametrize('m_',[
74 | 55
75 | ])
76 | def test_tabla_linear(m_):
77 | shape_dict = {"m": m_}
78 | graph, input_info, out_info, keys = linear(m=m_, coarse=True)
79 | lgraph, input_info, out_info, keys = linear(m=m_, coarse=False)
80 | cwd = Path(f"{__file__}").parent
81 | base_path = f"{cwd}/pmlang_examples"
82 | full_path = f"{base_path}/outputs"
83 | graph_name = f"{graph.name}_{m_}"
84 | tabla_path = f"{full_path}/{graph_name}_tabla.json"
85 |
86 | tabla_ir, tabla_graph = pm.generate_tabla(graph,
87 | shape_dict,
88 | tabla_path,
89 | context_dict=input_info, add_kwargs=True)
90 | cwd = Path(f"{__file__}").parent
91 | base_path = f"{cwd}/pmlang_examples"
92 | full_path = f"{base_path}/outputs"
93 | pb_path = f"{full_path}/{graph.name}.srdfg"
94 | pm.pb_store(graph, full_path)
95 | node = pm.pb_load(pb_path)
96 |
97 |
98 | @pytest.mark.parametrize('x_shape, w_shape, params', [
99 | ((1, 1, 8, 8), (3, 1, 3, 3), {"stride": 1, "pad": 0}),
100 | ((1, 1, 4, 4), (2, 1, 2, 2), {"stride": 2, "pad": 1}),
101 | ])
102 | def test_conv_embedded_values(x_shape, w_shape, params):
103 | shape_dict = {"n": x_shape[0], "ic": x_shape[1], "ih": x_shape[2], "iw": x_shape[3],
104 | "nf": w_shape[0], "kh": w_shape[2], "kw": w_shape[3],
105 | "stride": params["stride"], "pad": params["pad"]}
106 | graph, input_info0, out_info, keys = conv(x_shape, w_shape, params, coarse=True, debug_matrix=True)
107 |
108 | ngraph, input_info1, out_info, keys = conv(x_shape, w_shape, params, coarse=False, debug_matrix=True)
109 |
110 | lower_pass = pm.Lower({})
111 | lowered = lower_pass(ngraph)
112 |
113 |
114 | pb_path = f"{OUTPATH}/{graph.name}.srdfg"
115 | pm.pb_store(lowered, OUTPATH)
116 | node = pm.pb_load(pb_path)
117 | assert len(node.nodes) == len(lowered.nodes)
118 | assert list(node.nodes.keys()) == list(lowered.nodes.keys())
119 |
120 |
121 |
--------------------------------------------------------------------------------
/README.txt:
--------------------------------------------------------------------------------
1 | TABLA Language Spec
2 | ===
3 |
4 | Grammar
5 | The grammar is written for an LL(1) parser. The grammar rules can be found in the Tabla.g file.
6 | ### NOTE: In variable declarations, only integer literal assignment is allowed (for now) ###
7 | Every statement must end with a semicolon (;).
8 | Variables must be declared before being used.
9 |
10 | There are five data types supported in TABLA:
11 | model_input
12 | model_output
13 | model
14 | gradient
15 | iterator
16 | Variables of these data types must be declared. Multiple variables of the same data type can be declared on the same line, even if they have different dimensions. For example, the following is legal:
17 | model_input i[x], j[y][z];
18 | On the other hand, integer data types are not declared. In other words, if a declared variable does not have a data type associated with it, it is assumed to be of integer data type. For example,
19 | m = 10;
20 | This is a valid statement, even though the variable m does not have a data type associated with it explicitly.
21 | The following code snippet is legal:
22 | m = 15;
23 | model_input x[m];
24 | However, the following is not legal, since n is not declared:
25 | model_output y[n];
26 |
27 | The iterator data type has a special declaration syntax: the variable name is immediately followed by a left bracket, a start point and an end point delimited by a colon, and a right bracket. In other words,
28 | (data type) (variable name)(left bracket)(digit or an integer variable)(colon)(digit or an integer variable)(right bracket)
29 | Using a token notation,
30 | ITERATOR ID LEFT_BRACK (ID | INTLIT) COLON (ID | INTLIT) RIGHT_BRACK SEMI
31 | This is because the iterator data type serves the same purpose as a for loop: the range of values to iterate over is expressed inside the brackets. The values are integers incremented by 1, and either literals or integer variables (or both) may be used.
32 | Here are examples of valid iterator declarations:
33 | iterator i[0:10]; // all iterator arguments as integer literals
34 | iterator j[m:n]; // all iterator arguments as integer variables (that should have been declared before this statement)
35 | iterator k[m:10]; // one iterator argument as an integer variable, the other as an integer literal
36 | iterator l[0:n]; // same as before, but the other way around
37 | The following is illegal, since it does not give the range of values to be looped:
38 | iterator x;
39 |
40 | Lexical Rules
41 | Variable names follow rules similar to those in C. The properties are:
42 | a. Variable names start with either an upper case or a lower case letter, followed by an arbitrary number of alphanumeric characters including underscore (_), and may end with a single quote (').
43 | b. Variables can be of any dimension. For example, the following are all legal:
44 | a b[m] c[x][y] d[i][j][k]
45 |
46 | Comments begin with // and the rest of the line is ignored.
47 |
48 | Operator Precedence
49 | The basic operators follow the following precedence from highest to lowest:
50 | 1. (), []
51 | 2. *
52 | 3. +, -
53 | 4. <, >
54 | 5. =
55 |
56 | Functions
57 | Aside from the basic operators, there are two types of operations: group and nonlinear. Among the group operations, pi and sum operate on two arguments, whereas norm operates on one. However, pi and sum take two arguments only in a semantic sense; syntactically they appear to take one. In other words, between the parentheses, pi and sum do not require an argument followed by a comma and then another argument, as one would normally expect from other languages. For example, a sum function written in C would look like:
58 | sum(2, 3);
59 | However, in TABLA, it would look something like this:
60 | sum[i](x[i] * w[j][i]);
61 | where i and j are iterators. Notice there is no comma (,) inside the parentheses.
62 | Also, since pi and sum operate group-wise, they require an iterator. This is wrapped inside square brackets, as shown above in the sum operator. Syntactically, the sum and pi operators come in the following format:
63 | (SUM | PI) LEFT_BRACK ID RIGHT_BRACK LEFT_PAREN expr RIGHT_PAREN SEMI
64 | whereas the rest of the operators are expressed in the following format:
65 | (NORM | GAUSSIAN | SIGMOID | SIG_SYM | LOG) LEFT_PAREN expr RIGHT_PAREN SEMI
66 |
67 | For now, functions can't be mixed with other basic operators on the same line, but this can be fixed if needed.
68 | The above functions are recognized as language tokens by the parser.
69 |
70 |
71 | Parser Implementation
72 | ===
73 |
74 | Tools used
75 | The ANTLR parser generator (version 4.5) was used to automatically generate the language parser, with Python as the target parser language. The Python version used is 2.7.6.
76 |
77 | To generate lexer and parser:
78 | java -cp "/usr/local/lib/antlr-4.5-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python3 Tabla.g
79 |
80 | To see tokens:
81 | python3 pygrun.py Tabla program --tokens TEST_FILE
82 |
--------------------------------------------------------------------------------
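
Putting the rules above together, a minimal TABLA fragment (illustrative only, not a file from this repository) that declares a model, an iterator, and a group operation might read:

    m = 10;
    model_input x[m];
    model w[m];
    iterator i[0:m];

    sum[i](x[i] * w[i]);
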
/polymath/codegen/dnnweavergen/dnnweaver2/simulator/stats.py:
--------------------------------------------------------------------------------
1 | class Stats(object):
2 | """
3 | Stores the stats from the simulator
4 | """
5 |
6 | def __init__(self):
7 | self.total_cycles = 0
8 | self.mem_stall_cycles = 0
9 | self.namespaces = ['ibuf', 'wbuf', 'obuf', 'bbuf', 'dram']
10 | self.reads = {}
11 | self.writes = {}
12 | for n in self.namespaces:
13 | self.reads[n] = 0
14 | self.writes[n] = 0
15 |
16 | def __iter__(self):
17 | return iter([\
18 | self.total_cycles,
19 | self.mem_stall_cycles,
20 | self.reads['ibuf'],
21 | self.reads['wbuf'],
22 | self.reads['bbuf'],
23 | self.reads['obuf'],
24 | self.reads['dram'],
25 | self.writes['obuf'],
26 | self.writes['dram']
27 | ])
28 |
29 | def __add__(self, other):
30 | ret = Stats()
31 | ret.total_cycles = self.total_cycles + other.total_cycles
32 | ret.mem_stall_cycles = self.mem_stall_cycles + other.mem_stall_cycles
33 | for n in self.namespaces:
34 | ret.reads[n] = self.reads[n] + other.reads[n]
35 | ret.writes[n] = self.writes[n] + other.writes[n]
36 | return ret
37 |
38 | def __mul__(self, other):
39 | ret = Stats()
40 | ret.total_cycles = self.total_cycles * other
41 | ret.mem_stall_cycles = self.mem_stall_cycles * other
42 | for n in self.namespaces:
43 | ret.reads[n] = self.reads[n] * other
44 | ret.writes[n] = self.writes[n] * other
45 | return ret
46 |
47 | def __str__(self):
48 | ret = '\tStats'
49 | ret+= '\n\t{0:>20} : {1:>20,}, '.format('Total cycles', self.total_cycles)
50 | ret+= '\n\t{0:>20} : {1:>20,}, '.format('Memory Stalls', self.mem_stall_cycles)
51 | ret+= '\n\tReads: '
52 | for n in self.namespaces:
53 | ret+= '\n\t{0:>20} rd: {1:>20,} bits, '.format(n, self.reads[n])
54 | ret+= '\n\tWrites: '
55 | for n in self.namespaces:
56 | ret+= '\n\t{0:>20} wr: {1:>20,} bits, '.format(n, self.writes[n])
57 | return ret
58 |
59 | def get_energy(self, energy_cost, dram_cost=6.e-3):
60 | leak_cost, core_dyn_cost, wbuf_read_cost, wbuf_write_cost, ibuf_read_cost, ibuf_write_cost, bbuf_read_cost, bbuf_write_cost, obuf_read_cost, obuf_write_cost = energy_cost
61 | dyn_energy = (self.total_cycles - self.mem_stall_cycles) * core_dyn_cost
62 |
63 | dyn_energy += self.reads['wbuf'] * wbuf_read_cost
64 | dyn_energy += self.writes['wbuf'] * wbuf_write_cost
65 |
66 | dyn_energy += self.reads['ibuf'] * ibuf_read_cost
67 | dyn_energy += self.writes['ibuf'] * ibuf_write_cost
68 |
69 | dyn_energy += self.reads['bbuf'] * bbuf_read_cost
70 | dyn_energy += self.writes['bbuf'] * bbuf_write_cost
71 |
72 | dyn_energy += self.reads['obuf'] * obuf_read_cost
73 | dyn_energy += self.writes['obuf'] * obuf_write_cost
74 |
75 | # Assuming that the DRAM requires 6 pJ/bit
76 | dyn_energy += self.reads['dram'] * dram_cost
77 | dyn_energy += self.writes['dram'] * dram_cost
78 |
79 | # Leakage Energy
80 | leak_energy = self.total_cycles * leak_cost * 0
81 | return dyn_energy + leak_energy
82 |
83 | def get_energy_breakdown(self, energy_cost, dram_cost=6.e-3):
84 | leak_cost, core_dyn_cost, wbuf_read_cost, wbuf_write_cost, ibuf_read_cost, ibuf_write_cost, bbuf_read_cost, bbuf_write_cost, obuf_read_cost, obuf_write_cost = energy_cost
85 | core_energy = (self.total_cycles - self.mem_stall_cycles) * core_dyn_cost
86 | breakdown = [core_energy]
87 |
88 | sram_energy = self.reads['wbuf'] * wbuf_read_cost
89 | sram_energy += self.writes['wbuf'] * wbuf_write_cost
90 |
91 | sram_energy += self.reads['ibuf'] * ibuf_read_cost
92 | sram_energy += self.writes['ibuf'] * ibuf_write_cost
93 |
94 | sram_energy += self.reads['bbuf'] * bbuf_read_cost
95 | sram_energy += self.writes['bbuf'] * bbuf_write_cost
96 |
97 | sram_energy += self.reads['obuf'] * obuf_read_cost
98 | sram_energy += self.writes['obuf'] * obuf_write_cost
99 |
100 | breakdown.append(sram_energy)
101 | breakdown.append(0)
102 | dram_energy = self.reads['dram'] * dram_cost
103 | dram_energy += self.writes['dram'] * dram_cost
104 | breakdown.append(dram_energy)
105 | return breakdown
106 |
107 | def get_energy_from_results(results, acc_obj):
108 | stats = Stats()
109 | stats.total_cycles = int(results['Cycles'])
110 | stats.mem_stall_cycles = int(results['Memory wait cycles'])
111 | stats.reads['ibuf'] = int(results['IBUF Read'])
112 | stats.reads['obuf'] = int(results['OBUF Read'])
113 | stats.reads['wbuf'] = int(results['WBUF Read'])
114 | stats.reads['dram'] = int(results['DRAM Read'])
115 | stats.writes['ibuf'] = int(results['IBUF Write'])
116 | stats.writes['obuf'] = int(results['OBUF Write'])
117 | stats.writes['wbuf'] = int(results['WBUF Write'])
118 | stats.writes['dram'] = int(results['DRAM Write'])
119 | energy = stats.get_energy(acc_obj)
120 | return energy
121 |
122 |
--------------------------------------------------------------------------------
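
Stats objects compose via + (element-wise accumulation) and * (scaling by a repeat count), which is how per-layer simulator results roll up into network totals. A minimal sketch with illustrative counter values:

    from polymath.codegen.dnnweavergen.dnnweaver2.simulator.stats import Stats

    layer = Stats()
    layer.total_cycles = 1000
    layer.reads['dram'] = 4096  # bits

    # A layer executed eight times contributes 8x its counters; one more
    # execution adds on top of that.
    total = layer * 8 + layer
    print(total.total_cycles)   # 9000
    print(total.reads['dram'])  # 36864
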
/polymath/codegen/dnnweavergen/dnnweaver_pass.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | from dnnweaver2.graph import Graph
3 | from dnnweaver2 import get_tensor
4 | from dnnweaver2.scalar.dtypes import FQDtype, FixedPoint
5 | from dnnweaver2.tensor_ops.cnn import conv2D, add, mul_scalar
6 | import logging
7 | from collections import OrderedDict
8 | import numpy as np
9 |
10 |
11 |
12 | @pm.register_pass
13 | class DNNWeaverPass(pm.Pass):
14 |
15 | def __init__(self, debug=False):
16 | self.dnnw_ir = {'graph': None, 'dnnweaver_code': []}
17 | self.last = None
18 | super(DNNWeaverPass, self).__init__(debug=debug)
19 |
20 | def initialize_pass(self, node, ctx):
21 | self.dnnw_ir['graph'] = Graph(node.name, None, log_level=logging.INFO)
22 | return node
23 |
24 | def apply_pass(self, node, ctx):
25 | if node.op_name in DNNWEAVER_OPS:
26 | name, dnnweaver_op = DNNWEAVER_OPS[node.op_name](node, self.dnnw_ir)
27 | self.dnnw_ir[name] = dnnweaver_op
28 | return node
29 |
30 | def finalize_pass(self, node, ctx):
31 | return node
32 |
33 | # def package_pass(self, node, ctx):
34 | # pass
35 |
36 | def dnnweaver_conv2d(node, ctx):
37 | # Need to add implicit bias here?
38 | w_dtype = FixedPoint(16,14)
39 | w_shape = node.args[1].shape
40 | inp_dtype = get_dnnweaver_var(ctx, node.args[0]).dtype
41 | c_type = FixedPoint(16, 10)
42 | biases = get_tensor(shape=(node.args[1].shape[0]),
43 | name='biases',
44 | dtype=FixedPoint(32, w_dtype.frac_bits + inp_dtype.frac_bits))
45 |
46 | pad = (1, node.args[4], node.args[4], 1)
47 | strides = (1, node.args[3], node.args[3], 1)
48 | with ctx['graph'].as_default():
49 | with ctx['graph'].name_scope(node.name):
50 | inputs = get_dnnweaver_var(ctx, node.args[0])
51 | weights = get_dnnweaver_var(ctx, node.args[1])
52 | weights.shape = convert_conv_shape(weights.shape)
53 | if weights.shape[-1] != inputs.shape[-1]:
54 | inputs.shape = convert_conv_shape(inputs.shape)
55 | return node.name, conv2D(inputs, weights, biases, name=node.name, pad=pad, stride=strides, dtype=c_type)
56 |
57 |
58 |
59 | def dnnweaver_conv2d_bias(node, ctx):
60 | w_dtype = FixedPoint(16,14)
61 | inp_dtype = ctx[node.args[0].name].dtype
62 | c_type = FixedPoint(16, 10)
63 | pad = (1, node.args[5], node.args[5], 1)
64 | strides = (1, node.args[4], node.args[4], 1)
65 | with ctx['graph'].as_default():
66 | with ctx['graph'].name_scope(node.name):
67 | inputs = get_dnnweaver_var(ctx, node.args[0])
68 | weights = get_dnnweaver_var(ctx, node.args[1])
69 | weights.shape = convert_conv_shape(weights.shape)
70 | if weights.shape[-1] != inputs.shape[-1]:
71 | inputs.shape = convert_conv_shape(inputs.shape)
72 |
73 |
74 | biases = get_dnnweaver_var(ctx, node.args[2])
75 | return node.name, conv2D(inputs, weights, biases, name=node.name, pad=pad, stride=strides, dtype=c_type)
76 |
77 | def dnnweaver_add(node, ctx):
78 |
79 | with ctx['graph'].as_default():
80 | with ctx['graph'].name_scope(node.name):
81 | a = get_dnnweaver_var(ctx, node.args[0])
82 | b = get_dnnweaver_var(ctx, node.args[1])
83 | return node.name, add([a, b], name=node.name, out_shape=node.shape, dtype=a.dtype)
84 |
85 | def dnnweaver_mul(node, ctx):
86 |
87 | with ctx['graph'].as_default():
88 | with ctx['graph'].name_scope(node.name):
89 |
90 | a = get_dnnweaver_var(ctx, node.args[0])
91 | b = get_dnnweaver_var(ctx, node.args[1])
92 | return node.name, mul_scalar(a, b, name=node.name, dtype=a.dtype)
93 |
94 | def dnnweaver_var(node, ctx):
95 | if isinstance(node, pm.var_index):
96 | # TODO: Fix var index shape resolution during onnx translation
97 | assert node.var.name in ctx
98 | new_tensor = ctx[node.var.name]
99 | else:
100 | new_tensor = get_tensor(shape=node.shape, name=node.name, dtype=FixedPoint(16, 10))
101 | return node.name, new_tensor
102 |
103 | def get_dnnweaver_var(ctx, node):
104 | if node.name not in ctx:
105 |         raise KeyError(f"Unable to find node in context:\n"
106 |                        f"\tName: {node.name}\n"
107 |                        f"\tOp: {node.op_name}")
108 | else:
109 | return ctx[node.name]
110 |
111 | def convert_conv_shape(shape):
112 | lshape = list(shape)
113 | if len(shape) == 3:
114 | return tuple(lshape[1:] + [lshape[0]])
115 | else:
116 | return tuple([lshape[0]] + lshape[2:4] + [lshape[1]])
117 |
118 |
119 | DNNWEAVER_OPS = {
120 | # "avg_pool2d": dnnweaver_avg_pool,
121 | # "max_pool": dnnweaver_max_pool,
122 | # "batch_norm": dnnweaver_batch_norm,
123 | "slice_add": dnnweaver_add,
124 | "slice_mul": dnnweaver_mul,
125 | "conv_bias": dnnweaver_conv2d_bias,
126 | "conv": dnnweaver_conv2d,
127 | "input": dnnweaver_var,
128 | "state": dnnweaver_var,
129 | "var_index": dnnweaver_var,
130 | # "leaky_relu": dnnweaver_leaky_relu,
131 | # "softmax": dnnweaver_softmax,
132 | # "batch_flatten": dnnweaver_batch_flatten,
133 | }
134 |
135 | def _normalize_name(name):
136 | return name.rsplit("/", 1)[-1]
--------------------------------------------------------------------------------
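
convert_conv_shape above reorders channels-first shapes into the channels-last layout DnnWeaver expects: a 3-D (C, H, W) shape becomes (H, W, C) and a 4-D (N, C, H, W) shape becomes (N, H, W, C). A standalone sketch of the same permutation:

    def convert_conv_shape(shape):
        lshape = list(shape)
        if len(shape) == 3:
            # (C, H, W) -> (H, W, C)
            return tuple(lshape[1:] + [lshape[0]])
        # (N, C, H, W) -> (N, H, W, C)
        return tuple([lshape[0]] + lshape[2:4] + [lshape[1]])

    assert convert_conv_shape((3, 32, 32)) == (32, 32, 3)
    assert convert_conv_shape((1, 3, 32, 32)) == (1, 32, 32, 3)
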
/polymath/pmlang/antlr_generator/graphutils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | logger = logging.getLogger(__name__)
3 |
4 |
5 | def check(expression):
6 |
7 | open_tup = tuple('(')
8 | close_tup = tuple(')')
9 |     pairs = dict(zip(open_tup, close_tup))  # avoid shadowing the builtin 'map'
10 | queue = []
11 |
12 | for i in expression:
13 | if i in open_tup:
14 |             queue.append(pairs[i])
15 | elif i in close_tup:
16 | if not queue or i != queue.pop():
17 | return False
18 |     return not queue  # unmatched '(' left in the queue means unbalanced
19 |
20 | def get_text(context):
21 | if context.getText()[0] == '(' and context.getText()[-1] == ')' and check(context.getText()[1:-1]):
22 |
23 | return context.getText()[1:-1]
24 | else:
25 | return context.getText()
26 |
27 | def infer_type(types, inputs, output):
28 |
29 | valid = [('int', 'float'), ('float', 'int')]
30 | type = types[0]
31 | for t in range(len(types)):
32 | if type != types[t]:
33 | if (types[t],type) not in valid:
34 |                 logger.warning("Error! Type mismatch for {} in {}. {} and {}".format(inputs, output, types[t], type))
35 | elif types[t] == 'float':
36 | type = types[t]
37 |
38 | return type
39 |
40 | def infer_dtype(curr_type: str, new_type: str):
41 | if curr_type not in TYPES_PRECEDENCE.keys():
42 | raise TypeError(f"Invalid type for inference: {curr_type}")
43 |
44 | if new_type not in TYPES_PRECEDENCE.keys():
45 | raise TypeError(f"Invalid type for inference: {new_type}")
46 |
47 | if TYPES_PRECEDENCE[curr_type] < TYPES_PRECEDENCE[new_type]:
48 | return curr_type
49 | else:
50 | return new_type
51 |
52 | def infer_dims(dims, inputs, output):
53 | return dims[0]
54 |
55 | def infer_vtype(vtypes, inputs, output):
56 | vtype_vals = {'scalar': 0,
57 | 'var' : 1,
58 | 'index' : 2}
59 | vtype = 'scalar'
60 | for v in vtypes:
61 | if vtype_vals[v] > vtype_vals[vtype]:
62 | vtype = v
63 | return vtype
64 |
65 | def check_edge_nodes(node_list, nodes, edges,cname):
66 | edge_cats = ['declaration', 'argument', 'literal', 'assign', 'assign_declaration']
67 | node_cats = ['argument', 'component', ]
68 | for id, edge in edges.items():
69 | if id not in node_list and edges[id]['vcat'] not in edge_cats:
70 |             logger.warning("{} -- ID: {} not in node list".format(cname, id))
71 | if len(edge['src']) == 0 and edge['vcat'] not in ['argument','literal']:
72 |             logger.warning("{} -- ID: {} does not have a source node".format(cname, id))
73 | elif len(edge['src']) > 1:
74 |             logger.warning("{} -- ID: {} has multiple source nodes"
75 |                            ": {} -- {}".format(cname, id, edge['src'], edge['vcat']))
76 |
77 | for node in node_list:
78 | for i in nodes[node]['inputs']:
79 | if len(edges[i]['src']) == 0:
80 |                 logger.warning("{} -- ID: {} has no sources".format(cname, i))
81 | if node not in edges.keys() and nodes[node]['op_cat'] not in node_cats:
82 |             logger.warning("{} -- ID: {} not in edge list".format(cname, node))
83 |
84 |
85 | GROUP_FUNCTIONS = ['argmax', 'argmin', 'min', 'max', 'sum', 'prod']
86 | FUNCTIONS = ['pi', 'e', 'log2', 'log', 'log10', 'floor', 'ceiling', 'sin', 'cos',
87 | 'ln', 'fread', 'fwrite', 'sigmoid']
88 |
89 | FLOAT_FUNCS = ['pi', 'e', 'log2', 'log', 'log10', 'sin', 'cos', 'sigmoid', 'ln']
90 | INT_FUNCS = ['floor', 'ceiling']
91 |
92 | BINARY_OPS = ['+', '-', '*', '/', '^', '<', '>', '==', '<=', '>=', '!=', '%']
93 |
94 | DATATYPE_SPECIFIERS = ['int', 'float', 'str', 'bool', 'complex']
95 |
96 | STRING_FUNCTION_TO_STRING_TYPE = {"pi": 'float',
97 | "log": 'float',
98 | "log2": 'float',
99 | "float": 'float',
100 | "int": 'int',
101 | "bool": 'bool',
102 | "complex": 'complex',
103 | "bin": 'int',
104 | "random": 'float',
105 | "ceiling": 'float',
106 | "floor": 'float',
107 | "e": 'float',
108 | "fread": 'str',
109 | "fwrite": 'str',
110 | "sigmoid" : "float"}
111 |
112 | STRING_TEXT_TO_BINEXP = {"*": "mul",
113 | "/": "div",
114 | "+": "add",
115 | "-": "sub",
116 | "<": "tlt",
117 | ">": "tgt",
118 | "<=": "tlte",
119 | ">=": "tgte",
120 | "==": "teq",
121 | "!=": "tne",
122 | "^": "exp",
123 | "%": "mod"
124 | }
125 |
126 | STRING_TEXT_TO_UNEXP = {"+" : "mov",
127 | "-": "neg"}
128 |
129 | STRING_TEXT_TO_FUNCTION = {"pi": "mov",
130 | "log": "log",
131 | "log2": "log2",
132 | "float": "cast",
133 | "int": "cast",
134 | "bin": "cast",
135 | "ceiling": "ceil",
136 | "floor": "floor",
137 | "e": "mov",
138 | "fread": "fread",
139 | "fwrite": "fwrite",
140 | "sigmoid" : "sigmoid"}
141 | INSTRUCTION_OPS = ["index", "pred_store"]
142 | NODE_OPS = ["src", "sink"]
143 | NON_COMPONENT_NAMES = BINARY_OPS + DATATYPE_SPECIFIERS + GROUP_FUNCTIONS + FUNCTIONS + INSTRUCTION_OPS + NODE_OPS
144 |
145 | TYPES_PRECEDENCE = {"bool": 0, "float": 1, "int" : 2, "str": 3}
--------------------------------------------------------------------------------
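A note on the dtype helpers above: with TYPES_PRECEDENCE = {"bool": 0, "float": 1, "int": 2, "str": 3}, infer_dtype returns whichever argument has the *smaller* precedence value. A self-contained sketch of that behavior (mirroring, not replacing, the function above):

    TYPES_PRECEDENCE = {"bool": 0, "float": 1, "int": 2, "str": 3}

    def infer_dtype(curr_type, new_type):
        # Mirrors infer_dtype above: the smaller precedence value wins.
        return curr_type if TYPES_PRECEDENCE[curr_type] < TYPES_PRECEDENCE[new_type] else new_type

    assert infer_dtype('int', 'float') == 'float'   # float (1) beats int (2)
    assert infer_dtype('float', 'bool') == 'bool'   # bool (0) beats float (1)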
/polymath/codegen/dnnweavergen/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from polymath.codegen.codegen_utils import get_func
4 | from polymath.codegen.dnnweavergen.dnnweaver2.compiler import *
5 | from polymath.codegen.dnnweavergen.dnnweaver2.simulator.accelerator import Accelerator
6 | from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import FQDtype, FixedPoint
7 |
8 | # Per-layer values for the second FixedPoint argument used by conv ('cout') and
9 | # batch-norm ('bn') ops; consumed in emission order via dtype_counters below.
10 | dtype_map = {
11 | "cout" : [12, 8, 10, 10, 11, 12, 12, 11, 11],
12 | "bn" : [8, 8, 9, 10, 10, 11, 9, 12],
13 | }
14 |
15 | dtype_counters = {
16 | "cout" : 0,
17 | "bn": 0
18 | }
19 | def dnnweaver_init_weight(g,scope, *args, **kwargs):
20 | fname = "cmstack.codegen.dnnweavergen.dnnweaver2.get_tensor"
21 |
22 | with g.as_default():
23 | with g.name_scope(scope):
24 | return get_func(fname)(*args, dtype=FixedPoint(16,14), **kwargs)
25 |
26 | def dnnweaver_init_bias(g, scope, *args, **kwargs):
27 |
28 | fname = "cmstack.codegen.dnnweavergen.dnnweaver2.get_tensor"
29 |
30 | with g.as_default():
31 | with g.name_scope(scope):
32 | return get_func(fname)(*args, dtype=FixedPoint(32,22), **kwargs)
33 |
34 | def dnnweaver_init_data(g,scope, *args, **kwargs):
35 | fname = "cmstack.codegen.dnnweavergen.dnnweaver2.get_tensor"
36 | print(f"Init data name: {kwargs['name']}")
37 |
38 | with g.as_default():
39 | with g.name_scope(scope):
40 | return get_func(fname)(*args, dtype=FQDtype.FXP16, **kwargs)
41 |
42 | def dnnweaver_init_scale(g, scope,input_op, *args, **kwargs):
43 | fname = "cmstack.codegen.dnnweavergen.dnnweaver2.get_tensor"
44 | with g.as_default():
45 | with g.name_scope(scope):
46 | with g.name_scope(input_op):
47 | return get_func(fname)(*args, dtype=FixedPoint(16,9), **kwargs)
48 |
49 |
50 | def dnnweaver_init_mean(g, scope,input_op, *args, **kwargs):
51 | fname = "cmstack.codegen.dnnweavergen.dnnweaver2.get_tensor"
52 | with g.as_default():
53 | with g.name_scope(scope):
54 | with g.name_scope(input_op):
55 | return get_func(fname)(*args, dtype=FixedPoint(16,9), **kwargs)
56 |
57 | def dnnweaver_context(g,scope, fname, *args, **kwargs):
58 |
59 | with g.as_default():
60 | with g.name_scope(scope):
61 | return get_func(fname)(*args, **kwargs)
62 |
63 |
64 | def dnnweaver_conv2d(g,scope, *args, **kwargs):
65 |
66 | fname = "cmstack.codegen.dnnweavergen.dnnweaver2.tensorOps.cnn.conv2D"
67 | with g.as_default():
68 | with g.name_scope(scope):
69 | dtype_counters['cout'] += 1
70 | return get_func(fname)(*args, dtype=FixedPoint(16, dtype_map['cout'][dtype_counters['cout'] - 1]), **kwargs)
71 |
72 | def dnnweaver_batch_norm(g,scope, *args, **kwargs):
73 | fname = "cmstack.codegen.dnnweavergen.dnnweaver2.tensorOps.cnn.batch_norm"
74 |
75 | with g.as_default():
76 | with g.name_scope(scope):
77 | with g.name_scope('batch_norm'):
78 | dtype_counters['bn'] +=1
79 | return get_func(fname)(*args, dtype=FixedPoint(16, dtype_map['bn'][dtype_counters['bn'] - 1]),**kwargs)
80 |
81 | def dnnweaver_max_pool(g,scope, *args, **kwargs):
82 | fname = "cmstack.codegen.dnnweavergen.dnnweaver2.tensorOps.cnn.maxPool"
83 |     if 'pad' in kwargs:
84 | kwargs['pad'] = ((0,0), (0, kwargs['pad'][0]), (0, kwargs['pad'][0]), (0,0))
85 | with g.as_default():
86 | with g.name_scope(scope):
87 | return get_func(fname)(*args, **kwargs)
88 |
89 | def dnnweaver_leaky_relu(g,scope, *args, **kwargs):
90 | fname = "cmstack.codegen.dnnweavergen.dnnweaver2.tensorOps.cnn.leakyReLU"
91 |
92 | with g.as_default():
93 | with g.name_scope(scope):
94 | return get_func(fname)(*args, **kwargs)
95 |
96 | def dnnweaver_nyi(orig_func, *args, **kwargs):
97 | logging.error(f"Function {orig_func} has not yet been implemented. Exiting")
98 | exit(1)
99 |
100 |
101 |
102 | def execute_graph(g):
103 | num_rows = 32
104 | num_cols = 32
105 | bram = {
106 | 'ibuf': num_cols * 16 * 2048 / 2,
107 | 'obuf': num_rows * 64 * 2048 / 2,
108 | 'wbuf': num_cols * num_rows * 16 * 512 / 2,
109 | 'bbuf': num_rows * 32 * 2048 / 2,
110 | }
111 | acc_obj = Accelerator(
112 | N=num_rows, M=num_cols,
113 | prec=16,
114 | mem_if_width=256,
115 | frequency=150e6,
116 | sram=bram
117 | )
118 |
119 |     print(acc_obj)
120 |
121 | log_level = logging.INFO
122 | compiler = GraphCompiler(log_level=log_level)
123 | inst_binary = compiler.compile(graph=g, acc_obj=acc_obj)
124 |
125 | print('Number of instructions: {}'.format(inst_binary.size))
126 |
127 | def show_ops_tensors(graph):
128 | print('*' * 50)
129 |     print('List of ops (nodes) in the graph')
130 |     # print the ops in the graph
131 | for op in graph.op_registry:
132 | print('\tOp name: {}'.format(op))
133 | print('*' * 50)
134 |
135 | print('*' * 50)
136 |     print('List of tensors (edges) in the graph')
137 |     # print the tensors in the graph
138 | for key in graph.tensor_registry.keys():
139 | print('\t{}'.format(graph.tensor_registry[key]))
140 | print('*' * 50)
141 |
142 | def generate_instructions(g):
143 | num_rows = 32
144 | num_cols = 32
145 | bram = {
146 | 'ibuf': num_cols * 16 * 2048 / 2,
147 | 'obuf': num_rows * 64 * 2048 / 2,
148 | 'wbuf': num_cols * num_rows * 16 * 512 / 2,
149 | 'bbuf': num_rows * 32 * 2048 / 2,
150 | }
151 | acc_obj = Accelerator(
152 | N=num_rows, M=num_cols,
153 | prec=16,
154 | mem_if_width=256,
155 | frequency=150e6,
156 | sram=bram
157 | )
158 |
159 |     print(acc_obj)
--------------------------------------------------------------------------------
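Because dtype_map and dtype_counters are module-level state, the conv2d and batch_norm wrappers above are order-dependent and single-use per process: the i-th conv emitted gets the i-th 'cout' entry, presumably tuned for the Tiny YOLOv2 pipeline (yolo_tinyv2.cfg sits in this directory). A small sketch of the resulting per-layer dtypes:

    from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import FixedPoint

    # dtype_map['cout'] from utils.py above; conv layer i receives entry i.
    cout_bits = [12, 8, 10, 10, 11, 12, 12, 11, 11]
    conv_dtypes = [FixedPoint(16, b) for b in cout_bits]
    # e.g. the first conv is emitted with FixedPoint(16, 12), the second with FixedPoint(16, 8).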
/tests/tabla_examples/linear_3.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "children": [
4 | 2,
5 | 3,
6 | 4,
7 | 5,
8 | 6,
9 | 7,
10 | 8,
11 | 9
12 | ],
13 | "dataType": null,
14 | "dist2sink": 9,
15 | "id": 0,
16 | "operation": "source",
17 | "parents": []
18 | },
19 | {
20 | "children": [],
21 | "dataType": null,
22 | "dist2sink": 0,
23 | "id": 1,
24 | "operation": "sink",
25 | "parents": [
26 | 24,
27 | 22,
28 | 20
29 | ]
30 | },
31 | {
32 | "children": [
33 | 19,
34 | 21,
35 | 23
36 | ],
37 | "dataType": "constant",
38 | "dist2sink": 3,
39 | "id": 2,
40 | "operation": "mu",
41 | "parents": [
42 | 0
43 | ]
44 | },
45 | {
46 | "children": [
47 | 10,
48 | 16
49 | ],
50 | "dataType": "model_input",
51 | "dist2sink": 8,
52 | "id": 3,
53 | "operation": "x[0]",
54 | "parents": [
55 | 0
56 | ]
57 | },
58 | {
59 | "children": [
60 | 11,
61 | 17
62 | ],
63 | "dataType": "model_input",
64 | "dist2sink": 8,
65 | "id": 4,
66 | "operation": "x[1]",
67 | "parents": [
68 | 0
69 | ]
70 | },
71 | {
72 | "children": [
73 | 12,
74 | 18
75 | ],
76 | "dataType": "model_input",
77 | "dist2sink": 7,
78 | "id": 5,
79 | "operation": "x[2]",
80 | "parents": [
81 | 0
82 | ]
83 | },
84 | {
85 | "children": [
86 | 15
87 | ],
88 | "dataType": "model_output",
89 | "dist2sink": 5,
90 | "id": 6,
91 | "operation": "y",
92 | "parents": [
93 | 0
94 | ]
95 | },
96 | {
97 | "children": [
98 | 10,
99 | 20
100 | ],
101 | "dataType": "model",
102 | "dist2sink": 8,
103 | "id": 7,
104 | "operation": "w[0]",
105 | "parents": [
106 | 0
107 | ]
108 | },
109 | {
110 | "children": [
111 | 11,
112 | 22
113 | ],
114 | "dataType": "model",
115 | "dist2sink": 8,
116 | "id": 8,
117 | "operation": "w[1]",
118 | "parents": [
119 | 0
120 | ]
121 | },
122 | {
123 | "children": [
124 | 12,
125 | 24
126 | ],
127 | "dataType": "model",
128 | "dist2sink": 7,
129 | "id": 9,
130 | "operation": "w[2]",
131 | "parents": [
132 | 0
133 | ]
134 | },
135 | {
136 | "children": [
137 | 13
138 | ],
139 | "dataType": null,
140 | "dist2sink": 7,
141 | "id": 10,
142 | "operation": "*",
143 | "parents": [
144 | 7,
145 | 3
146 | ]
147 | },
148 | {
149 | "children": [
150 | 13
151 | ],
152 | "dataType": null,
153 | "dist2sink": 7,
154 | "id": 11,
155 | "operation": "*",
156 | "parents": [
157 | 8,
158 | 4
159 | ]
160 | },
161 | {
162 | "children": [
163 | 14
164 | ],
165 | "dataType": null,
166 | "dist2sink": 6,
167 | "id": 12,
168 | "operation": "*",
169 | "parents": [
170 | 9,
171 | 5
172 | ]
173 | },
174 | {
175 | "children": [
176 | 14
177 | ],
178 | "dataType": null,
179 | "dist2sink": 6,
180 | "id": 13,
181 | "operation": "+",
182 | "parents": [
183 | 10,
184 | 11
185 | ]
186 | },
187 | {
188 | "children": [
189 | 15
190 | ],
191 | "dataType": null,
192 | "dist2sink": 5,
193 | "id": 14,
194 | "operation": "+",
195 | "parents": [
196 | 13,
197 | 12
198 | ]
199 | },
200 | {
201 | "children": [
202 | 16,
203 | 17,
204 | 18
205 | ],
206 | "dataType": null,
207 | "dist2sink": 4,
208 | "id": 15,
209 | "operation": "-",
210 | "parents": [
211 | 14,
212 | 6
213 | ]
214 | },
215 | {
216 | "children": [
217 | 19
218 | ],
219 | "dataType": null,
220 | "dist2sink": 3,
221 | "id": 16,
222 | "operation": "*",
223 | "parents": [
224 | 15,
225 | 3
226 | ]
227 | },
228 | {
229 | "children": [
230 | 21
231 | ],
232 | "dataType": null,
233 | "dist2sink": 3,
234 | "id": 17,
235 | "operation": "*",
236 | "parents": [
237 | 15,
238 | 4
239 | ]
240 | },
241 | {
242 | "children": [
243 | 23
244 | ],
245 | "dataType": null,
246 | "dist2sink": 3,
247 | "id": 18,
248 | "operation": "*",
249 | "parents": [
250 | 15,
251 | 5
252 | ]
253 | },
254 | {
255 | "children": [
256 | 20
257 | ],
258 | "dataType": null,
259 | "dist2sink": 2,
260 | "id": 19,
261 | "operation": "*",
262 | "parents": [
263 | 2,
264 | 16
265 | ]
266 | },
267 | {
268 | "children": [
269 | 1
270 | ],
271 | "dataType": "model",
272 | "dist2sink": 1,
273 | "id": 20,
274 | "operation": "-",
275 | "parents": [
276 | 7,
277 | 19
278 | ]
279 | },
280 | {
281 | "children": [
282 | 22
283 | ],
284 | "dataType": null,
285 | "dist2sink": 2,
286 | "id": 21,
287 | "operation": "*",
288 | "parents": [
289 | 2,
290 | 17
291 | ]
292 | },
293 | {
294 | "children": [
295 | 1
296 | ],
297 | "dataType": "model",
298 | "dist2sink": 1,
299 | "id": 22,
300 | "operation": "-",
301 | "parents": [
302 | 8,
303 | 21
304 | ]
305 | },
306 | {
307 | "children": [
308 | 24
309 | ],
310 | "dataType": null,
311 | "dist2sink": 2,
312 | "id": 23,
313 | "operation": "*",
314 | "parents": [
315 | 2,
316 | 18
317 | ]
318 | },
319 | {
320 | "children": [
321 | 1
322 | ],
323 | "dataType": "model",
324 | "dist2sink": 1,
325 | "id": 24,
326 | "operation": "-",
327 | "parents": [
328 | 9,
329 | 23
330 | ]
331 | }
332 | ]
--------------------------------------------------------------------------------
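Both tabla_examples graphs follow the same schema: each entry is a node with an operation, parent/child edge lists, and a dist2sink depth, where sink nodes sit at 0 and every other node is 1 plus the longest path through its children. A sketch that checks that invariant (the path is assumed relative to the repository root):

    import json
    from functools import lru_cache

    with open("tests/tabla_examples/linear_3.json") as f:
        nodes = {n["id"]: n for n in json.load(f)}

    @lru_cache(maxsize=None)
    def dist2sink(nid):
        # Sinks have no children; everyone else is 1 + the longest child path.
        children = nodes[nid]["children"]
        return 1 + max(map(dist2sink, children)) if children else 0

    assert all(dist2sink(n["id"]) == n["dist2sink"] for n in nodes.values())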
/tests/old/test_load_store_mgdfg.py:
--------------------------------------------------------------------------------
1 | # from polymath.pmlang.antlr_generator.parser import FileStream, CommonTokenStream, PMLangParser, ParseTreeWalker
2 | # from polymath.pmlang.antlr_generator.lexer import PMLangLexer
3 | # from polymath.pmlang.symbols import PMLangListener
4 | #
5 | # import os
6 | #
7 | #
8 | # def test_linear_store():
9 | # file = "linear.pm"
10 | # base_path = f"./pmlang_examples"
11 | # full_path = f"./pmlang_examples/{file}"
12 | #
13 | # input_file = FileStream(full_path)
14 | # lexer = PMLangLexer(input_file)
15 | # stream = CommonTokenStream(lexer)
16 | # parser = PMLangParser(stream)
17 | # tree = parser.pmlang()
18 | # output_dir, output_file = os.path.split(file)
19 | # output_mgdfg = f"{base_path}/outputs/{output_file[:-3]}.pb"
20 | # pmlang_graph = PMLangListener(full_path)
21 | # walker = ParseTreeWalker()
22 | # walker.walk(pmlang_graph, tree)
23 | # ext_full_path = os.path.abspath(base_path) + "/outputs"
24 | #
25 | # # def test_yolo():
26 | # # file = "yolodnn.pm"
27 | # # base_path = f"./pmlang_examples"
28 | # # full_path = f"./pmlang_examples/{file}"
29 | # # input_file = FileStream(full_path)
30 | # # lexer = PMLangLexer(input_file)
31 | # # stream = CommonTokenStream(lexer)
32 | # # parser = PMLangParser(stream)
33 | # # tree = parser.pmlang()
34 | # # output_dir, output_file = os.path.split(file)
35 | # # output_mgdfg = f"{base_path}/outputs/{output_file[:-3]}.pb"
36 | # # pmlang_graph = PMLangListener(full_path)
37 | # # walker = ParseTreeWalker()
38 | # # walker.walk(pmlang_graph, tree)
39 | #
40 | # # def test_backprop():
41 | # # file = "backpropagation.pm"
42 | # # base_path = f"./pmlang_examples"
43 | # # full_path = f"./pmlang_examples/{file}"
44 | # # input_file = FileStream(full_path)
45 | # # lexer = PMLangLexer(input_file)
46 | # # stream = CommonTokenStream(lexer)
47 | # # parser = PMLangParser(stream)
48 | # # tree = parser.pmlang()
49 | # # output_dir, output_file = os.path.split(file)
50 | # # output_mgdfg = f"{base_path}/outputs/{output_file[:-3]}.pb"
51 | # # pmlang_graph = PMLangListener(full_path)
52 | # # walker = ParseTreeWalker()
53 | # # walker.walk(pmlang_graph, tree)
54 | # # ext_full_path = os.path.abspath(base_path) + "/outputs"
55 | #
56 | #
57 | #
58 | #
59 | #
60 | #
61 | #
62 | # # def test_logistic():
63 | # # file = "logistic.pm"
64 | # # base_path = f"./pmlang_examples"
65 | # # full_path = f"./pmlang_examples/{file}"
66 | # # input_file = FileStream(full_path)
67 | # # lexer = PMLangLexer(input_file)
68 | # # stream = CommonTokenStream(lexer)
69 | # # parser = PMLangParser(stream)
70 | # # tree = parser.pmlang()
71 | # # output_dir, output_file = os.path.split(file)
72 | # # output_mgdfg = f"{base_path}/outputs/{output_file[:-3]}.pb"
73 | # # pmlang_graph = PMLangListener(full_path)
74 | # # walker = ParseTreeWalker()
75 | # # walker.walk(pmlang_graph, tree)
76 | # # # load_store.save_program(pmlang_graph.program, output_mgdfg)
77 | # #
78 | # # def test_recommender():
79 | # # file = "recommender.pm"
80 | # # base_path = f"./pmlang_examples"
81 | # # full_path = f"./pmlang_examples/{file}"
82 | # # input_file = FileStream(full_path)
83 | # # lexer = PMLangLexer(input_file)
84 | # # stream = CommonTokenStream(lexer)
85 | # # parser = PMLangParser(stream)
86 | # # tree = parser.pmlang()
87 | # # output_dir, output_file = os.path.split(file)
88 | # # output_mgdfg = f"{base_path}/outputs/{output_file[:-3]}.pb"
89 | # # pmlang_graph = PMLangListener(full_path)
90 | # # walker = ParseTreeWalker()
91 | # # walker.walk(pmlang_graph, tree)
92 | # # ext_full_path = os.path.abspath(base_path) + "/outputs"
93 | # # visualize_component(pmlang_graph.components["rec_model"], ext_full_path)
94 | # # # load_store.save_program(pmlang_graph.program, output_mgdfg)
95 | # #
96 | # # def test_lenet():
97 | # # file = "lenet.pm"
98 | # # base_path = f"./pmlang_examples"
99 | # # full_path = f"./pmlang_examples/{file}"
100 | # # input_file = FileStream(full_path)
101 | # # lexer = PMLangLexer(input_file)
102 | # # stream = CommonTokenStream(lexer)
103 | # # parser = PMLangParser(stream)
104 | # # tree = parser.pmlang()
105 | # # output_dir, output_file = os.path.split(file)
106 | # # output_mgdfg = f"{base_path}/outputs/{output_file[:-3]}.pb"
107 | # # pmlang_graph = PMLangListener(full_path)
108 | # # walker = ParseTreeWalker()
109 | # # walker.walk(pmlang_graph, tree)
110 | # # ext_full_path = os.path.abspath(base_path) + "/outputs"
111 | # # visualize_component(pmlang_graph.components["main"], ext_full_path)
112 | # # # load_store.save_program(pmlang_graph.program, output_mgdfg)
113 | # #
114 | # # def test_yolo():
115 | # # file = "yolodnn.pm"
116 | # # base_path = f"./pmlang_examples"
117 | # # full_path = f"./pmlang_examples/{file}"
118 | # # input_file = FileStream(full_path)
119 | # # lexer = PMLangLexer(input_file)
120 | # # stream = CommonTokenStream(lexer)
121 | # # parser = PMLangParser(stream)
122 | # # tree = parser.pmlang()
123 | # # output_dir, output_file = os.path.split(file)
124 | # # output_mgdfg = f"{base_path}/outputs/{output_file[:-3]}.pb"
125 | # # pmlang_graph = PMLangListener(full_path)
126 | # # walker = ParseTreeWalker()
127 | # # walker.walk(pmlang_graph, tree)
128 | # #
129 | # # def test_resnet():
130 | # # file = "resnet18.pm"
131 | # # base_path = f"./pmlang_examples"
132 | # # full_path = f"./pmlang_examples/{file}"
133 | # # input_file = FileStream(full_path)
134 | # # lexer = PMLangLexer(input_file)
135 | # # stream = CommonTokenStream(lexer)
136 | # # parser = PMLangParser(stream)
137 | # # tree = parser.pmlang()
138 | # # output_dir, output_file = os.path.split(file)
139 | # # output_mgdfg = f"{base_path}/outputs/{output_file[:-3]}.pb"
140 | # # pmlang_graph = PMLangListener(full_path)
141 | # # walker = ParseTreeWalker()
142 | # # walker.walk(pmlang_graph, tree)
143 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import sys
4 | sys.path.insert(0, os.path.abspath('..'))
5 |
6 |
7 | with open('../description.json') as fp:
8 | kwargs = json.load(fp)
9 |
10 |
11 | # -- General configuration ------------------------------------------------
12 |
13 | # If your documentation needs a minimal Sphinx version, state it here.
14 | #
15 | # needs_sphinx = '1.0'
16 |
17 | # Add any Sphinx extension module names here, as strings. They can be
18 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
19 | # ones.
20 | extensions = [
21 | 'sphinx.ext.autodoc',
22 | 'sphinx.ext.doctest',
23 | 'sphinx.ext.coverage',
24 | 'sphinx.ext.viewcode',
25 | 'sphinx.ext.githubpages',
26 | 'sphinx.ext.autosummary',
27 | 'sphinx.ext.napoleon',
28 | 'sphinx.ext.intersphinx',
29 | 'matplotlib.sphinxext.plot_directive',
30 | ]
31 |
32 | # Add any paths that contain templates here, relative to this directory.
33 | templates_path = []
34 |
35 | # The suffix(es) of source filenames.
36 | # You can specify multiple suffix as a list of string:
37 | #
38 | # source_suffix = ['.rst', '.md']
39 | source_suffix = '.rst'
40 |
41 | # The master toctree document.
42 | master_doc = 'index'
43 |
44 | # General information about the project.
45 | project = kwargs['name']
46 | author = kwargs['author']
47 |
48 | # The version info for the project you're documenting, acts as replacement for
49 | # |version| and |release|, also used in various other places throughout the
50 | # built documents.
51 | #
52 | # The short X.Y version.
53 | version = kwargs['version']
54 | # The full version, including alpha/beta/rc tags.
55 | release = kwargs['version']
56 |
57 | # The language for content autogenerated by Sphinx. Refer to documentation
58 | # for a list of supported languages.
59 | #
60 | # This is also used if you do content translation via gettext catalogs.
61 | # Usually you set "language" from the command line for these cases.
62 | language = None
63 |
64 | # List of patterns, relative to source directory, that match files and
65 | # directories to ignore when looking for source files.
66 | # This patterns also effect to html_static_path and html_extra_path
67 | exclude_patterns = []
68 |
69 | # The name of the Pygments (syntax highlighting) style to use.
70 | pygments_style = 'sphinx'
71 |
72 | # If true, `todo` and `todoList` produce output, else they produce nothing.
73 | todo_include_todos = False
74 |
75 |
76 | # -- Options for HTML output ----------------------------------------------
77 |
78 | # The theme to use for HTML and HTML Help pages. See the documentation for
79 | # a list of builtin themes.
80 | #
81 | html_theme = 'sphinx_rtd_theme'
82 |
83 | # Theme options are theme-specific and customize the look and feel of a theme
84 | # further. For a list of options available for each theme, see the
85 | # documentation.
86 | #
87 | # html_theme_options = {}
88 |
89 | # Add any paths that contain custom static files (such as style sheets) here,
90 | # relative to this directory. They are copied after the builtin static files,
91 | # so a file named "default.css" will overwrite the builtin "default.css".
92 | html_static_path = []
93 |
94 | # Custom sidebar templates, must be a dictionary that maps document names
95 | # to template names.
96 | #
97 | # This is required for the alabaster theme
98 | # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
99 | html_sidebars = {
100 | '**': [
101 | 'about.html',
102 | 'navigation.html',
103 | 'relations.html', # needs 'show_related': True theme option to display
104 | 'searchbox.html',
105 | 'donate.html',
106 | ]
107 | }
108 |
109 |
110 | # -- Options for HTMLHelp output ------------------------------------------
111 |
112 | # Output file base name for HTML help builder.
113 | htmlhelp_basename = 'polymathdocs'
114 |
115 |
116 | # -- Options for LaTeX output ---------------------------------------------
117 |
118 | latex_elements = {
119 | # The paper size ('letterpaper' or 'a4paper').
120 | #
121 | # 'papersize': 'letterpaper',
122 |
123 | # The font size ('10pt', '11pt' or '12pt').
124 | #
125 | # 'pointsize': '10pt',
126 |
127 | # Additional stuff for the LaTeX preamble.
128 | #
129 | # 'preamble': '',
130 |
131 | # Latex figure (float) alignment
132 | #
133 | # 'figure_align': 'htbp',
134 | }
135 |
136 | # Grouping the document tree into LaTeX files. List of tuples
137 | # (source start file, target name, title,
138 | # author, documentclass [howto, manual, or own class]).
139 | latex_documents = [
140 | (master_doc, 'polymath.tex', 'PolyMath Documentation',
141 | 'Sean Kinzer', 'manual'),
142 | ]
143 |
144 |
145 | # -- Options for manual page output ---------------------------------------
146 |
147 | # One entry per manual page. List of tuples
148 | # (source start file, name, description, authors, manual section).
149 | man_pages = [
150 | (master_doc, 'polymath', 'PolyMath Documentation',
151 | [author], 1)
152 | ]
153 |
154 |
155 | # -- Options for Texinfo output -------------------------------------------
156 |
157 | # Grouping the document tree into Texinfo files. List of tuples
158 | # (source start file, target name, title, author,
159 | # dir menu entry, description, category)
160 | texinfo_documents = [
161 | (master_doc, 'polymath', 'PolyMath Documentation',
162 | author, 'polymath', 'One line description of project.',
163 | 'Miscellaneous'),
164 | ]
165 |
166 | # Disable class members (https://github.com/phn/pytpm/issues/3#issuecomment-12133978)
167 | numpydoc_show_class_members = False
168 |
169 | # Use Numpy style
170 | napoleon_google_docstring = False
171 | napoleon_numpy_docstring = True
172 |
173 | # Ignore some warnings (https://stackoverflow.com/a/30624034/1150961)
174 | nitpick_ignore = [
175 | ('py:class', 'iterable'),
176 | ('py:class', 'None'),
177 | ('py:class', 'callable'),
178 | ('py:class', 'Any'),
179 | ('py:class', 'indexed.IndexedOrderedDict'),
180 | ]
181 |
182 | intersphinx_mapping = {'python': ('https://docs.python.org/3.7', None)}
183 | autodoc_member_order = 'bysource'
--------------------------------------------------------------------------------
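This conf.py pulls its project metadata from description.json at the repository root; the keys consumed here are name, author, and version. A minimal sketch of that file (the version string is illustrative; the author matches latex_documents above):

    {
        "name": "polymath",
        "author": "Sean Kinzer",
        "version": "0.1"
    }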
/polymath/codegen/dnnweavergen/dnnweaver2/tf_utils/helper.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 |
4 | #----- Different NN layers and functions required to make a DNN -----#
5 | def conv_simple(input, kernel, bias, s_h, s_w, padding='SAME', relu=True):
6 | x = tf.nn.conv2d(input, kernel, strides=[1, s_h, s_w, 1], padding=padding)
7 | x = tf.nn.bias_add(x, bias)
8 | if relu:
9 | x = tf.nn.relu(x)
10 | return x
11 |
12 | def conv(input, kernel, biases, s_h, s_w, relu=True, padding='SAME', group=1):
13 | convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
14 | if group == 1:
15 | output = convolve(input, kernel)
16 | else:
17 | input_groups = tf.split(input, group, 3)
18 | kernel_groups = tf.split(kernel, group, 3)
19 | output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
20 | output = tf.concat(output_groups, 3)
21 | output = tf.nn.bias_add(output, biases)
22 | if relu:
23 | output = tf.nn.relu(output)
24 | return output
25 |
26 | def max_pool(input, k_h, k_w, s_h, s_w, padding='SAME'):
27 | return tf.nn.max_pool(input, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
28 |
29 | def softmax(input):
30 |     input_shape = [v.value for v in input.get_shape()]
31 |     if len(input_shape) > 2:
32 |         if input_shape[1] == 1 and input_shape[2] == 1:
33 |             input = tf.squeeze(input, axis=[1, 2])
34 |         else:
35 |             raise ValueError('Rank 2 tensor input expected for softmax!')
36 |     return tf.nn.softmax(input)
37 |
38 | def lrn(input, radius, alpha, beta, bias=1.0):
39 | return tf.nn.local_response_normalization(input, depth_radius=radius, alpha=alpha, beta=beta, bias=bias)
40 |
41 | def fc(input, weights, biases, relu=True):
42 | input_shape = input.get_shape()
43 | if input_shape.ndims == 4:
44 | dim = 1
45 | for d in input_shape[1:].as_list():
46 | dim *= d
47 | feed_in = tf.reshape(input, [-1, dim])
48 | else:
49 | feed_in, dim = (input, input_shape[-1].value)
50 | op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
51 | return op(feed_in, weights, biases)
52 |
53 |
54 | #----- Classes and methods required to get network data specification, e.g., batch size, crop size, etc. -----#
55 | class DataSpec(object):
56 | def __init__(self, batch_size, scale_size, crop_size, isotropic, channels=3, mean=None, bgr=True):
57 | self.batch_size = batch_size
58 | self.scale_size = scale_size
59 | self.isotropic = isotropic
60 | self.crop_size = crop_size
61 | self.channels = channels
62 | self.mean = mean if mean is not None else np.array([104., 117., 124.])
63 |         self.expects_bgr = bgr
64 |
65 | def alexnet_spec(batch_size=20):
66 | return DataSpec(batch_size=batch_size, scale_size=256, crop_size=227, isotropic=False)
67 |
68 | def lenet_spec(batch_size=1):
69 | return DataSpec(batch_size=batch_size, scale_size=28, crop_size=28, isotropic=False, channels=1)
70 |
71 | def std_spec(batch_size, isotropic=True):
72 | return DataSpec(batch_size=batch_size, scale_size=256, crop_size=224, isotropic=isotropic)
73 |
74 | MODEL_DATA_SPECS = {
75 | 'AlexNet': alexnet_spec(),
76 | 'SqueezeNet': alexnet_spec(),
77 | 'CaffeNet': alexnet_spec(),
78 | 'GoogleNet': std_spec(batch_size=20, isotropic=False),
79 | 'ResNet50': std_spec(batch_size=25),
80 | 'ResNet101': std_spec(batch_size=25),
81 | 'ResNet152': std_spec(batch_size=25),
82 | 'NiN': std_spec(batch_size=20),
83 | 'VGG16': std_spec(batch_size=1),
84 | 'LeNet': lenet_spec()
85 | }
86 |
87 | def get_data_spec(model_class):
88 | return MODEL_DATA_SPECS[model_class]
89 |
90 | #----- Methods required to load a trained network ckpt file and return W/B/Names -----#
91 | #----- These already work for ckpt converted from caffe, not necessarily for tf saved ones -----#
92 |
93 | # Retrieve W/B/names as dictionaries of np arrays; example use case: weights['conv1']
94 | def load_netparams(ckpt_path):
95 |     data_dict = np.load(ckpt_path, allow_pickle=True).item()
96 |     weights = {}
97 |     biases = {}
98 |     layer_names = []
99 |     for op_name in data_dict:
100 |         layer_names.append(op_name)
101 |         for param_name, data in data_dict[op_name].items():
102 |             if param_name == 'weights':
103 |                 weights[op_name] = data
104 |             elif param_name == 'biases':
105 |                 biases[op_name] = data
106 |             assert param_name in ('weights', 'biases')
107 |     return weights, biases, layer_names
108 |
109 | #Retrieve W/B/Names as dictionaries of tensorflow variables
110 | def load_netparams_tf(ckpt_path, trainable=False):
111 |     data_dict = np.load(ckpt_path, allow_pickle=True).item()
112 |     weights = {}
113 |     biases = {}
114 |     layer_names = []
115 |     for op_name in data_dict:
116 |         layer_names.append(op_name)
117 |         with tf.variable_scope(op_name):
118 |             for param_name, data in data_dict[op_name].items():
119 |                 if param_name == 'weights':
120 |                     weights[op_name] = tf.get_variable(name=param_name, initializer=tf.constant(data), trainable=trainable)
121 |                 elif param_name == 'biases':
122 |                     biases[op_name] = tf.get_variable(name=param_name, initializer=tf.constant(data), trainable=trainable)
123 |                 assert param_name in ('weights', 'biases')
124 |     return weights, biases, layer_names
125 |
126 | #Simple example of quantizing the network parameters
127 | def load_netparams_tf_quantize(ckpt_path, trainable=False):
128 |     data_dict = np.load(ckpt_path, allow_pickle=True).item()
129 |     weights = {}
130 |     biases = {}
131 |     layer_names = []
132 |     for op_name in data_dict:
133 |         layer_names.append(op_name)
134 |         with tf.variable_scope(op_name):
135 |             for param_name, data_temp in data_dict[op_name].items():
136 |                 # data = data_temp if op_name == 'conv1' else ((np.array(data_temp * 126, int)).astype(np.float32)) / 126
137 |                 data = ((np.array(data_temp * 256, int)).astype(np.float32)) / 256  # truncate to a 1/256 grid (8 fractional bits)
138 |                 if param_name == 'weights':
139 |                     weights[op_name] = tf.get_variable(name=param_name, initializer=tf.constant(data), trainable=trainable)
140 |                 elif param_name == 'biases':
141 |                     biases[op_name] = tf.get_variable(name=param_name, initializer=tf.constant(data), trainable=trainable)
142 |                 assert param_name in ('weights', 'biases')
143 |     return weights, biases, layer_names
144 |
145 |
--------------------------------------------------------------------------------
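load_netparams_tf_quantize snaps every parameter onto a 1/256 grid (8 fractional bits, truncating toward zero) before wrapping it in a TF variable. The rounding step in isolation:

    import numpy as np

    w = np.array([0.1234567, -0.9999, 0.5], dtype=np.float32)
    q = (np.array(w * 256, int).astype(np.float32)) / 256
    # q is now [31/256, -255/256, 128/256]; int() truncation loses up to 1/256 per weight.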
/tests/tabla_examples/logistic_3.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "children": [
4 | 2,
5 | 3,
6 | 4,
7 | 5,
8 | 6,
9 | 7,
10 | 8,
11 | 9
12 | ],
13 | "dataType": null,
14 | "dist2sink": 10,
15 | "id": 0,
16 | "operation": "source",
17 | "parents": []
18 | },
19 | {
20 | "children": [],
21 | "dataType": null,
22 | "dist2sink": 0,
23 | "id": 1,
24 | "operation": "sink",
25 | "parents": [
26 | 25,
27 | 23,
28 | 21
29 | ]
30 | },
31 | {
32 | "children": [
33 | 20,
34 | 22,
35 | 24
36 | ],
37 | "dataType": "constant",
38 | "dist2sink": 3,
39 | "id": 2,
40 | "operation": "mu",
41 | "parents": [
42 | 0
43 | ]
44 | },
45 | {
46 | "children": [
47 | 10,
48 | 17
49 | ],
50 | "dataType": "model_input",
51 | "dist2sink": 9,
52 | "id": 3,
53 | "operation": "x[0]",
54 | "parents": [
55 | 0
56 | ]
57 | },
58 | {
59 | "children": [
60 | 11,
61 | 18
62 | ],
63 | "dataType": "model_input",
64 | "dist2sink": 9,
65 | "id": 4,
66 | "operation": "x[1]",
67 | "parents": [
68 | 0
69 | ]
70 | },
71 | {
72 | "children": [
73 | 12,
74 | 19
75 | ],
76 | "dataType": "model_input",
77 | "dist2sink": 8,
78 | "id": 5,
79 | "operation": "x[2]",
80 | "parents": [
81 | 0
82 | ]
83 | },
84 | {
85 | "children": [
86 | 16
87 | ],
88 | "dataType": "model_output",
89 | "dist2sink": 5,
90 | "id": 6,
91 | "operation": "y",
92 | "parents": [
93 | 0
94 | ]
95 | },
96 | {
97 | "children": [
98 | 10,
99 | 21
100 | ],
101 | "dataType": "model",
102 | "dist2sink": 9,
103 | "id": 7,
104 | "operation": "w[0]",
105 | "parents": [
106 | 0
107 | ]
108 | },
109 | {
110 | "children": [
111 | 11,
112 | 23
113 | ],
114 | "dataType": "model",
115 | "dist2sink": 9,
116 | "id": 8,
117 | "operation": "w[1]",
118 | "parents": [
119 | 0
120 | ]
121 | },
122 | {
123 | "children": [
124 | 12,
125 | 25
126 | ],
127 | "dataType": "model",
128 | "dist2sink": 8,
129 | "id": 9,
130 | "operation": "w[2]",
131 | "parents": [
132 | 0
133 | ]
134 | },
135 | {
136 | "children": [
137 | 13
138 | ],
139 | "dataType": null,
140 | "dist2sink": 8,
141 | "id": 10,
142 | "operation": "*",
143 | "parents": [
144 | 7,
145 | 3
146 | ]
147 | },
148 | {
149 | "children": [
150 | 13
151 | ],
152 | "dataType": null,
153 | "dist2sink": 8,
154 | "id": 11,
155 | "operation": "*",
156 | "parents": [
157 | 8,
158 | 4
159 | ]
160 | },
161 | {
162 | "children": [
163 | 14
164 | ],
165 | "dataType": null,
166 | "dist2sink": 7,
167 | "id": 12,
168 | "operation": "*",
169 | "parents": [
170 | 9,
171 | 5
172 | ]
173 | },
174 | {
175 | "children": [
176 | 14
177 | ],
178 | "dataType": null,
179 | "dist2sink": 7,
180 | "id": 13,
181 | "operation": "+",
182 | "parents": [
183 | 10,
184 | 11
185 | ]
186 | },
187 | {
188 | "children": [
189 | 15
190 | ],
191 | "dataType": null,
192 | "dist2sink": 6,
193 | "id": 14,
194 | "operation": "+",
195 | "parents": [
196 | 13,
197 | 12
198 | ]
199 | },
200 | {
201 | "children": [
202 | 16
203 | ],
204 | "dataType": null,
205 | "dist2sink": 5,
206 | "id": 15,
207 | "operation": "sigmoid",
208 | "parents": [
209 | 14
210 | ]
211 | },
212 | {
213 | "children": [
214 | 17,
215 | 18,
216 | 19
217 | ],
218 | "dataType": null,
219 | "dist2sink": 4,
220 | "id": 16,
221 | "operation": "-",
222 | "parents": [
223 | 15,
224 | 6
225 | ]
226 | },
227 | {
228 | "children": [
229 | 20
230 | ],
231 | "dataType": null,
232 | "dist2sink": 3,
233 | "id": 17,
234 | "operation": "*",
235 | "parents": [
236 | 16,
237 | 3
238 | ]
239 | },
240 | {
241 | "children": [
242 | 22
243 | ],
244 | "dataType": null,
245 | "dist2sink": 3,
246 | "id": 18,
247 | "operation": "*",
248 | "parents": [
249 | 16,
250 | 4
251 | ]
252 | },
253 | {
254 | "children": [
255 | 24
256 | ],
257 | "dataType": null,
258 | "dist2sink": 3,
259 | "id": 19,
260 | "operation": "*",
261 | "parents": [
262 | 16,
263 | 5
264 | ]
265 | },
266 | {
267 | "children": [
268 | 21
269 | ],
270 | "dataType": null,
271 | "dist2sink": 2,
272 | "id": 20,
273 | "operation": "*",
274 | "parents": [
275 | 2,
276 | 17
277 | ]
278 | },
279 | {
280 | "children": [
281 | 1
282 | ],
283 | "dataType": "model",
284 | "dist2sink": 1,
285 | "id": 21,
286 | "operation": "-",
287 | "parents": [
288 | 7,
289 | 20
290 | ]
291 | },
292 | {
293 | "children": [
294 | 23
295 | ],
296 | "dataType": null,
297 | "dist2sink": 2,
298 | "id": 22,
299 | "operation": "*",
300 | "parents": [
301 | 2,
302 | 18
303 | ]
304 | },
305 | {
306 | "children": [
307 | 1
308 | ],
309 | "dataType": "model",
310 | "dist2sink": 1,
311 | "id": 23,
312 | "operation": "-",
313 | "parents": [
314 | 8,
315 | 22
316 | ]
317 | },
318 | {
319 | "children": [
320 | 25
321 | ],
322 | "dataType": null,
323 | "dist2sink": 2,
324 | "id": 24,
325 | "operation": "*",
326 | "parents": [
327 | 2,
328 | 19
329 | ]
330 | },
331 | {
332 | "children": [
333 | 1
334 | ],
335 | "dataType": "model",
336 | "dist2sink": 1,
337 | "id": 25,
338 | "operation": "-",
339 | "parents": [
340 | 9,
341 | 24
342 | ]
343 | }
344 | ]
--------------------------------------------------------------------------------
/tests/test_transformations.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | import numpy as np
3 | import pytest
4 |
5 | def test_unsqueeze():
6 | with pm.Node(name="indexop") as graph:
7 | m = pm.parameter(name="m")
8 | n = pm.parameter(name="n")
9 | x = pm.state("x", shape=(m, n))
10 | x_us = pm.unsqueeze(x, axis=1, name="res")
11 | m_ = 5
12 | n_ = 3
13 | x_ = np.random.randint(0, 10, (m_, n_))
14 |
15 | input_info = {"m": m_, "n": n_, "x": x_}
16 | res = graph("res", input_info)
17 |
18 | np.testing.assert_allclose(res, np.expand_dims(x_, axis=1))
19 |
20 | def test_squeeze():
21 | with pm.Node(name="indexop") as graph:
22 | m = pm.parameter(name="m")
23 | n = pm.parameter(name="n")
24 | x = pm.state("x", shape=(m, n))
25 | x_us = pm.squeeze(x, axis=None, name="res")
26 | m_ = 5
27 | n_ = 1
28 | x_ = np.random.randint(0, 10, (m_, n_))
29 | input_info = {"m": m_, "n": n_, "x": x_}
30 | res = graph("res", input_info)
31 |
32 | np.testing.assert_allclose(res, np.squeeze(x_, axis=1))
33 |
34 | def test_flatten():
35 | shape = (2, 3, 4, 5)
36 | a = np.random.random_sample(shape).astype(np.float32)
37 | for i in range(len(shape)):
38 | with pm.Node(name="flatten_op") as graph:
39 | x = pm.state("x", shape=shape)
40 | x_us = pm.flatten(x, axis=i, name="res")
41 |
42 | new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)
43 | b = np.reshape(a, new_shape)
44 | pm_b = graph("res", {"x": a})
45 | np.testing.assert_allclose(pm_b, b)
46 |
47 | def quick_flatten(a, new_shape):
48 | from itertools import product
49 | b = np.empty(new_shape)
50 | in_idx = tuple([list(range(i)) for i in a.shape])
51 | out_idx = tuple([list(range(i)) for i in new_shape])
52 | perm_in = list(product(*in_idx))
53 | perm_out = list(product(*out_idx))
54 | for a_idx, b_idx in zip(tuple(perm_in), tuple(perm_out)):
55 | b[b_idx] = a[a_idx]
56 | return b
57 |
58 |
59 | def test_gather0():
60 | axis=0
61 | x = np.random.randn(5, 4, 3, 2).astype(np.float32)
62 | idx = np.array([0, 1, 3])
63 |
64 | with pm.Node(name="gather_op") as graph:
65 | data = pm.input(name="input", shape=x.shape)
66 | indices = pm.input(name="indices", shape=idx.shape)
67 | out = pm.gather(data, indices, axis=axis, name="res")
68 |
69 | pm_y = graph("res", {"input": x, "indices": idx})
70 | np_y = np.take(x, idx, axis=axis)
71 | np.testing.assert_allclose(np_y, pm_y)
72 |
73 | def test_gather2d():
74 | axis = 1
75 | x = np.random.randn(3, 3).astype(np.float32)
76 | idx = np.array([[0, 2]])
77 |
78 | with pm.Node(name="gather_op") as graph:
79 | data = pm.input(name="input", shape=x.shape)
80 | indices = pm.input(name="indices", shape=idx.shape)
81 | out = pm.gather(data, indices, axis=axis, name="res")
82 |
83 | pm_y = graph("res", {"input": x, "indices": idx})
84 | np_y = np.take(x, idx, axis=axis)
85 | np.testing.assert_allclose(np_y, pm_y)
86 |
87 | def test_gather1():
88 | axis = 1
89 | x = np.random.randn(5, 4, 3, 2).astype(np.float32)
90 | idx = np.array([0, 1, 3])
91 |
92 | with pm.Node(name="gather_op") as graph:
93 | data = pm.input(name="input", shape=x.shape)
94 | indices = pm.input(name="indices", shape=idx.shape)
95 | out = pm.gather(data, indices, axis=axis, name="res")
96 |
97 | pm_y = graph("res", {"input": x, "indices": idx})
98 | np_y = np.take(x, idx, axis=axis)
99 | np.testing.assert_allclose(np_y, pm_y)
100 |
101 | @pytest.mark.parametrize('in_shape, out_shape',[
102 | ((5, 100,), (1, 500,)),
103 | ((5, 100,), (5, 25, 4)),
104 | ])
105 | def test_reshape(in_shape, out_shape):
106 | x = np.zeros(in_shape).astype(np.float32)
107 |
108 | with pm.Node(name="reshape_op") as graph:
109 | data = pm.input(name="input", shape=x.shape)
110 | out = pm.reshape(data, out_shape, name="res")
111 |
112 | pm_y = graph("res", {"input": x})
113 | np_y = np.reshape(x, out_shape)
114 | np.testing.assert_allclose(np_y, pm_y)
115 | assert np_y.shape == pm_y.shape
116 |
117 |
118 | @pytest.mark.parametrize('in_shape, axis',[
119 | ((5, 100,), (1, 0)),
120 | ((3, 4, 5, 6), (3, 2, 1, 0)),
121 | ((3, 4, 5, 6), (1, 0, 2, 3)),
122 | ])
123 | def test_transpose(in_shape, axis):
124 | x = np.random.randn(*in_shape).astype(np.float32)
125 |
126 | with pm.Node(name="transpose_op") as graph:
127 | data = pm.input(name="input", shape=x.shape)
128 | out = pm.transpose(data, axis, name="res")
129 |
130 | np_y = np.transpose(x, axis)
131 | pm_y = graph("res", {"input": x})
132 | np.testing.assert_allclose(np_y, pm_y)
133 | assert np_y.shape == pm_y.shape
134 |
135 | @pytest.mark.parametrize('in_shape, axis',[
136 | ((5, 100,), (0,)),
137 | ((5, 100,), (0,1)),
138 | ((3, 4, 5, 6), (0, 1, 2)),
139 | ((3, 4, 5, 6), (1,)),
140 | ])
141 | def test_flip(in_shape, axis):
142 | x = np.random.randn(*in_shape).astype(np.float32)
143 |
144 | with pm.Node(name="flip_op") as graph:
145 | data = pm.input(name="input", shape=x.shape)
146 | out = pm.flip(data, axis, name="res")
147 |
148 | np_y = np.flip(x, axis)
149 | pm_y = graph("res", {"input": x})
150 | np.testing.assert_allclose(np_y, pm_y)
151 |
152 |
153 | @pytest.mark.parametrize('in_shape, pad_start, pad_end',[
154 | ((5, 100,), (0, 2), None),
155 | ((5, 100,), (0, 2), (0, 0)),
156 | ((3, 4, 5, 6), (1, 1, 1, 1), None),
157 | ((3, 4, 5, 6), (1, 1, 1, 1), (1, 0, 0, 1)),
158 | ])
159 | def test_pad(in_shape, pad_start, pad_end):
160 | x = np.random.randn(*in_shape).astype(np.float32)
161 |
162 | with pm.Node(name="pad_op") as graph:
163 | data = pm.input(name="input", shape=x.shape)
164 | out = pm.pad(data, pad_start, pad_end=pad_end, name="res")
165 |
166 | if pad_end is None:
167 | padding_val = tuple((pad_start[i], pad_start[i]) for i in range(len(pad_start)))
168 | else:
169 | padding_val = tuple((pad_start[i], pad_end[i]) for i in range(len(pad_start)))
170 | np_y = np.pad(x, padding_val)
171 | pm_y = graph("res", {"input": x})
172 | assert np_y.shape == pm_y.shape
173 | np.testing.assert_allclose(np_y, pm_y)
174 |
175 |
176 |
--------------------------------------------------------------------------------
/polymath/srdfg/templates/tensor_transformations.py:
--------------------------------------------------------------------------------
1 | import polymath as pm
2 | from .template_utils import _get_single_node_indices
3 | from polymath.srdfg.util import squeeze_shape
4 | from numbers import Integral
5 | import numpy as np
6 | import functools
7 |
8 | class tensor_transpose(pm.Template):
9 | def define_graph(self, data, out, perm=None):
10 |
11 | temp = pm.transpose(data, perm)
12 | indices = _get_single_node_indices(temp)
13 | out[indices] = temp[indices]
14 |
15 | @property
16 | def inputs(self):
17 | return (self.args[0],)
18 |
19 | @property
20 | def outputs(self):
21 | return (self.args[1],)
22 |
23 | @property
24 | def perm(self):
25 | return self.kwargs["perm"] or tuple(reversed(range(len(self.args[0].shape))))
26 |
27 | class tensor_flip(pm.Template):
28 | def define_graph(self, data, out, axis=None):
29 | temp = pm.flip(data, axis)
30 | indices = _get_single_node_indices(temp)
31 | out[indices] = temp[indices]
32 |
33 | @property
34 | def inputs(self):
35 | return (self.args[0],)
36 |
37 | @property
38 | def outputs(self):
39 | return (self.args[1],)
40 |
41 |
42 | class tensor_reshape(pm.Template):
43 | def define_graph(self, data, out, new_shape):
44 | temp = pm.reshape(data, new_shape)
45 | indices = _get_single_node_indices(temp)
46 | out[indices] = temp[indices]
47 |
48 | @property
49 | def inputs(self):
50 | return (self.args[0],)
51 |
52 | @property
53 | def outputs(self):
54 | return (self.args[1],)
55 |
56 | class tensor_pad(pm.Template):
57 | def define_graph(self, data, out, pad_start, pad_end=None):
58 | assert isinstance(pad_start, (list, tuple)) and len(pad_start) >= 1
59 | if isinstance(pad_start[0], (list, tuple)):
60 | assert pad_end is None
61 | pad_end = tuple([pad_start[i][1] for i in range(len(pad_start))])
62 | pad_start = tuple([pad_start[i][0] for i in range(len(pad_start))])
63 |
64 | temp = pm.pad(data, pad_start, pad_end=pad_end)
65 | indices = _get_single_node_indices(temp)
66 | out.set_shape(temp.shape, override=True)
67 | out[indices] = temp[indices]
68 |
69 | @property
70 | def inputs(self):
71 | return (self.args[0],)
72 |
73 | @property
74 | def outputs(self):
75 | return (self.args[1],)
76 |
77 | class coarse_flatten(pm.Template):
78 | def define_graph(self, data, out, axis=1):
79 | o_indices = _get_single_node_indices(out, shape=out.shape)
80 | i_indices = _get_single_node_indices(data, shape=out.shape)
81 | out[o_indices] = data[i_indices]
82 |
83 | @property
84 | def inputs(self):
85 | return (self.args[0],)
86 |
87 | @property
88 | def outputs(self):
89 | return (self.args[1],)
90 |
91 |
92 | class tensor_squeeze(pm.Template):
93 | def define_graph(self, data, out):
94 | pass
95 |
96 | @property
97 | def inputs(self):
98 | return (self.args[0],)
99 |
100 | @property
101 | def outputs(self):
102 | return (self.args[1],)
103 |
104 |
105 | class elem_gather(pm.Template):
106 | def define_graph(self, data, output, indices=None, axis=0):
107 | # TODO: Fix this to use manual implementation
108 | assert indices is not None
109 | output.write(pm.gather(data, np.asarray([indices]), axis=axis))
110 |
111 | @property
112 | def inputs(self):
113 | return (self.args[0],)
114 |
115 | @property
116 | def outputs(self):
117 | return (self.args[1],)
118 |
119 | @property
120 | def indices(self):
121 | return self.kwargs['indices']
122 |
123 |
124 |
125 | class elem_expand(pm.Template):
126 | def define_graph(self, data, new_shape, output, axis=0):
127 | # TODO: Fix this to use manual implementation
128 | in_dims = data.shape[0]
129 | new_dims = new_shape[0]
130 | update_shape_bool = in_dims < new_dims
131 | in_shape = in_dims * update_shape_bool + (1-update_shape_bool)
132 |
133 | @property
134 | def inputs(self):
135 | return (self.args[0], self.args[1])
136 |
137 | @property
138 | def outputs(self):
139 | return (self.args[2],)
140 |
141 | class resize(pm.Template):
142 | def define_graph(self, data, scales, output, mode=0):
143 | pass
144 |
145 | @property
146 | def inputs(self):
147 | return (self.args[0], self.args[1])
148 |
149 | @property
150 | def outputs(self):
151 | return (self.args[2],)
152 |
153 | def pad_tensor_inlined(data, out, pad_start, pad_end=None):
154 | assert isinstance(pad_start, (list, tuple)) and len(pad_start) >= 1
155 | if isinstance(pad_start[0], (list, tuple)):
156 | assert pad_end is None
157 | pad_end = tuple([pad_start[i][1] for i in range(len(pad_start))])
158 | pad_start = tuple([pad_start[i][0] for i in range(len(pad_start))])
159 |
160 | temp = pm.pad(data, pad_start, pad_end=pad_end)
161 | indices = _get_single_node_indices(temp)
162 | out.set_shape(temp.shape, override=True)
163 | out[indices] = temp[indices]
164 |
165 | def flip_tensor_inlined(data, out, axis=None):
166 | temp = pm.flip(data, axis)
167 | indices = _get_single_node_indices(temp)
168 | out[indices] = temp[indices]
169 |
170 | # TODO: Need to fix this functionality to create a new node
171 | def onnx_unsqueeze(x, *args, axes=None, shape=None, name=None, **kwargs):
172 | out = pm.unsqueeze(x, axis=axes, name=name, shape=shape)
173 | return out
174 |
175 | # TODO: Check this works after changes
176 | def onnx_squeeze(x, *args, axes=None, shape=None, name=None, **kwargs):
177 | out = pm.squeeze(x, axis=axes, name=name, shape=shape)
178 | return out
179 |
180 | # TODO: Check this works after changes
181 | def onnx_reshape(data, *args, shape=None, name=None, **kwargs):
182 | data._shape = shape
183 | data.graph.nodes[name] = data
184 | return data
185 |
186 | # TODO: Convert this to a template node
187 | def onnx_resize(data, *args, shape=None, name=None, **kwargs):
188 |
189 | data._shape = shape
190 | data.graph.nodes[name] = data
191 | return data
192 |
193 | def onnx_identity(data, shape=None, name=None, **kwargs):
194 | data.set_name(name)
195 | return data
--------------------------------------------------------------------------------
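The templates above delegate to the functional ops (pm.transpose, pm.flip, pm.reshape, pm.pad) and copy the result into a preallocated output. The functional form can be exercised directly, following the same pattern as tests/test_transformations.py:

    import numpy as np
    import polymath as pm

    x = np.random.randn(3, 4).astype(np.float32)
    with pm.Node(name="transpose_op") as graph:
        data = pm.input(name="input", shape=x.shape)
        out = pm.transpose(data, (1, 0), name="res")

    # Evaluate the "res" node and compare against numpy.
    pm_y = graph("res", {"input": x})
    np.testing.assert_allclose(pm_y, np.transpose(x, (1, 0)))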
/polymath/codegen/tabla/sigmoid_lookup.csv:
--------------------------------------------------------------------------------
1 | -256,0
2 | -255,0
3 | -254,0
4 | -253,0
5 | -252,0
6 | -251,0
7 | -250,0
8 | -249,0
9 | -248,0
10 | -247,0
11 | -246,0
12 | -245,0
13 | -244,0
14 | -243,0
15 | -242,0
16 | -241,0
17 | -240,0
18 | -239,0
19 | -238,0
20 | -237,0
21 | -236,0
22 | -235,0
23 | -234,0
24 | -233,0
25 | -232,0
26 | -231,0
27 | -230,0
28 | -229,0
29 | -228,0
30 | -227,0
31 | -226,0
32 | -225,0
33 | -224,0
34 | -223,0
35 | -222,0
36 | -221,0
37 | -220,0
38 | -219,0
39 | -218,0
40 | -217,0
41 | -216,0
42 | -215,0
43 | -214,0
44 | -213,0
45 | -212,0
46 | -211,0
47 | -210,0
48 | -209,0
49 | -208,0
50 | -207,0
51 | -206,0
52 | -205,0
53 | -204,0
54 | -203,0
55 | -202,0
56 | -201,0
57 | -200,0
58 | -199,0
59 | -198,0
60 | -197,0
61 | -196,0
62 | -195,0
63 | -194,0
64 | -193,0
65 | -192,0
66 | -191,0
67 | -190,0
68 | -189,0
69 | -188,0
70 | -187,0
71 | -186,0
72 | -185,0
73 | -184,0
74 | -183,0
75 | -182,0
76 | -181,0
77 | -180,0
78 | -179,0
79 | -178,0
80 | -177,1
81 | -176,1
82 | -175,1
83 | -174,1
84 | -173,1
85 | -172,1
86 | -171,1
87 | -170,1
88 | -169,1
89 | -168,1
90 | -167,1
91 | -166,1
92 | -165,1
93 | -164,1
94 | -163,1
95 | -162,1
96 | -161,1
97 | -160,1
98 | -159,1
99 | -158,1
100 | -157,1
101 | -156,1
102 | -155,1
103 | -154,1
104 | -153,1
105 | -152,1
106 | -151,1
107 | -150,1
108 | -149,1
109 | -148,1
110 | -147,1
111 | -146,1
112 | -145,1
113 | -144,1
114 | -143,1
115 | -142,1
116 | -141,2
117 | -140,2
118 | -139,2
119 | -138,2
120 | -137,2
121 | -136,2
122 | -135,2
123 | -134,2
124 | -133,2
125 | -132,2
126 | -131,2
127 | -130,2
128 | -129,2
129 | -128,2
130 | -127,2
131 | -126,2
132 | -125,3
133 | -124,3
134 | -123,3
135 | -122,3
136 | -121,3
137 | -120,3
138 | -119,3
139 | -118,3
140 | -117,3
141 | -116,3
142 | -115,3
143 | -114,4
144 | -113,4
145 | -112,4
146 | -111,4
147 | -110,4
148 | -109,4
149 | -108,4
150 | -107,4
151 | -106,4
152 | -105,5
153 | -104,5
154 | -103,5
155 | -102,5
156 | -101,5
157 | -100,5
158 | -99,6
159 | -98,6
160 | -97,6
161 | -96,6
162 | -95,6
163 | -94,6
164 | -93,7
165 | -92,7
166 | -91,7
167 | -90,7
168 | -89,7
169 | -88,8
170 | -87,8
171 | -86,8
172 | -85,8
173 | -84,9
174 | -83,9
175 | -82,9
176 | -81,9
177 | -80,10
178 | -79,10
179 | -78,10
180 | -77,11
181 | -76,11
182 | -75,11
183 | -74,12
184 | -73,12
185 | -72,12
186 | -71,13
187 | -70,13
188 | -69,13
189 | -68,14
190 | -67,14
191 | -66,14
192 | -65,15
193 | -64,15
194 | -63,16
195 | -62,16
196 | -61,17
197 | -60,17
198 | -59,17
199 | -58,18
200 | -57,18
201 | -56,19
202 | -55,19
203 | -54,20
204 | -53,21
205 | -52,21
206 | -51,22
207 | -50,22
208 | -49,23
209 | -48,23
210 | -47,24
211 | -46,25
212 | -45,25
213 | -44,26
214 | -43,26
215 | -42,27
216 | -41,28
217 | -40,29
218 | -39,29
219 | -38,30
220 | -37,31
221 | -36,31
222 | -35,32
223 | -34,33
224 | -33,34
225 | -32,34
226 | -31,35
227 | -30,36
228 | -29,37
229 | -28,38
230 | -27,38
231 | -26,39
232 | -25,40
233 | -24,41
234 | -23,42
235 | -22,43
236 | -21,44
237 | -20,45
238 | -19,46
239 | -18,46
240 | -17,47
241 | -16,48
242 | -15,49
243 | -14,50
244 | -13,51
245 | -12,52
246 | -11,53
247 | -10,54
248 | -9,55
249 | -8,56
250 | -7,57
251 | -6,58
252 | -5,59
253 | -4,60
254 | -3,61
255 | -2,62
256 | -1,63
257 | 0,64
258 | 1,65
259 | 2,66
260 | 3,67
261 | 4,68
262 | 5,69
263 | 6,70
264 | 7,71
265 | 8,72
266 | 9,73
267 | 10,74
268 | 11,75
269 | 12,76
270 | 13,77
271 | 14,78
272 | 15,79
273 | 16,80
274 | 17,81
275 | 18,82
276 | 19,82
277 | 20,83
278 | 21,84
279 | 22,85
280 | 23,86
281 | 24,87
282 | 25,88
283 | 26,89
284 | 27,90
285 | 28,90
286 | 29,91
287 | 30,92
288 | 31,93
289 | 32,94
290 | 33,94
291 | 34,95
292 | 35,96
293 | 36,97
294 | 37,97
295 | 38,98
296 | 39,99
297 | 40,99
298 | 41,100
299 | 42,101
300 | 43,102
301 | 44,102
302 | 45,103
303 | 46,103
304 | 47,104
305 | 48,105
306 | 49,105
307 | 50,106
308 | 51,106
309 | 52,107
310 | 53,107
311 | 54,108
312 | 55,109
313 | 56,109
314 | 57,110
315 | 58,110
316 | 59,111
317 | 60,111
318 | 61,111
319 | 62,112
320 | 63,112
321 | 64,113
322 | 65,113
323 | 66,114
324 | 67,114
325 | 68,114
326 | 69,115
327 | 70,115
328 | 71,115
329 | 72,116
330 | 73,116
331 | 74,116
332 | 75,117
333 | 76,117
334 | 77,117
335 | 78,118
336 | 79,118
337 | 80,118
338 | 81,119
339 | 82,119
340 | 83,119
341 | 84,119
342 | 85,120
343 | 86,120
344 | 87,120
345 | 88,120
346 | 89,121
347 | 90,121
348 | 91,121
349 | 92,121
350 | 93,121
351 | 94,122
352 | 95,122
353 | 96,122
354 | 97,122
355 | 98,122
356 | 99,122
357 | 100,123
358 | 101,123
359 | 102,123
360 | 103,123
361 | 104,123
362 | 105,123
363 | 106,124
364 | 107,124
365 | 108,124
366 | 109,124
367 | 110,124
368 | 111,124
369 | 112,124
370 | 113,124
371 | 114,124
372 | 115,125
373 | 116,125
374 | 117,125
375 | 118,125
376 | 119,125
377 | 120,125
378 | 121,125
379 | 122,125
380 | 123,125
381 | 124,125
382 | 125,125
383 | 126,126
384 | 127,126
385 | 128,126
386 | 129,126
387 | 130,126
388 | 131,126
389 | 132,126
390 | 133,126
391 | 134,126
392 | 135,126
393 | 136,126
394 | 137,126
395 | 138,126
396 | 139,126
397 | 140,126
398 | 141,126
399 | 142,127
400 | 143,127
401 | 144,127
402 | 145,127
403 | 146,127
404 | 147,127
405 | 148,127
406 | 149,127
407 | 150,127
408 | 151,127
409 | 152,127
410 | 153,127
411 | 154,127
412 | 155,127
413 | 156,127
414 | 157,127
415 | 158,127
416 | 159,127
417 | 160,127
418 | 161,127
419 | 162,127
420 | 163,127
421 | 164,127
422 | 165,127
423 | 166,127
424 | 167,127
425 | 168,127
426 | 169,127
427 | 170,127
428 | 171,127
429 | 172,127
430 | 173,127
431 | 174,127
432 | 175,127
433 | 176,127
434 | 177,127
435 | 178,127
436 | 179,127
437 | 180,127
438 | 181,127
439 | 182,127
440 | 183,127
441 | 184,127
442 | 185,127
443 | 186,127
444 | 187,127
445 | 188,127
446 | 189,127
447 | 190,127
448 | 191,127
449 | 192,127
450 | 193,127
451 | 194,127
452 | 195,127
453 | 196,127
454 | 197,127
455 | 198,127
456 | 199,127
457 | 200,127
458 | 201,127
459 | 202,127
460 | 203,127
461 | 204,127
462 | 205,127
463 | 206,127
464 | 207,127
465 | 208,127
466 | 209,127
467 | 210,127
468 | 211,127
469 | 212,127
470 | 213,127
471 | 214,127
472 | 215,127
473 | 216,127
474 | 217,127
475 | 218,127
476 | 219,127
477 | 220,127
478 | 221,127
479 | 222,127
480 | 223,127
481 | 224,127
482 | 225,127
483 | 226,127
484 | 227,127
485 | 228,127
486 | 229,127
487 | 230,127
488 | 231,127
489 | 232,127
490 | 233,127
491 | 234,127
492 | 235,127
493 | 236,127
494 | 237,127
495 | 238,127
496 | 239,127
497 | 240,127
498 | 241,127
499 | 242,127
500 | 243,127
501 | 244,127
502 | 245,127
503 | 246,127
504 | 247,127
505 | 248,127
506 | 249,127
507 | 250,127
508 | 251,127
509 | 252,127
510 | 253,127
511 | 254,127
512 | 255,127
--------------------------------------------------------------------------------
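The table maps a signed fixed-point input x in [-256, 255] to an unsigned output in [0, 127]. The entries are consistent with round(128 * sigmoid(x / 32)) clamped to 127, suggesting 5 fractional bits on the input and 7 on the output; a sketch that regenerates the table under that assumption:

    import numpy as np

    x = np.arange(-256, 256)
    y = np.minimum(127, np.rint(128.0 / (1.0 + np.exp(-x / 32.0))).astype(np.int64))
    # Each (x[i], y[i]) pair should match the corresponding CSV row above.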