├── .gitignore
├── .gitmodules
├── LICENSE
├── README.md
├── __init__.py
├── client.py
├── common
│   ├── __init__.py
│   ├── config.py
│   ├── logging.conf
│   ├── minionn_helper.py
│   ├── minionn_onnx_pb2.py
│   ├── minionn_onnx_pb2_grpc.py
│   ├── node_operations
│   │   ├── __init__.py
│   │   ├── common.py
│   │   ├── gemm.py
│   │   ├── log.py
│   │   ├── relu.py
│   │   ├── reshape.py
│   │   └── softmax.py
│   ├── onnx_helper.py
│   └── operation_handler.py
├── lib
│   ├── Makefile
│   ├── __init__.py
│   ├── aby.makefile
│   ├── minionn.cpp
│   ├── minionnABY.cpp
│   ├── minionnABY.h
│   ├── minionnCommon.h
│   ├── minionnCrypto.cpp
│   ├── minionnCrypto.h
│   ├── minionnMath.cpp
│   ├── minionnMath.h
│   ├── miracl.makefile
│   ├── test.py
│   ├── test_mpc_client.py
│   └── test_mpc_server.py
├── models
│   ├── R2_S.onnx
│   ├── R_S.onnx
│   ├── S.onnx
│   ├── S.tensor
│   ├── check_r2s.py
│   ├── check_rs.py
│   └── check_s.py
├── proto
│   ├── README.md
│   ├── minionn-onnx.proto
│   └── onnx.proto
├── server.py
└── tools
    ├── __init__.py
    ├── csv_to_tensor.py
    ├── make_model.py
    ├── make_model_non_reversed.py
    ├── make_model_only_gemm.py
    ├── out.txt
    ├── test_model.py
    ├── test_non_reversed.py
    └── test_only_gemm.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Prevent key leaks
2 | assets/*
3 | *.pkey
4 | *.skey
5 | 
6 | # Unnecessary generated onnx files in tools
7 | tools/*.onnx
8 | tools/*.tensor
9 | 
10 | # SEAL library
11 | lib/SEAL/*
12 | 
13 | # binaries
14 | lib/bin/*.a
15 | 
16 | # Temporary objects
17 | *.o
18 | *.d
19 | **.cppimporthash
20 | **/.rendered*
21 | **/*.cpython-35*
22 | **/__pycache__/
23 | 
24 | # Workspace objects
25 | *.code-workspace
26 | **/.vscode/*
27 | 
28 | 
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "lib/pybind11"]
2 |     path = lib/pybind11
3 |     url = https://github.com/pybind/pybind11.git
4 | [submodule "lib/ABY"]
5 |     path = lib/ABY
6 |     url = https://github.com/encryptogroup/ABY.git
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 | 
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
203 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Requirements
2 | ```bash
3 | sudo apt install libssl-dev libgmp-dev libglib2.0-dev
4 | pip install pybind11 cppimport onnx
5 | ```
6 | 
7 | # Installation
8 | 
9 | 
10 | ## ABY
11 | Make sure you have initialized the git submodules recursively:
12 | ```bash
13 | git submodule update --init --recursive
14 | ```
15 | Then, run `make all` in the lib folder:
16 | ```bash
17 | cd lib
18 | make all
19 | ```
20 | 
21 | ## SEAL
22 | Download SEAL (MiniONN is tested with SEAL v2.3.1) and place its SEAL subdirectory in lib (so that lib/SEAL contains the seal subdirectory).
23 | https://www.microsoft.com/en-us/research/project/simple-encrypted-arithmetic-library/
24 | 
25 | You now need to install SEAL with position independent code! Do this by adding the following line to the file CMakeLists.txt before running CMake:
26 | ```
27 | set(CMAKE_POSITION_INDEPENDENT_CODE ON)
28 | ```
29 | 
30 | Now, install SEAL as instructed in the INSTALL.txt:
31 | ```bash
32 | cd SEAL
33 | cmake .
34 | make
35 | sudo make install
36 | ```
37 | 
38 | NOTE: If you prefer not to install SEAL globally, or do not want to install a global version with position independent code, you can instruct MiniONN to use a local SEAL library. For this, update the minionn.cpp file as follows:
39 | ```python
40 | cfg['libraries'] = [
41 | #SEAL library
42 | 'seal', # Change the path to the SEAL file here
43 | ```
44 | 
45 | ## Verifying the installation
46 | There are three test files in the lib subdirectory: test.py, test_mpc_server.py and test_mpc_client.py.
47 | 
48 | Run
49 | ```bash
50 | python3 test.py
51 | ```
52 | and verify that all tests pass. If you encounter errors during the initial compilation of the C++ modules, there are probably some files missing for SEAL or ABY.
53 | 
54 | Next, run test_mpc_server.py and then run test_mpc_client.py in a second terminal.
55 | 
56 | # Usage
57 | There are three models given in the models folder. All three take the S.tensor as input:
58 | ```bash
59 | # Server:
60 | python3 server.py -i models/S.onnx
61 | # Client:
62 | python3 client.py -i models/S.tensor -o models.out.txt
63 | ```
64 | 
65 | You can test and verify the correctness of the three models with the given scripts in the models folder.
66 | ```bash
67 | cd models
68 | python3 check_s.py
69 | ```
70 | 
71 | You can additionally build your own models with the scripts in the tools folder. Those also have their own test scripts included.
72 | 
73 | # MiniONN input
74 | The MiniONN client requires only the model input to be given as an ONNX tensor. The example models and tool model creators already create a TensorProto file automatically. However, for your own models you will need to export your data into a TensorProto and store that as a file. An example of this process is in tools/csv_to_tensor.py.
75 | 
76 | # MiniONN inaccuracy
77 | MiniONN introduces some inaccuracy into the calculated result. This can best be seen by running the above usage example and looking at the difference between the expected and the computed result reported by check_s.py.
78 | In our tests, this error did not change any predicted result. However, keep this error in mind whenever you experiment.
79 | 
80 | In the future, this slight error might be resolved by adapting the randomness of V, since V gets downshifted after every matrix multiplication.
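To see where this error comes from, consider the fixed-point arithmetic MiniONN uses: operands are shifted up by the fractional base, multiplied as integers, and the result is floored back down after every matrix multiplication. A minimal sketch (the input values are made up for illustration; fractional_base matches the default in common/config.py):

```python
fractional_base = 1000  # default value in common/config.py

x = 1.234567  # example client input value
w = 0.987654  # example weight

# Shift both operands into integer range, multiply, then floor the
# product back down by one factor of the fractional base, as is done
# after every matrix multiplication.
x_fp = int(fractional_base * x)           # 1234
w_fp = int(fractional_base * w)           # 987
y_fp = (x_fp * w_fp) // fractional_base   # 1217, still scaled by 1000

print(x * w)                   # 1.219325... (exact result)
print(y_fp / fractional_base)  # 1.217 (truncation error)
```

Each truncation can lose up to one fixed-point unit, and these losses accumulate across operands and layers; this is the kind of small difference that check_s.py reports.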
81 | 
82 | This downshift is also important for the scaling that is set in common/config.py. Here, the fractional_base is used to shift the input (weights and client input) up into integer range, and to scale the result down again after every matrix multiplication. If your input does not need to be scaled up (into integer range), then you can set the fractional_base to 1 for your models. However, you might then need to set the fractional_downscale factor in the same configuration file to prevent overflows (especially overflows over the cryptographic modulo used by MiniONN).
83 | 
84 | # Important note
85 | The MiniONN code has a small drawback that has to be taken into account when working with it:
86 | Currently, the server implementation uses a static dictionary in the minionn_helper file. This means that the server needs to be restarted after every run. This can easily be fixed by changing to a class that keeps all tensors of a specific instance and using this from the operation_handler.
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/__init__.py
--------------------------------------------------------------------------------
/client.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | 
11 | import argparse
12 | import sys, time, os
13 | 
14 | # ONNX/Numpy
15 | import onnx
16 | import onnx.numpy_helper
17 | import numpy as np
18 | 
19 | # gRPC for client-server communication
20 | import grpc
21 | 
22 | # cpp
23 | import cppimport
24 | import cppimport.import_hook
25 | cppimport.set_quiet(True)
26 | import lib.minionn as minionn
27 | 
28 | # project imports
29 | from common import minionn_onnx_pb2
30 | from common import minionn_onnx_pb2_grpc
31 | from common import onnx_helper, operation_handler, minionn_helper, config
32 | 
33 | # Logging
34 | import logging
35 | import logging.config
36 | logging.config.fileConfig('common/logging.conf')
37 | logger = logging.getLogger('minionn')
38 | 
39 | def main():
40 |     parser = argparse.ArgumentParser(description="MiniONN - ONNX compatible version")
41 |     parser.add_argument(
42 |         "-i", "--input",
43 |         type=str, required=True,
44 |         help="The input file for the client's input. Should contain the X vector as a single TensorProto object.",
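        # The tensor file is parsed with onnx.TensorProto.ParseFromString
        # below; tools/csv_to_tensor.py shows one way to create such a file
        # (e.g. onnx.numpy_helper.from_array(...).SerializeToString()).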
45 |     )
46 |     parser.add_argument(
47 |         "-o", "--output",
48 |         type=str, required=False,
49 |         help="If given, prints the output matrix into this file (csv comma style).",
50 |     )
51 |     parser.add_argument(
52 |         "-s", "--server",
53 |         type=str, required=False, default=config.ip,
54 |         help="IP address of the server.",
55 |     )
56 |     parser.add_argument(
57 |         "-p","--rpc_port",
58 |         type=int, required=False, default=config.port_rpc,
59 |         help="Server port for RPC.",
60 |     )
61 |     parser.add_argument(
62 |         "-m","--mpc_port",
63 |         type=int, required=False, default=config.port_aby,
64 |         help="Server port for MPC.",
65 |     )
66 |     parser.add_argument(
67 |         "-v", "--verbose",
68 |         required=False, default=False, action='store_true',
69 |         help="Log verbosely.",
70 |     )
71 |     args = parser.parse_args()
72 | 
73 |     """
74 |     Create and set up Logger
75 |     """
76 |     loglevel = (logging.DEBUG if args.verbose else logging.INFO)
77 |     logger.setLevel(loglevel)
78 |     logger.info("MiniONN CLIENT")
79 | 
80 |     """
81 |     First, read the x vector from input
82 |     """
83 |     x = onnx.TensorProto()
84 |     with open(args.input, 'rb') as fid:
85 |         content = fid.read()
86 |         x.ParseFromString(content)
87 | 
88 |     if len(x.dims) == 0:
89 |         logger.error("Error reading the ONNX tensor. Aborting.")
90 |         sys.exit()
91 | 
92 |     x_list = onnx_helper.onnx_tensor_to_list(x)
93 |     # multiply by fractional
94 |     x_list = [int(config.fractional_base*v) for v in x_list]
95 | 
96 |     logger.info("Successfully read X from input.")
97 |     if config.debug_mode:
98 |         logger.debug("Input starts with " + str(x_list[:config.debug_print_length]) + " and has size " + str(len(x_list)))
99 | 
100 |     """
101 |     With x ready, we can connect to the server to receive the model and w
102 |     """
103 |     channel = grpc.insecure_channel(args.server + ":" + str(args.rpc_port), options=config.grpc_options)
104 |     stub = minionn_onnx_pb2_grpc.MinioNNStub(channel)
105 |     response = stub.Precomputation(minionn_onnx_pb2.PrecomputationRequest(request_model=True, request_w=True))
106 |     server_w = response.w
107 |     server_model = response.model
108 | 
109 |     logger.info("Server sent privatized model.")
110 | 
111 |     # Parse model and fill in dimensions
112 |     tensors_dims = onnx_helper.retrieveTensorDimensionsFromModel(server_model)
113 |     nodes = onnx_helper.retrieveNodesFromModel(server_model)
114 | 
115 |     for name,dim in tensors_dims.items():
116 |         minionn_helper.put_cpp_tensor(name, None, dim)
117 | 
118 |     """
119 |     Create handler for the model.
120 |     The handler will already calculate the dimensions of all tensors
121 |     """
122 |     handler = operation_handler.OperationHandler(nodes, server_model.graph.input[0].name)
123 | 
124 |     """
125 |     Init and generate keys
126 |     """
127 |     if not os.path.exists(config.asset_folder):
128 |         os.makedirs(config.asset_folder)
129 |         logger.info("Created directory " + config.asset_folder)
130 | 
131 |     minionn_helper.init(config.SLOTS)
132 |     minionn_helper.generate_keys(config.client_pkey,config.client_skey)
133 | 
134 |     """
135 |     Use w to generate u and xs
136 |     """
137 |     start_time = time.time()
138 |     w_list = handler.get_w_list()
139 |     encU = minionn_helper.client_precomputation(server_w, config.SLOTS, w_list)
140 | 
141 |     """
142 |     Initialize the handler as client
143 |     This generates the input r that we can use to calculate xs
144 |     """
145 |     handler.init_client()
146 | 
147 |     # Calculate xs = x - r.
Use the input r 148 | # (Activation functions later obliviously put in the next Rs) 149 | # The input r might be different from the first r due to transposes/reshapes done on x 150 | input_r = "r0" # First r calculated by init_client 151 | xs = minionn_helper.vector_sub(x_list, minionn_helper.get_cpp_tensor(input_r)) 152 | # Client input is just the first v 153 | xc = minionn_helper.get_cpp_tensor(input_r) 154 | 155 | """ 156 | Request a computation result from server for u and xs 157 | and start execution locally. 158 | """ 159 | result_future = stub.Computation.future(minionn_onnx_pb2.ComputationRequest(u=encU, xs=xs)) 160 | logger.info("Sent Computation request to server.") 161 | if config.debug_mode: 162 | logger.debug("x is:" + str(x_list[:config.debug_print_length_long])) 163 | logger.debug("xs is:" + str(xs[:config.debug_print_length_long])) 164 | logger.debug("xc is:" + str(xc[:config.debug_print_length_long])) 165 | logger.debug("input r is:" + str(minionn_helper.get_cpp_tensor("r0")[:config.debug_print_length_long])) 166 | 167 | # Connect to MPC port 168 | logger.info("Establishing MPC connection") 169 | minionn_helper.init_mpc(args.server, args.mpc_port, False) 170 | 171 | # Now run the model with xc 172 | result_client = handler.run_network(x_in = xc, 173 | in_name = server_model.graph.input[0].name, 174 | out_name = server_model.graph.output[0].name) 175 | 176 | logger.info("Shutting down MPC connection") 177 | minionn_helper.shutdown_mpc() 178 | 179 | # Get server result and calculate final result 180 | result_server = result_future.result().ys 181 | logger.info("Server result is:" + str(result_server)) 182 | logger.info("Client result is:" + str(result_client)) 183 | 184 | 185 | result = minionn_helper.vector_add(result_client, result_server) 186 | logger.info("Overall result is: " + str(list(result))) 187 | finish_time = time.time() 188 | logger.info("Processing took " + str(finish_time - start_time) + " seconds.") 189 | 190 | # Output to file if requested 191 | if args.output: 192 | # reshape to numpy array first 193 | shape = minionn_helper.get_cpp_tensor_dim(server_model.graph.output[0].name) 194 | reshaped = np.array(result).reshape(shape) 195 | # use numpy to store file 196 | np.savetxt(args.output,reshaped, delimiter=",") 197 | 198 | if __name__ == '__main__': 199 | main() 200 | -------------------------------------------------------------------------------- /common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/common/__init__.py -------------------------------------------------------------------------------- /common/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | # Fractions for shifting up the input data and scaling down after every layer (to prevent overflows) 11 | fractional_base = 1000 12 | fractional_downscale = 1 13 | 14 | # MiniONN static vars 15 | SLOTS = 4096 16 | PMAX = 101285036033 17 | PMAX_HALF = int(PMAX / 2) 18 | 19 | # Key files 20 | asset_folder = "assets/" 21 | client_pkey = asset_folder + "c.pkey" 22 | client_skey = asset_folder + "c.skey" 23 | server_pkey = asset_folder + "s.pkey" 24 | server_skey = asset_folder + "s.skey" 25 | 26 | # 
Server config, default values 27 | ip = "127.0.0.1" 28 | port_rpc = 8555 29 | port_aby = 8556 30 | 31 | # GRPC options 32 | grpc_options = [('grpc.max_send_message_length', -1), ('grpc.max_receive_message_length', -1)] 33 | 34 | # Debugging parameters 35 | debug_mode = False 36 | debug_print_length = 5 37 | debug_print_length_long = 10 38 | 39 | random_r = True 40 | random_v = True -------------------------------------------------------------------------------- /common/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root,minionn,onnx_helper,node_operations,minionn_helper, NodeOperator 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=simpleFormatter 9 | 10 | [logger_root] 11 | level=DEBUG 12 | handlers=consoleHandler 13 | 14 | [logger_minionn] 15 | level=DEBUG 16 | handlers=consoleHandler 17 | qualname=minionn 18 | propagate=0 19 | 20 | [logger_onnx_helper] 21 | level=DEBUG 22 | handlers=consoleHandler 23 | qualname=onnx_helper 24 | 25 | [logger_minionn_helper] 26 | level=DEBUG 27 | handlers=consoleHandler 28 | qualname=minionn_helper 29 | 30 | [logger_node_operations] 31 | level=DEBUG 32 | handlers=consoleHandler 33 | qualname=node_operations 34 | 35 | [logger_NodeOperator] 36 | level=DEBUG 37 | handlers=consoleHandler 38 | qualname=NodeOperator 39 | 40 | [handler_consoleHandler] 41 | class=StreamHandler 42 | level=DEBUG 43 | formatter=simpleFormatter 44 | args=(sys.stdout,) 45 | 46 | 47 | [formatter_simpleFormatter] 48 | format=%(asctime)s-%(name)s_%(levelname)s: %(message)s 49 | datefmt=%d/%m/%Y_%I:%M:%S 50 | class=logging.Formatter 51 | -------------------------------------------------------------------------------- /common/minionn_helper.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | # cpp 12 | import cppimport 13 | import cppimport.import_hook 14 | cppimport.set_quiet(True) 15 | from lib import minionn as minionn 16 | 17 | import logging 18 | logger = logging.getLogger('minionn.minionn_helper') 19 | 20 | from operator import mul 21 | from functools import reduce 22 | 23 | import numpy as np 24 | 25 | from . import config 26 | 27 | #TODO: Work with tensor objects held by a tensorhandler 28 | class Tensor(object): 29 | """ 30 | Class for a Tensor that can have a numpy or a cpp reference. 31 | Contains: 32 | - Shape 33 | - CPP representation (optional) 34 | - Numpy representation (optional but preferred) 35 | 36 | When the system gets initialized, all tensors are numpy tensors. 37 | Whenever a cpp tensor gets requested, it gets generated on demand from the numpy tensor. 38 | Vice versa, if a numpy tensor is requested but only a cpp vector exists, it also gets generated on demand (e.g. after a MPC operation, only a cpp vector will exist) 39 | 40 | During the computation of the network, only cpp tensors should be used. The main reason for this is that the MPC operations are implemented in cpp with the ABY library and require the tensors to be cpp vectors. Every conversion from cpp to numpy or vice versa contains at least one copy of the whole tensor which should be avoided whenever possible. 41 | """ 42 | def __init__(self): 43 | pass 44 | 45 | # NOTE: Currently NOT concurrency safe! 
Only works for a single client
46 | # TODO: Refactor into tensors as objects. Then keep a dict of objects.
47 | # TODO: Think about splitting minionn helper and matrix stuff
48 | #   Maybe node operator can take care of matrices while having
49 | #   minionn helper only call cpp code
50 | #   Or create extra class for matrices and give it to node operator
51 | #   as argument when calling run network
52 | 
53 | # TODO: Move minionn operations into the actual nodes
54 | 
55 | # dictionary to hold the cpp tensors mapped by their name
56 | # This is a dict of a tensor name to a tuple of (VectorInt, Shape (as list))
57 | cpp_tensors = {}
58 | 
59 | def _tensor_is_transposed(name):
60 |     transposed = name.count("T") % 2
61 |     if transposed:
62 |         return True
63 |     else:
64 |         return False
65 | 
66 | def _tensor_get_base_name(name):
67 |     return name.replace("T","")
68 | 
69 | def _tensor_normalize_name(name):
70 |     normalized_name = _tensor_get_base_name(name)
71 | 
72 |     if _tensor_is_transposed(name):
73 |         normalized_name += "T"
74 | 
75 |     return normalized_name
76 | 
77 | def _has_tensor(name):
78 |     if name in cpp_tensors:
79 |         return True
80 |     return False
81 | 
82 | def _get_tensor(name):
83 |     name_normalized = _tensor_normalize_name(name)
84 | 
85 |     if not _has_tensor(name_normalized) \
86 |         or \
87 |         cpp_tensors[name_normalized][0] is None:
88 |         # Return transposed vector.
89 |         # It does not exist yet, but we can quickly create it
90 |         # if its transposed exists and has values
91 |         transposed_name = _tensor_normalize_name(name_normalized + "T")
92 |         if _has_tensor(transposed_name) \
93 |             and tensor_has_values(transposed_name):
94 |             _transpose(transposed_name)
95 | 
96 |     return cpp_tensors[name_normalized][0]
97 | 
98 | def _get_tensor_dim(name):
99 |     """
100 |     Returns the shape (dimension) as a list of the given tensor.
101 |     If name does not exist, we try to return the reversed dimension of
102 |     its transposed vector
103 |     """
104 |     tname = _tensor_normalize_name(name)
105 |     if _has_tensor(tname):
106 |         return cpp_tensors[tname][1]
107 |     else:
108 |         # Name does not exist. Try transposed of name
109 |         tname = _tensor_normalize_name(name + "T")
110 |         if _has_tensor(tname):
111 |             return list(reversed(cpp_tensors[tname][1]))
112 |         else:
113 |             logger.error("Cannot get dimension of nonexistent tensor " + name)
114 | 
115 | def _set_tensor(name, new_tensor, new_dimension):
116 |     cpp_tensors[_tensor_normalize_name(name)] = (new_tensor, new_dimension)
117 |     logger.debug("Touched tensor " + name + ". New dims:" + str(new_dimension) )
118 |     if new_tensor is not None and config.debug_mode:
119 |         logger.debug("-- Tensor's size is " + str(len(list(new_tensor))))
120 |         # assert + " size:" + str(new_tensor.size())
121 | 
122 | def _transpose(inp):
123 |     """
124 |     Takes the input vector from cpp_vectors, reshapes it into
125 |     the dimensions given, transposes the matrix, and creates a new,
126 |     flattened, cpp vector as "<inp>T", with <inp> being the input string.
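    Example (illustrative): _transpose("x") stores the flattened
    transposed matrix under "xT", while _transpose("xT") stores its
    result under "x" again, since the base name is kept and "T" is
    only appended when the input is not already transposed.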
127 | """ 128 | # Calculate new name (to prevent double T namings) 129 | new_name = _tensor_get_base_name(inp) 130 | if not _tensor_is_transposed(inp): 131 | new_name += "T" 132 | logger.debug("Transposing " + inp + " to output " + new_name ) 133 | 134 | # Get vec and dim 135 | vec_in = list(cpp_tensors[inp][0]) 136 | dim_in = cpp_tensors[inp][1] 137 | 138 | # Transpose the reshaped matrix 139 | reshaped = np.reshape(vec_in, dim_in) 140 | transposed = np.transpose(reshaped) 141 | dim_out = list(transposed.shape) 142 | 143 | # Flatten and store 144 | _set_tensor(new_name, minionn.VectorInt(transposed.flatten().tolist()), dim_out) 145 | 146 | def put_cpp_tensor(name, values, dimension, fractional = 1): 147 | if values is None: 148 | # If values is none, input none to dict 149 | _set_tensor(name, None, dimension) 150 | elif fractional != 1 or not all(isinstance(v, int) for v in values): 151 | # If fractional is not 1 or we have a list of not solely integers, 152 | # perform list comprehension 153 | tmp = [modulo_pmax(int(fractional * v)) for v in values] 154 | _set_tensor(name, minionn.VectorInt(tmp), dimension) 155 | else: 156 | # Else, simply add to dict 157 | _set_tensor(name, minionn.VectorInt(values), dimension) 158 | 159 | def get_cpp_tensor(name, reshape = False): 160 | """ 161 | Returns the cpp tensor associated with name. 162 | If name ends on T, the transposed tensor is returned. 163 | If reshape is true, a proper reshaped numpy array is returned 164 | """ 165 | name_normalized = _tensor_normalize_name(name) 166 | 167 | tensor = list(_get_tensor(name_normalized)) 168 | 169 | if reshape: 170 | # Use numpy to reshape array 171 | tensor = np.reshape(tensor, _get_tensor_dim(name_normalized)) 172 | 173 | return tensor 174 | 175 | def get_cpp_tensor_dim(name): 176 | """ 177 | Returns the shape (dimension) as a list of the given tensor. 178 | Result is a list 179 | """ 180 | return _get_tensor_dim(name) 181 | 182 | def has_cpp_tensor(name): 183 | """ 184 | Checks if the given named tensor exists. 185 | Takes the following three cases into account: 186 | - named vector exists 187 | - normal vector exists but transposed doesn't ("T") 188 | - transposed vector "T" exists but named vector doesn't 189 | """ 190 | if _has_tensor(_tensor_normalize_name(name)) \ 191 | or _has_tensor(_tensor_get_base_name(name) + "T") \ 192 | or _has_tensor(_tensor_get_base_name(name)): 193 | return True 194 | else: 195 | return False 196 | 197 | def tensor_has_values(name): 198 | """ 199 | Checks if a given tensor, if it exists, has any values or 200 | if it just a stub for dimensions. 201 | """ 202 | if has_cpp_tensor(name) and _get_tensor(name) is not None: 203 | return True 204 | else: 205 | return False 206 | 207 | def print_tensor(name): 208 | normalized = _tensor_normalize_name(name) 209 | s = normalized 210 | if normalized != name: 211 | s += " (aka " + name + ")" 212 | s += " (dim: " + str(_get_tensor_dim(name)) + ")" 213 | 214 | if config.debug_mode: 215 | s += " (Currently has values: " 216 | if tensor_has_values(name): 217 | s += "Yes. 
Complete size: " + str(len(get_cpp_tensor(name))) 218 | s += " First values:" + str(get_cpp_tensor(name)[:config.debug_print_length]) 219 | else: 220 | s+= "No" 221 | 222 | s += ")" 223 | 224 | return s 225 | 226 | def _log_vector_dict(): 227 | logger.debug("Cpp dictionary elements:") 228 | 229 | for name in sorted(cpp_tensors): 230 | logger.debug(" -- " + print_tensor(name)) 231 | logger.debug("End Cpp dictionary") 232 | 233 | def copy_tensor(name_src, name_dst): 234 | src = _tensor_normalize_name(name_src) 235 | if _has_tensor(src): 236 | _set_tensor(name_dst, _get_tensor(src), _get_tensor_dim(src) ) 237 | 238 | 239 | """" 240 | MiniONN functions. 241 | These functions are CPP functions and the python functions are just a wrapper 242 | for them. 243 | """ 244 | def init(slots): 245 | minionn.init(slots) 246 | 247 | def init_mpc(ip, port, is_server): 248 | minionn.init_aby(ip, port, is_server) 249 | 250 | def shutdown_mpc(): 251 | minionn.shutdown_aby() 252 | 253 | def generate_keys(pkey, skey): 254 | minionn.gen_keys(pkey, skey) 255 | 256 | def server_prepare_w(w_list, pkey): 257 | """ 258 | Prepares the W to send over to the client. 259 | This W contains all w from every matrix multiplication 260 | and is encrypted with the server's public key. 261 | Arranging the Ws is done doing the following: 262 | For each m x n * n x o matrix multiplication, 263 | this multiplication's W has every row of w repeated o times. 264 | Each multiplication's W is then attached to the overall W. 265 | 266 | Input: 267 | - w_list: List of tuples:(name of W, dimensions of matrix multiplication [m,n,o]) 268 | - public key of server 269 | """ 270 | 271 | # We will use numpy to properly arrange the Ws. 272 | # In future, this has a way better performance if numpy is 273 | # the primary library in use 274 | overall_w = [] 275 | for (w, dim) in w_list: 276 | # Get list as reshaped numpy array 277 | tensor = get_cpp_tensor(w, reshape=True) 278 | 279 | for dm in range(0, dim[0]): 280 | for do in range(0, dim[2]): 281 | overall_w.extend(tensor[dm].tolist()) 282 | 283 | if config.debug_mode: 284 | logger.debug("W has size " + str(len(overall_w))) 285 | logger.debug("W starts with " + str(overall_w[:config.debug_print_length_long]) + " and ends with " + str(overall_w[-config.debug_print_length_long:])) 286 | 287 | return minionn.encrypt_w(minionn.VectorInt(overall_w), pkey) 288 | 289 | def server_decrypt_u(encU, skey): 290 | tmp = minionn.VectorInt([]) 291 | minionn.decrypt_w(encU, skey, tmp) 292 | return tmp 293 | 294 | def modulo_pmax(x_in): 295 | x_in = x_in % config.PMAX 296 | 297 | if abs(x_in) <= config.PMAX_HALF: 298 | return x_in 299 | elif x_in > 0: 300 | return x_in - config.PMAX 301 | else: 302 | return x_in + config.PMAX 303 | 304 | def client_precomputation(encW, slot_size, w_list): 305 | """ 306 | Performs the client precomputation. 307 | This takes the encrypted W from the server and generates 308 | a v and r for each matrix multiplication. 309 | r has the shape of x in the W*x multiplication (n x o) 310 | v has the shape of m x n x o (which gets summed up to n x o later during the client matrix multiplication) 311 | 312 | As the r and v values are needed later, they are stored as r0,v0,r1,v1,.. tensors in the tensor dictionary. 
313 | 314 | Input: 315 | - encrypted W 316 | - slot size 317 | - w_list: List of tuples:(name of W, dimensions of matrix multiplication [m,n,o]) 318 | Output: 319 | - encrypted U that can be sent back to the server 320 | """ 321 | logger.info("Started Client Precomputation.") 322 | 323 | # Use numpy to generate r and v 324 | client_randoms = [] 325 | for (w,dim) in w_list: 326 | # Generate v 327 | v = np.random.randint(config.PMAX, dtype='uint64', size = (dim[0], dim[1], dim[2])) 328 | 329 | if not config.random_v: 330 | # Allow the construction of a static v in debug mode 331 | v = np.zeros((dim[0], dim[1], dim[2]), dtype='uint64') 332 | 333 | # Generate r in column major order 334 | # We will need to transpose r before using it later, but now for precomputation 335 | # column major order is required 336 | r = np.random.randint(config.PMAX, dtype='uint64', size = (dim[2], dim[1])) 337 | 338 | if not config.random_r: 339 | # Allow the construction of a static r in debug mode 340 | r = np.multiply(np.ones((dim[2], dim[1]), dtype='uint64'),1) 341 | 342 | client_randoms.append((r,v)) 343 | 344 | logger.debug(" - Generated r and v values:") 345 | for (r,v) in client_randoms: 346 | logger.debug(" -- r size " + str(r.shape) + " v size " + str(v.shape)) 347 | 348 | # Now assemble the big r and v that are used for precomputation 349 | assembled_R = [] 350 | assembled_V = [] 351 | for i in range(0, len(w_list)): # For every Gemm 352 | # Assemble R by repeating r_i for every row of W (m times) 353 | for dm in range(0, w_list[i][1][0]): # For every server row (m) (W row) 354 | for do in range(0, w_list[i][1][2]): # For every client column o (x col) 355 | assembled_R.extend(client_randoms[i][0][do].tolist()) # Append a row of r (here, column because it is transposed - Matrix multiplication takes a row times a column) 356 | 357 | 358 | # Assemble v by just appending all v's after each other 359 | assembled_V.extend(client_randoms[i][1].flatten().tolist()) 360 | 361 | if config.debug_mode: 362 | logger.debug(" - Assembled big R: Size " + str(len(assembled_R)) + "; starts with " + str(assembled_R[:config.debug_print_length_long])) 363 | logger.debug(" - Assembled big V: Size " + str(len(assembled_V)) + "; starts with " + str(assembled_V[:config.debug_print_length_long])) 364 | 365 | # Now we need to transpose the r matrices so that they can be used later (remember, we used r as columns earlier for the matrix multiplication with W) 366 | logger.debug(" - Transposing r values:") 367 | for i in range(0,len(client_randoms)): 368 | # Transpose r 369 | client_randoms[i] = (client_randoms[i][0].T, client_randoms[i][1]) 370 | 371 | # And convert the uint numpy arrays to int cpp arrays for later use 372 | # NOTE: We use a modulo with PMAX here to convert from uint to int 373 | # This is the same that is done on the cpp side for the homomorphic encryptions. 
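        # Illustrative example (values assumed): with PMAX = 101285036033,
        # a raw uint value of PMAX - 1 is mapped to -1 by modulo_pmax,
        # since (PMAX - 1) % PMAX = PMAX - 1 > PMAX_HALF, so PMAX is
        # subtracted once.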
374 | # For the precomputation, Uint64 is needed, and for everything afterwards, int64 375 | iR = minionn.VectorInt([modulo_pmax(r) for r in client_randoms[i][0].flatten().tolist()]) 376 | _set_tensor("initial_r" + str(i), iR, list(client_randoms[i][0].shape)) 377 | 378 | iV = minionn.VectorInt([modulo_pmax(v) for v in client_randoms[i][1].flatten().tolist()]) 379 | _set_tensor("v" + str(i), iV, list(client_randoms[i][1].shape)) 380 | 381 | logger.debug(" -- r" + str(i) + " now has size " + str(client_randoms[i][0].shape) + " v" + str(i) + " size " + str(client_randoms[i][1].shape)) 382 | 383 | # Generate assembled uint vectors 384 | uR = minionn.VectorUInt(assembled_R) 385 | uV = minionn.VectorUInt(assembled_V) 386 | 387 | # Use them for the client precomputation 388 | encU = minionn.client_precomputation(encW, uR, uV) 389 | 390 | logger.info("Client Precomputation success.") 391 | 392 | # return U 393 | return encU 394 | 395 | def extract_sum(inp, dimensions, offset): 396 | """ 397 | Extracts the sum of the tensor of shape dimension (beginning 398 | at offset) and returns it. 399 | dim is assuming a list for [m, n, o] for the matrix calculation mxn * nxo 400 | This is equal to crow, ccol, srow where server matrix gets multiplied with client matrix 401 | """ 402 | tmp = minionn.VectorInt([]) 403 | minionn.extract_sum(inp, tmp, 404 | dimensions[1], dimensions[2], dimensions[0], 405 | offset) 406 | 407 | logger.debug("Extract sum: Extracted with offset " + str(offset)+ " and dimensions " + str(dimensions)) 408 | if config.debug_mode: 409 | logger.debug("Extracted U starts with " + str(list(tmp)[:config.debug_print_length_long]) + " and ends with " + str(list(tmp)[-config.debug_print_length_long:])) 410 | return tmp 411 | 412 | def vector_add(vec_a, vec_b): 413 | cpp_a = minionn.VectorInt(vec_a) 414 | cpp_b = minionn.VectorInt(vec_b) 415 | return minionn.vector_add(cpp_a, cpp_b) 416 | 417 | def vector_sub(vec_a, vec_b): 418 | cpp_a = minionn.VectorInt(vec_a) 419 | cpp_b = minionn.VectorInt(vec_b) 420 | return minionn.vector_sub(cpp_a, cpp_b) 421 | 422 | def vector_floor(vector): 423 | minionn.vector_floor(vector, config.fractional_base) 424 | 425 | def matrix_mult(inp, outp, instance_u, order_w_x = True): 426 | """ 427 | calculates W*x + U + b or 428 | if order_w_x is False, calculates (W' * X' + U)' + b 429 | """ 430 | tmp = minionn.VectorInt([]) 431 | 432 | cpp_w = _get_tensor(inp[0]) 433 | cpp_x = _get_tensor(inp[1]) 434 | cpp_b = _get_tensor(inp[2]) 435 | 436 | my_outp = outp 437 | 438 | # Calculate dimensions as a [m,n,o] list 439 | dims = [ 440 | _get_tensor_dim(inp[0])[0], #first dimension of w 441 | _get_tensor_dim(inp[0])[1], #second dim of w 442 | _get_tensor_dim(inp[1])[1] # second dim of x 443 | ] 444 | 445 | 446 | if config.debug_mode: 447 | logger.debug("U is " + str(instance_u)) 448 | 449 | b_string = "(b ROW wise)" 450 | if not order_w_x: 451 | b_string = "(b COLUMN wise)" 452 | 453 | logger.debug("Performing cpp matrix multiplication " + b_string + " with " 454 | + str(inp) + " to " + my_outp + " with the following dimensions " 455 | + str(_get_tensor_dim(inp[0])[0]) + "x" + str(_get_tensor_dim(inp[0])[1]) 456 | + " * " + str(_get_tensor_dim(inp[1])[0]) + "x" + str(_get_tensor_dim(inp[1])[1]) 457 | ) 458 | 459 | #Compute based on order of W and x 460 | if order_w_x: 461 | # Normal order, calculate W*x + U + b 462 | minionn.matrixmul(cpp_w,cpp_b,instance_u,cpp_x,dims[1],dims[2],dims[0],tmp) 463 | # Dimensions are the ones that we were given: 464 | # first dimension of w and 
second dim of x 465 | else: 466 | # Reversed order: (W' * X' + U + b')' 467 | minionn.matrixmul_b_columns(cpp_w,cpp_b,instance_u,cpp_x,dims[1],dims[2],dims[0],tmp) 468 | 469 | # As we received W' and x', the output now has the dimensions 470 | # of the first dimension of x and the second dimension of w (both are transposed) 471 | # Also, keep in mind that the order is reversed because we store the transposed 472 | # of the final output. 473 | my_outp = my_outp + "T" 474 | 475 | # Floor the resulting vector to reverse the fractional shifting and store it 476 | minionn.vector_floor(tmp, pow(config.fractional_base, 1) * config.fractional_downscale) 477 | 478 | _set_tensor(my_outp, tmp, [dims[0], dims[2]]) 479 | 480 | def matrix_mult_client(inp, outp, v_in, order_w_x = True): 481 | tmp = minionn.VectorInt([]) 482 | cpp_v = _get_tensor(v_in) 483 | 484 | # Calculate dimensions as a [m,n,o] list 485 | dims = [ 486 | _get_tensor_dim(inp[0])[0], #first dimension of w 487 | _get_tensor_dim(inp[0])[1], #second dim of w 488 | _get_tensor_dim(inp[1])[1] # second dim of x 489 | ] 490 | 491 | logger.debug("Client Gemm with dimensions " + str(dims) + " and actual dims " + str(_get_tensor_dim(inp[0])) + " and " + str(_get_tensor_dim(inp[1]))) 492 | 493 | #Compute and store 494 | minionn.matrixmul_simple(cpp_v,dims[1],dims[2],dims[0],tmp) 495 | 496 | # Floor the resulting vector to reverse the fractional shifting 497 | minionn.vector_floor(tmp, pow(config.fractional_base, 1) * config.fractional_downscale) 498 | 499 | my_outp = outp 500 | # If we have a reversed order of operations, we also need to 501 | # transpose v!! 502 | if not order_w_x: 503 | my_outp += "T" 504 | 505 | _set_tensor(my_outp, tmp, [dims[0], dims[2]]) 506 | 507 | def relu_client(inp, outp, responsible_r): 508 | # Prepare vectors 509 | xc = _get_tensor(inp) 510 | yc = minionn.VectorInt([]) 511 | dims = _get_tensor_dim(inp) 512 | rc = _get_tensor(responsible_r) 513 | 514 | # Calculate num of elements in vector 515 | num = reduce(mul, dims, 1) 516 | 517 | # Execute relu 518 | minionn.relu_client(num, xc, rc, yc) 519 | 520 | # Store ys. Dims did not change 521 | _set_tensor(outp, yc, dims) 522 | 523 | def relu_server(inp, outp): 524 | # Prepare vectors 525 | xs = _get_tensor(inp) 526 | ys = minionn.VectorInt([]) 527 | dims = _get_tensor_dim(inp) 528 | 529 | # Calculate num of elements in vector 530 | num = reduce(mul, dims, 1) 531 | 532 | # Execute relu 533 | minionn.relu_server(num, xs, ys) 534 | 535 | # Store ys. Dims did not change 536 | _set_tensor(outp, ys, dims) 537 | -------------------------------------------------------------------------------- /common/minionn_onnx_pb2.py: -------------------------------------------------------------------------------- 1 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
2 | # source: minionn-onnx.proto 3 | 4 | import sys 5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import message as _message 8 | from google.protobuf import reflection as _reflection 9 | from google.protobuf import symbol_database as _symbol_database 10 | from google.protobuf import descriptor_pb2 11 | # @@protoc_insertion_point(imports) 12 | 13 | _sym_db = _symbol_database.Default() 14 | 15 | 16 | import onnx as onnx__pb2 17 | 18 | 19 | DESCRIPTOR = _descriptor.FileDescriptor( 20 | name='minionn-onnx.proto', 21 | package='minionn', 22 | syntax='proto3', 23 | serialized_pb=_b('\n\x12minionn-onnx.proto\x12\x07minionn\x1a\nonnx.proto\"A\n\x15PrecomputationRequest\x12\x15\n\rrequest_model\x18\x01 \x01(\x08\x12\x11\n\trequest_w\x18\x02 \x01(\x08\"D\n\x16PrecomputationResponse\x12\x1f\n\x05model\x18\x01 \x01(\x0b\x32\x10.onnx.ModelProto\x12\t\n\x01w\x18\x02 \x03(\x0c\"+\n\x12\x43omputationRequest\x12\t\n\x01u\x18\x01 \x03(\x0c\x12\n\n\x02xs\x18\x02 \x03(\x03\"!\n\x13\x43omputationResponse\x12\n\n\x02ys\x18\x01 \x03(\x03\x32\xaa\x01\n\x07MinioNN\x12S\n\x0ePrecomputation\x12\x1e.minionn.PrecomputationRequest\x1a\x1f.minionn.PrecomputationResponse\"\x00\x12J\n\x0b\x43omputation\x12\x1b.minionn.ComputationRequest\x1a\x1c.minionn.ComputationResponse\"\x00\x62\x06proto3') 24 | , 25 | dependencies=[onnx__pb2.DESCRIPTOR,]) 26 | 27 | 28 | 29 | 30 | _PRECOMPUTATIONREQUEST = _descriptor.Descriptor( 31 | name='PrecomputationRequest', 32 | full_name='minionn.PrecomputationRequest', 33 | filename=None, 34 | file=DESCRIPTOR, 35 | containing_type=None, 36 | fields=[ 37 | _descriptor.FieldDescriptor( 38 | name='request_model', full_name='minionn.PrecomputationRequest.request_model', index=0, 39 | number=1, type=8, cpp_type=7, label=1, 40 | has_default_value=False, default_value=False, 41 | message_type=None, enum_type=None, containing_type=None, 42 | is_extension=False, extension_scope=None, 43 | options=None, file=DESCRIPTOR), 44 | _descriptor.FieldDescriptor( 45 | name='request_w', full_name='minionn.PrecomputationRequest.request_w', index=1, 46 | number=2, type=8, cpp_type=7, label=1, 47 | has_default_value=False, default_value=False, 48 | message_type=None, enum_type=None, containing_type=None, 49 | is_extension=False, extension_scope=None, 50 | options=None, file=DESCRIPTOR), 51 | ], 52 | extensions=[ 53 | ], 54 | nested_types=[], 55 | enum_types=[ 56 | ], 57 | options=None, 58 | is_extendable=False, 59 | syntax='proto3', 60 | extension_ranges=[], 61 | oneofs=[ 62 | ], 63 | serialized_start=43, 64 | serialized_end=108, 65 | ) 66 | 67 | 68 | _PRECOMPUTATIONRESPONSE = _descriptor.Descriptor( 69 | name='PrecomputationResponse', 70 | full_name='minionn.PrecomputationResponse', 71 | filename=None, 72 | file=DESCRIPTOR, 73 | containing_type=None, 74 | fields=[ 75 | _descriptor.FieldDescriptor( 76 | name='model', full_name='minionn.PrecomputationResponse.model', index=0, 77 | number=1, type=11, cpp_type=10, label=1, 78 | has_default_value=False, default_value=None, 79 | message_type=None, enum_type=None, containing_type=None, 80 | is_extension=False, extension_scope=None, 81 | options=None, file=DESCRIPTOR), 82 | _descriptor.FieldDescriptor( 83 | name='w', full_name='minionn.PrecomputationResponse.w', index=1, 84 | number=2, type=12, cpp_type=9, label=3, 85 | has_default_value=False, default_value=[], 86 | message_type=None, enum_type=None, containing_type=None, 87 | is_extension=False, 
extension_scope=None, 88 | options=None, file=DESCRIPTOR), 89 | ], 90 | extensions=[ 91 | ], 92 | nested_types=[], 93 | enum_types=[ 94 | ], 95 | options=None, 96 | is_extendable=False, 97 | syntax='proto3', 98 | extension_ranges=[], 99 | oneofs=[ 100 | ], 101 | serialized_start=110, 102 | serialized_end=178, 103 | ) 104 | 105 | 106 | _COMPUTATIONREQUEST = _descriptor.Descriptor( 107 | name='ComputationRequest', 108 | full_name='minionn.ComputationRequest', 109 | filename=None, 110 | file=DESCRIPTOR, 111 | containing_type=None, 112 | fields=[ 113 | _descriptor.FieldDescriptor( 114 | name='u', full_name='minionn.ComputationRequest.u', index=0, 115 | number=1, type=12, cpp_type=9, label=3, 116 | has_default_value=False, default_value=[], 117 | message_type=None, enum_type=None, containing_type=None, 118 | is_extension=False, extension_scope=None, 119 | options=None, file=DESCRIPTOR), 120 | _descriptor.FieldDescriptor( 121 | name='xs', full_name='minionn.ComputationRequest.xs', index=1, 122 | number=2, type=3, cpp_type=2, label=3, 123 | has_default_value=False, default_value=[], 124 | message_type=None, enum_type=None, containing_type=None, 125 | is_extension=False, extension_scope=None, 126 | options=None, file=DESCRIPTOR), 127 | ], 128 | extensions=[ 129 | ], 130 | nested_types=[], 131 | enum_types=[ 132 | ], 133 | options=None, 134 | is_extendable=False, 135 | syntax='proto3', 136 | extension_ranges=[], 137 | oneofs=[ 138 | ], 139 | serialized_start=180, 140 | serialized_end=223, 141 | ) 142 | 143 | 144 | _COMPUTATIONRESPONSE = _descriptor.Descriptor( 145 | name='ComputationResponse', 146 | full_name='minionn.ComputationResponse', 147 | filename=None, 148 | file=DESCRIPTOR, 149 | containing_type=None, 150 | fields=[ 151 | _descriptor.FieldDescriptor( 152 | name='ys', full_name='minionn.ComputationResponse.ys', index=0, 153 | number=1, type=3, cpp_type=2, label=3, 154 | has_default_value=False, default_value=[], 155 | message_type=None, enum_type=None, containing_type=None, 156 | is_extension=False, extension_scope=None, 157 | options=None, file=DESCRIPTOR), 158 | ], 159 | extensions=[ 160 | ], 161 | nested_types=[], 162 | enum_types=[ 163 | ], 164 | options=None, 165 | is_extendable=False, 166 | syntax='proto3', 167 | extension_ranges=[], 168 | oneofs=[ 169 | ], 170 | serialized_start=225, 171 | serialized_end=258, 172 | ) 173 | 174 | _PRECOMPUTATIONRESPONSE.fields_by_name['model'].message_type = onnx__pb2.ModelProto 175 | DESCRIPTOR.message_types_by_name['PrecomputationRequest'] = _PRECOMPUTATIONREQUEST 176 | DESCRIPTOR.message_types_by_name['PrecomputationResponse'] = _PRECOMPUTATIONRESPONSE 177 | DESCRIPTOR.message_types_by_name['ComputationRequest'] = _COMPUTATIONREQUEST 178 | DESCRIPTOR.message_types_by_name['ComputationResponse'] = _COMPUTATIONRESPONSE 179 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 180 | 181 | PrecomputationRequest = _reflection.GeneratedProtocolMessageType('PrecomputationRequest', (_message.Message,), dict( 182 | DESCRIPTOR = _PRECOMPUTATIONREQUEST, 183 | __module__ = 'minionn_onnx_pb2' 184 | # @@protoc_insertion_point(class_scope:minionn.PrecomputationRequest) 185 | )) 186 | _sym_db.RegisterMessage(PrecomputationRequest) 187 | 188 | PrecomputationResponse = _reflection.GeneratedProtocolMessageType('PrecomputationResponse', (_message.Message,), dict( 189 | DESCRIPTOR = _PRECOMPUTATIONRESPONSE, 190 | __module__ = 'minionn_onnx_pb2' 191 | # @@protoc_insertion_point(class_scope:minionn.PrecomputationResponse) 192 | )) 193 | 
_sym_db.RegisterMessage(PrecomputationResponse) 194 | 195 | ComputationRequest = _reflection.GeneratedProtocolMessageType('ComputationRequest', (_message.Message,), dict( 196 | DESCRIPTOR = _COMPUTATIONREQUEST, 197 | __module__ = 'minionn_onnx_pb2' 198 | # @@protoc_insertion_point(class_scope:minionn.ComputationRequest) 199 | )) 200 | _sym_db.RegisterMessage(ComputationRequest) 201 | 202 | ComputationResponse = _reflection.GeneratedProtocolMessageType('ComputationResponse', (_message.Message,), dict( 203 | DESCRIPTOR = _COMPUTATIONRESPONSE, 204 | __module__ = 'minionn_onnx_pb2' 205 | # @@protoc_insertion_point(class_scope:minionn.ComputationResponse) 206 | )) 207 | _sym_db.RegisterMessage(ComputationResponse) 208 | 209 | 210 | 211 | _MINIONN = _descriptor.ServiceDescriptor( 212 | name='MinioNN', 213 | full_name='minionn.MinioNN', 214 | file=DESCRIPTOR, 215 | index=0, 216 | options=None, 217 | serialized_start=261, 218 | serialized_end=431, 219 | methods=[ 220 | _descriptor.MethodDescriptor( 221 | name='Precomputation', 222 | full_name='minionn.MinioNN.Precomputation', 223 | index=0, 224 | containing_service=None, 225 | input_type=_PRECOMPUTATIONREQUEST, 226 | output_type=_PRECOMPUTATIONRESPONSE, 227 | options=None, 228 | ), 229 | _descriptor.MethodDescriptor( 230 | name='Computation', 231 | full_name='minionn.MinioNN.Computation', 232 | index=1, 233 | containing_service=None, 234 | input_type=_COMPUTATIONREQUEST, 235 | output_type=_COMPUTATIONRESPONSE, 236 | options=None, 237 | ), 238 | ]) 239 | _sym_db.RegisterServiceDescriptor(_MINIONN) 240 | 241 | DESCRIPTOR.services_by_name['MinioNN'] = _MINIONN 242 | 243 | # @@protoc_insertion_point(module_scope) 244 | -------------------------------------------------------------------------------- /common/minionn_onnx_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | import grpc 3 | 4 | from common import minionn_onnx_pb2 as minionn__onnx__pb2 5 | 6 | 7 | class MinioNNStub(object): 8 | """The service definition. 9 | """ 10 | 11 | def __init__(self, channel): 12 | """Constructor. 13 | 14 | Args: 15 | channel: A grpc.Channel. 16 | """ 17 | self.Precomputation = channel.unary_unary( 18 | '/minionn.MinioNN/Precomputation', 19 | request_serializer=minionn__onnx__pb2.PrecomputationRequest.SerializeToString, 20 | response_deserializer=minionn__onnx__pb2.PrecomputationResponse.FromString, 21 | ) 22 | self.Computation = channel.unary_unary( 23 | '/minionn.MinioNN/Computation', 24 | request_serializer=minionn__onnx__pb2.ComputationRequest.SerializeToString, 25 | response_deserializer=minionn__onnx__pb2.ComputationResponse.FromString, 26 | ) 27 | 28 | 29 | class MinioNNServicer(object): 30 | """The service definition. 
31 | """ 32 | 33 | def Precomputation(self, request, context): 34 | """Precomputation service - requests ONNX format, ~w 35 | """ 36 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 37 | context.set_details('Method not implemented!') 38 | raise NotImplementedError('Method not implemented!') 39 | 40 | def Computation(self, request, context): 41 | """Computation message - sends ~u and x_s and receives y_s 42 | """ 43 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 44 | context.set_details('Method not implemented!') 45 | raise NotImplementedError('Method not implemented!') 46 | 47 | 48 | def add_MinioNNServicer_to_server(servicer, server): 49 | rpc_method_handlers = { 50 | 'Precomputation': grpc.unary_unary_rpc_method_handler( 51 | servicer.Precomputation, 52 | request_deserializer=minionn__onnx__pb2.PrecomputationRequest.FromString, 53 | response_serializer=minionn__onnx__pb2.PrecomputationResponse.SerializeToString, 54 | ), 55 | 'Computation': grpc.unary_unary_rpc_method_handler( 56 | servicer.Computation, 57 | request_deserializer=minionn__onnx__pb2.ComputationRequest.FromString, 58 | response_serializer=minionn__onnx__pb2.ComputationResponse.SerializeToString, 59 | ), 60 | } 61 | generic_handler = grpc.method_handlers_generic_handler( 62 | 'minionn.MinioNN', rpc_method_handlers) 63 | server.add_generic_rpc_handlers((generic_handler,)) 64 | -------------------------------------------------------------------------------- /common/node_operations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/common/node_operations/__init__.py -------------------------------------------------------------------------------- /common/node_operations/common.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | """ 12 | Placeholder operation. Does nothing. 13 | 14 | Is used as a base class for the other operations. 15 | 16 | Operations should be adhere to the standards defined in 17 | https://github.com/onnx/onnx/blob/master/docs/Operators.md 18 | 19 | """ 20 | import logging 21 | logger = logging.getLogger('minionn.node_operations') 22 | 23 | class BaseNode(object): 24 | def __init__(self, node, input_dependent_tensors): 25 | # Store node and inputs/output 26 | self.node = node 27 | self.inp = node["input"] 28 | self.outp = node["output"][0] 29 | 30 | # We only care about the inputs to our node 31 | self.input_dependent_tensors = list(set(input_dependent_tensors) & set(self.inp)) 32 | 33 | # Store if this operation consumes an r in the MiniONN context 34 | # The idea behind this is that we need to know which operations need an r 35 | # to function. In general these are the activation functions as those 36 | # use multi party computations and the r for the next matrix multiplication 37 | # As such, a "Gemm" operation does NOT __consume__ an r. Instead, the r is 38 | # consumed before by either the input into the Gemm or by its preceding 39 | # activation function such as Relu. 40 | self._consumes_r = False 41 | 42 | #For to string, store name 43 | self._string_name = "Base Node Operation" 44 | 45 | 46 | def client(self): 47 | """ 48 | NOTE: Implement this function for the client operation. 
49 | Computes the Node on the client. 50 | """ 51 | logger.info("Placeholder for Client operation.") 52 | logger.debug("Inputs are " + str(self.node["input"]) + " while output is into " + str(self.node["output"])) 53 | logger.debug("We have the attributes " + str(self.node["attributes"])) 54 | logger.debug("Placeholder end") 55 | 56 | def server(self): 57 | """ 58 | NOTE: Implement this function for the server operation. 59 | Computes the Node on the server. 60 | """ 61 | logger.info("Placeholder for Server operation.") 62 | logger.debug("Inputs are " + str(self.node["input"]) + " while output is into " + str(self.node["output"])) 63 | logger.debug("We have the attributes " + str(self.node["attributes"])) 64 | logger.debug("Placeholder end") 65 | 66 | def reverse_r(self, current_r): 67 | """ 68 | NOTE: Implement this function for the client. 69 | Reverses the effects this node has on MiniONN's r. 70 | Such effects might not be trivial and have to be handled with care. 71 | The problem with r is that it is random and has the shape of x 72 | AT THE TIME of the matrix multiplication. 73 | This means if the matrix multiplication actually computes on x', 74 | r needs to be transposed too (to r' then) for its usage in the 75 | activation function so that X_s has the correct shape when it 76 | comes to the matrix multiplication (where r is assumed). 77 | 78 | See the Gemm operator for an example of this. 79 | 80 | The default implementation does nothing and simply returns 81 | the r without changing it. 82 | """ 83 | return current_r 84 | 85 | 86 | def consumes_r(self): 87 | return self._consumes_r 88 | 89 | def __str__(self): 90 | return self._string_name + ". Inputs are " + str(self.inp) + " while output is into " + str(self.outp) 91 | -------------------------------------------------------------------------------- /common/node_operations/gemm.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | """ 12 | Operator node for Gemm. 13 | Adhering to https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm 14 | 15 | Gemm 16 | 17 | General Matrix multiplication: 18 | https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 19 | Compute Y = alpha * A * B + beta * C, where input tensor A has 20 | dimension (M X K), input tensor B has dimension (K X N), 21 | input tensor C and output tensor Y have dimension (M X N). 22 | If attribute broadcast is non-zero, input tensor C will be broadcasted 23 | to match the dimension requirement. A will be transposed before 24 | doing the computation if attribute transA is non-zero, same for B and transB. 25 | Version 26 | This version of the operator has been available since version 6 of the 27 | default ONNX operator set. 28 | 29 | Other versions of this operator: Gemm-1 30 | Attributes 31 | 32 | alpha : float 33 | Scalar multiplier for the product of input tensors A * B 34 | beta : float 35 | Scalar multiplier for input tensor C 36 | broadcast : int 37 | Whether C should be broadcasted 38 | transA : int 39 | Whether A should be transposed 40 | transB : int 41 | Whether B should be transposed 42 | 43 | Inputs 44 | 45 | A : T 46 | Input tensor A 47 | B : T 48 | Input tensor B 49 | C : T 50 | Input tensor C, can be inplace. 51 | 52 | Outputs 53 | 54 | Y : T 55 | Output tensor. 
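For example (an illustrative note, not part of the ONNX specification text above): with alpha = beta = 1 and no transposes, A of shape (2, 3) and B of shape (3, 4) give A * B of shape (2, 4); adding C (broadcast if necessary) yields Y of shape (2, 4), i.e. (M x N).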
56 | 57 | Type Constraints 58 | 59 | T : tensor(float16), tensor(float), tensor(double) 60 | Constrain input and output types to float tensors. 61 | 62 | 63 | NOTE: Operator currently only supports transA and transB. Alpha, Beta, Broadcast are ignored. 64 | """ 65 | from .common import BaseNode 66 | from common import minionn_helper 67 | from common import config 68 | import logging 69 | logger = logging.getLogger('minionn.node_operations') 70 | 71 | class GeneralMatrixMultiplicationNode(BaseNode): 72 | def __init__(self, node, input_dependent_tensors): 73 | super(GeneralMatrixMultiplicationNode, self).__init__(node, input_dependent_tensors) 74 | 75 | # Get dimensions for this Gemm 76 | self.dim_w = minionn_helper.get_cpp_tensor_dim(self.inp[0]) 77 | self.dim_x = minionn_helper.get_cpp_tensor_dim(self.inp[1]) 78 | 79 | self.dim_m = self.dim_w[0] 80 | self.dim_n = self.dim_w[1] 81 | self.dim_n_c = self.dim_x[0] 82 | self.dim_o = self.dim_x[1] 83 | 84 | self._broadcast_active = False 85 | 86 | logger.debug("Gemm start. Dimensions are " + str(self.get_dimensions()) + " with inputs " + str(self.inp)) 87 | 88 | # Analyze and parse attributes 89 | attribute_map = { 90 | "alpha": self._alpha, 91 | "beta": self._beta, 92 | "transA": self._transA, 93 | "transB": self._transB, 94 | "broadcast": self._broadcast 95 | } 96 | for a in self.node["attributes"]: 97 | attribute_map[a["name"]](a) 98 | 99 | logger.debug("Gemm attribute " + a["name"] + ". Dimensions are " + str(self.get_dimensions()) + " with inputs " + str(self.inp)) 100 | 101 | # Sanity check that the dimensions match 102 | if(self.dim_n != self.dim_n_c): 103 | logger.error( 104 | "Matrix multiplication: Dimensions do not match! " 105 | + "They are: [" + str(self.dim_m) + "," + str(self.dim_n) + "] " 106 | + "[" + str(self.dim_n_c) + "," + str(self.dim_o) + "] " 107 | ) 108 | 109 | # If broadcast is not active, log an error 110 | if not self._broadcast_active: 111 | logger.error("Broadcast is not active but we are broadcasting anyway! If you do not want broadcast, check the Matrix Multiplication functions.") 112 | 113 | # Already create the output stub with correct dimensions 114 | # The actual output is not affected by the following potential 115 | # dimension adjustments 116 | minionn_helper.put_cpp_tensor( 117 | self.outp, 118 | None, 119 | [self.dim_m, self.dim_o] 120 | ) 121 | 122 | # MiniONN needs W*x and not x*W! 123 | # If we have W*x, calculate W*x + U + b 124 | # If we have x*w, instead calculate (W' * X' + U + b')' 125 | # (U is already calculated accordingly and needs no transpose here) 126 | if self.inp[1] in self.input_dependent_tensors: 127 | # First input is a W. We have W*x. Proceed normally 128 | logger.debug("Normal W*x + b + U Gemm") 129 | self.order_w_x = True 130 | self.w = self.inp[0] 131 | 132 | elif self.inp[0] in self.input_dependent_tensors: 133 | # First input is not a W, we have x*W. Perform alternative multiplication 134 | # calculate (W' * X' + U)' + b . In one step: (W' * X' + U + b')' 135 | logger.debug("Reversed Gemm with x*w + b. Computing (W'*x' + U + b')' instead") 136 | new_x = self.inp[0] + "T" 137 | new_w = self.inp[1] + "T" 138 | new_b = self.inp[2] 139 | self.inp = [new_w, new_x, new_b ] 140 | self.order_w_x = False 141 | self.w = new_w 142 | 143 | # Adjust dimensions 144 | # We had m x n * n x o and now have o x n * n x m 145 | # --> Swap m and o 146 | self.dim_m, self.dim_o = self.dim_o, self.dim_m 147 | else: 148 | logger.error("Both inputs to matrix multiplication depend on the model input. 
This is not supported by MiniONN as we cannot perform a precomputation then.") 149 | 150 | logger.debug("Operation Gemm will operate on " 151 | + str(self.inp) + " to " + self.outp + " with the following dimensions " 152 | + str(self.dim_m) + "x" + str(self.dim_n) 153 | + " * " + str(self.dim_n_c) + "x" + str(self.dim_o) 154 | ) 155 | 156 | # For to-string, store name 157 | self._string_name = "Gemm" 158 | 159 | def set_u(self, u): 160 | self.u = u 161 | 162 | def set_gemm_count(self, count): 163 | self.gemm_count = count 164 | 165 | def get_w(self): 166 | return self.w 167 | 168 | def get_dimensions(self): 169 | """ 170 | Returns the __actual__ dimensions of this Gemm. 171 | This accounts for: 172 | - TransA, TransB 173 | - Wx or xW situation 174 | """ 175 | return [self.dim_m, self.dim_n, self.dim_o] 176 | 177 | def has_order_w_x(self): 178 | return self.order_w_x 179 | 180 | def server(self): 181 | logger.info("Performing Gemm with " 182 | + str(self.inp) + " to " + self.outp + " with the following dimensions " 183 | + str(self.dim_m) + "x" + str(self.dim_n) 184 | + " * " + str(self.dim_n_c) + "x" + str(self.dim_o) 185 | ) 186 | 187 | minionn_helper.matrix_mult(self.inp, self.outp, self.u, order_w_x=self.order_w_x) 188 | 189 | 190 | def client(self): 191 | logger.info("Performing Client Gemm with " + str(self.inp) + " to " + self.outp ) 192 | 193 | minionn_helper.matrix_mult_client(self.inp, self.outp, "v" + str(self.gemm_count), order_w_x=self.order_w_x) 194 | 195 | def reverse_r(self, current_r): 196 | """ 197 | A reversed matrix multiplication means that due to MiniONN, we transpose 198 | both inputs and reverse their order (as MiniONN expects W*x). 199 | For r, this means that we need to transpose it for its earlier use 200 | in the activation function/client start. 201 | """ 202 | if self.order_w_x: 203 | logger.debug("Gemm reverse r: NOT reversing r. Returning " + current_r) 204 | return current_r 205 | else: 206 | logger.debug("Gemm reverse r: REVERSING r. Returning " + current_r +"T") 207 | return current_r + "T" 208 | 209 | def _alpha(self, attribute): 210 | # Log an error if the alpha value is not 1 (not supported currently) 211 | if attribute["value"] != 1.0: 212 | logger.error("Gemm attribute alpha is not 1. This is unsupported. Ignoring alpha.") 213 | 214 | def _beta(self, attribute): 215 | # Log an error if the beta value is not 1 (not supported currently) 216 | if attribute["value"] != 1.0: 217 | logger.error("Gemm attribute beta is not 1. This is unsupported. 
Ignoring beta.") 218 | 219 | def _transA(self, attribute): 220 | if attribute["value"] == 1: 221 | # Transpose A (Deferred transpose 222 | # -> Transpose is executed when tensor is accessed) 223 | # Store the transposed vector as input and swap dimensions 224 | self.inp[0] = self.inp[0] + "T" 225 | self.dim_m, self.dim_n = self.dim_n, self.dim_m 226 | 227 | def _transB(self, attribute): 228 | if attribute["value"] == 1: 229 | # Transpose B (Deferred transpose 230 | # -> Transpose is executed when tensor is accessed) 231 | # Store the transposed vector as input and swap dimensions 232 | self.inp[1] = self.inp[1] + "T" 233 | self.dim_o, self.dim_n_c = self.dim_n_c, self.dim_o 234 | 235 | def _broadcast(self, attribute): 236 | # Currently, broadcasting is always on (we assume a column vector and add it to the matrix) 237 | # If you wish to implement this properly, change b here and change the matrix multiplication code that is called 238 | if attribute["value"] == 1: 239 | self._broadcast_active = True 240 | 241 | 242 | -------------------------------------------------------------------------------- /common/node_operations/log.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | """ 12 | Placeholder operation for Log. Does nothing. 13 | 14 | Log is currently not supported by MiniONN and is ignored! 15 | 16 | """ 17 | from .common import BaseNode 18 | from common import minionn_helper 19 | import logging 20 | logger = logging.getLogger('minionn.node_operations') 21 | 22 | class LogNode(BaseNode): 23 | def __init__(self, node, input_dependent_tensors): 24 | super(LogNode, self).__init__(node, input_dependent_tensors) 25 | 26 | # Input is a single vector (and not several) 27 | self.inp = self.inp[0] 28 | 29 | # Store the output tensor with original dimensions (they dont change) 30 | dims = minionn_helper.get_cpp_tensor_dim(self.inp) 31 | minionn_helper.put_cpp_tensor( 32 | self.outp, 33 | None, 34 | dims 35 | ) 36 | 37 | logger.debug("Operation Log will make " 38 | + minionn_helper.print_tensor(self.inp) 39 | + " into tensor " + self.outp + " of shape " 40 | + str(dims) 41 | ) 42 | 43 | #For to string, store name 44 | self._string_name = "Log" 45 | 46 | def server(self): 47 | """ 48 | Placeholder function to simulate execution of a node 49 | """ 50 | logger.info("Log operation. Redirected input to output. Did nothing.") 51 | logger.debug("Inputs are " + str(self.node["input"]) + " while output is into " + str(self.node["output"])) 52 | logger.debug("We have the attributes " + str(self.node["attributes"])) 53 | logger.debug("Log end") 54 | 55 | # Copy input tensor over to output 56 | minionn_helper.copy_tensor(self.inp, self.outp) 57 | 58 | def client(self): 59 | """ 60 | Placeholder function to simulate execution of a node 61 | """ 62 | if minionn_helper.tensor_has_values(self.inp): 63 | # If this tensor exists, simply reshape it. 
64 | self.server() 65 | else: 66 | logger.info("Log operation did nothing.") 67 | logger.debug("Inputs are " + str(self.node["input"]) + " while output is into " + str(self.node["output"])) 68 | logger.debug("We have the attributes " + str(self.node["attributes"])) 69 | logger.debug("Log end") 70 | 71 | # If this tensor does not exist here, do nothing 72 | # We already stored a stub tensor during initialization -------------------------------------------------------------------------------- /common/node_operations/relu.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | """ 12 | Relu operation 13 | https://github.com/onnx/onnx/blob/master/docs/Operators.md#Relu 14 | 15 | Relu takes one input data (Tensor) and produces one output data (Tensor) 16 | where the rectified linear function, y = max(0, x), is applied to 17 | the tensor elementwise. 18 | 19 | Version 20 | This version of the operator has been available since version 6 of 21 | the default ONNX operator set. 22 | 23 | Other versions of this operator: Relu-1 24 | Inputs 25 | 26 | X : T 27 | Input tensor 28 | 29 | Outputs 30 | 31 | Y : T 32 | Output tensor 33 | 34 | Type Constraints 35 | 36 | T : tensor(float16), tensor(float), tensor(double) 37 | Constrain input and output types to float tensors. 38 | """ 39 | from .common import BaseNode 40 | from common import minionn_helper 41 | import logging 42 | logger = logging.getLogger('minionn.node_operations') 43 | 44 | class ReluNode(BaseNode): 45 | def __init__(self, node, input_dependent_tensors): 46 | super(ReluNode, self).__init__(node, input_dependent_tensors) 47 | 48 | # Relu is an activation function that requires an R 49 | self._consumes_r = True 50 | 51 | # Input is a single tensor (and not several) 52 | self.inp = self.inp[0] 53 | 54 | # Store the output tensor with original dimensions (they don't change) 55 | dims = minionn_helper.get_cpp_tensor_dim(self.inp) 56 | minionn_helper.put_cpp_tensor( 57 | self.outp, 58 | None, 59 | dims 60 | ) 61 | 62 | logger.debug("Operation Relu will make " 63 | + minionn_helper.print_tensor(self.inp) 64 | + " into tensor " + self.outp + " of shape " 65 | + str(dims) 66 | ) 67 | 68 | # For to-string, store name 69 | self._string_name = "Relu" 70 | 71 | def set_gemm_count(self, count): 72 | self.gemm_count = count 73 | 74 | def server(self): 75 | """ 76 | Relu server simply calls the cpp MPC code 77 | """ 78 | logger.info("Relu operation. Server version.") 79 | logger.debug("Inputs are " + str(self.node["input"]) + " while output is into " + str(self.node["output"])) 80 | logger.debug("We have the attributes " + str(self.node["attributes"])) 81 | 82 | minionn_helper.relu_server(self.inp, self.outp) 83 | logger.debug("Relu end") 84 | 85 | def client(self): 86 | """ 87 | Relu client uses the R of the next layer during the MPC 88 | """ 89 | logger.info("Relu operation. 
Client version.") 90 | logger.debug("Inputs are " + str(self.node["input"]) + " while output is into " + str(self.node["output"])) 91 | logger.debug("We have the attributes " + str(self.node["attributes"])) 92 | 93 | rc = "r" + str(self.gemm_count) 94 | logger.debug("R is " + rc) 95 | minionn_helper.relu_client(self.inp, self.outp, rc) 96 | logger.debug("Relu end") -------------------------------------------------------------------------------- /common/node_operations/reshape.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | """ 12 | Operator node for Reshape. 13 | Adhering to https://github.com/onnx/onnx/blob/master/docs/Operators.md#Reshape 14 | 15 | 16 | Reshape 17 | 18 | Reshape the input tensor similar to numpy.reshape. 19 | 20 | First input is the data tensor, second input is a shape tensor which 21 | specifies the output shape. It outputs the reshaped tensor. 22 | 23 | At most one dimension of the new shape can be -1. In this case, 24 | the value is inferred from the size of the tensor and the 25 | remaining dimensions. A dimension could also be 0, in which case 26 | the actual dimension value is unchanged (i.e. taken from the input tensor). 27 | 28 | Version 29 | This version of the operator has been available since version 5 of 30 | the default ONNX operator set. 31 | 32 | Other versions of this operator: Reshape-1 33 | Inputs 34 | 35 | data : T 36 | An input tensor. 37 | shape : tensor(int64) 38 | Specified shape for output. 39 | 40 | Outputs 41 | 42 | reshaped : T 43 | Reshaped data. 44 | 45 | Type Constraints 46 | 47 | T : tensor(float16), tensor(float), tensor(double) 48 | Constrain input and output types to float tensors. 49 | 50 | """ 51 | from .common import BaseNode 52 | import logging 53 | logger = logging.getLogger('minionn.node_operations') 54 | from common import minionn_helper 55 | 56 | from operator import mul 57 | from functools import reduce 58 | 59 | import numpy as np 60 | 61 | class ReshapeNode(BaseNode): 62 | def __init__(self, node, input_dependent_tensors): 63 | super(ReshapeNode, self).__init__(node, input_dependent_tensors) 64 | self.inp = self.inp[0] 65 | 66 | # First, calculate dimensions 67 | self.dims = list(self.node["attributes"][0]["value"]) 68 | # if it contains a -1, calculate that entry by deduction 69 | if self.dims.count(-1) > 1: 70 | logger.error("Reshape only works with at most one -1 dimension.") 71 | 72 | if -1 in self.dims: 73 | i = self.dims.index(-1) 74 | # Calculate this dimension 75 | total_size = reduce(mul, minionn_helper.get_cpp_tensor_dim(self.inp), 1) 76 | other_dims = [d for d in self.dims if d != -1] 77 | current_size = reduce(mul, other_dims, 1) 78 | self.dims[i] = int(total_size / current_size) 79 | 80 | # And store that tensor with the calculated dimensions 81 | minionn_helper.put_cpp_tensor( 82 | self.outp, 83 | None, 84 | self.dims 85 | ) 86 | 87 | logger.debug("Operation Reshape will reshape " 88 | + minionn_helper.print_tensor(self.inp) 89 | + " into tensor " + self.outp + " of shape " 90 | + str(self.dims) 91 | ) 92 | 93 | #For to string, store name 94 | self._string_name = "Reshape" 95 | 96 | def server(self): 97 | # Use numpy to reshape input 98 | # Technically, we use cpp vectors here and would not need 99 | # to touch the vector at all. 
But just to avoid any problems 100 | # once we change away from cpp vectors, let's use numpy and copy here. 101 | original = minionn_helper.get_cpp_tensor(self.inp) 102 | reshaped = np.reshape( 103 | original, 104 | self.node["attributes"][0]["value"] 105 | ) 106 | dims = list(reshaped.shape) 107 | 108 | logger.info("Reshaping tensor " 109 | + minionn_helper.print_tensor(self.inp) 110 | + " into tensor " + self.outp + " of shape " 111 | + str(dims) 112 | + ". Original length is " + str(len(original)) 113 | + " and resized has size " + str(len(reshaped)) 114 | ) 115 | 116 | minionn_helper.put_cpp_tensor( 117 | self.outp, 118 | reshaped.flatten().tolist(), 119 | dims 120 | ) 121 | 122 | def client(self): 123 | if minionn_helper.tensor_has_values(self.inp): 124 | # If this tensor exists, simply reshape it. 125 | self.server() 126 | else: 127 | logger.info("Client Reshape did nothing. " 128 | + minionn_helper.print_tensor(self.inp) 129 | + " (I do not have this one) should be reshaped to tensor " + self.outp + " of shape " 130 | + str(self.dims) 131 | ) 132 | 133 | # If this tensor does not exist here, do nothing 134 | # We already stored a stub tensor during initialization 135 | 136 | -------------------------------------------------------------------------------- /common/node_operations/softmax.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | """ 12 | Placeholder operation for Softmax. Does nothing. 13 | 14 | Softmax is currently not supported by MiniONN and is ignored! 15 | 16 | """ 17 | from .common import BaseNode 18 | from common import minionn_helper 19 | import logging 20 | logger = logging.getLogger('minionn.node_operations') 21 | 22 | class SoftmaxNode(BaseNode): 23 | def __init__(self, node, input_dependent_tensors): 24 | super(SoftmaxNode, self).__init__(node, input_dependent_tensors) 25 | 26 | # Input is a single vector (and not several) 27 | self.inp = self.inp[0] 28 | 29 | # Store the output tensor with original dimensions (they don't change) 30 | dims = minionn_helper.get_cpp_tensor_dim(self.inp) 31 | minionn_helper.put_cpp_tensor( 32 | self.outp, 33 | None, 34 | dims 35 | ) 36 | 37 | logger.debug("Operation Softmax will make " 38 | + minionn_helper.print_tensor(self.inp) 39 | + " into tensor " + self.outp + " of shape " 40 | + str(dims) 41 | ) 42 | 43 | # For to-string, store name 44 | self._string_name = "Softmax" 45 | 46 | def server(self): 47 | """ 48 | Placeholder function to simulate execution of a node 49 | """ 50 | logger.info("Softmax operation. Redirected input to output. Did nothing.") 51 | logger.debug("Inputs are " + str(self.node["input"]) + " while output is into " + str(self.node["output"])) 52 | logger.debug("We have the attributes " + str(self.node["attributes"])) 53 | logger.debug("Softmax end") 54 | 55 | # Copy input tensor over to output 56 | minionn_helper.copy_tensor(self.inp, self.outp) 57 | 58 | def client(self): 59 | """ 60 | Placeholder function to simulate execution of a node 61 | """ 62 | if minionn_helper.tensor_has_values(self.inp): 63 | # If this tensor has values, run the server code (it copies input to output). 64 | self.server() 65 | else: 66 | logger.info("Softmax operation. 
Did nothing.") 67 | logger.debug("Inputs are " + str(self.node["input"]) + " while output is into " + str(self.node["output"])) 68 | logger.debug("We have the attributes " + str(self.node["attributes"])) 69 | logger.debug("Softmax end") 70 | 71 | # If this tensor does not exist here, do nothing 72 | # We already stored a stub tensor during initialization -------------------------------------------------------------------------------- /common/onnx_helper.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | import onnx 12 | import onnx.numpy_helper 13 | 14 | from . import config 15 | 16 | import logging 17 | logger = logging.getLogger('minionn.onnx_helper') 18 | 19 | def stripModelFromPrivateData(m): 20 | """ 21 | Strips the given model from all private data and returns a copy. 22 | This usually includes all tensors, i.e. all w and b. 23 | """ 24 | # Create new model and copy all from m 25 | privatizedModel = onnx.ModelProto() 26 | privatizedModel.CopyFrom(m) 27 | 28 | # Clear the tensors from the model 29 | del privatizedModel.graph.initializer[:] 30 | 31 | # Return the privatized model 32 | return privatizedModel 33 | 34 | 35 | def onnx_tensor_to_list(tensor): 36 | return onnx.numpy_helper.to_array(tensor).flatten().tolist() 37 | 38 | 39 | def retrieveTensorsFromModel(model): 40 | tensor_dict = {} 41 | for t in model.graph.initializer: 42 | tensor_dict[str(t.name)] = onnx_tensor_to_list(t) 43 | logger.debug("Parsed tensor with name " + str(t.name) + ".") 44 | if config.debug_mode: 45 | logger.debug(" It starts with " + str(tensor_dict[str(t.name)][:2])) 46 | 47 | return tensor_dict 48 | 49 | 50 | def retrieveTensorDimensionsFromModel(model): 51 | dims_dict = {} 52 | for i in model.graph.input: 53 | dimensions = [] 54 | for d in i.type.tensor_type.shape.dim: 55 | dimensions.append(d.dim_value) 56 | dims_dict[str(i.name)] = dimensions 57 | logger.debug("Tensor dimensions are:" + str(dims_dict)) 58 | 59 | return dims_dict 60 | 61 | 62 | # ONNX attribute type map as defined in the onnx protobuf file 63 | # Only the required attributes here 64 | _attr_type_map = { 65 | 1: "f", 66 | 2: "i", 67 | 3: "s", 68 | 4: "t", 69 | 7: "ints" 70 | } 71 | 72 | 73 | def retrieveNodesFromModel(model): 74 | nodes = model.graph.node 75 | nodes_list = [] 76 | for n in nodes: 77 | node_dict = {"input":n.input, 78 | "output":n.output, 79 | "operation":n.op_type, 80 | "attributes":[ 81 | {"name":a.name, 82 | "type":a.type, 83 | "value":getattr(a,_attr_type_map[a.type])} 84 | for a in n.attribute] 85 | } 86 | nodes_list.append(node_dict) 87 | 88 | logger.debug("Nodes of the network are: " + str(nodes_list)) 89 | return nodes_list 90 | 91 | 92 | def get_bs_and_ws(nodes, tensors): 93 | b_list = [] 94 | w_list = [] 95 | 96 | # Parse the nodes for any occurence of Gemm and put the inputs into b and w accordingly 97 | for n in nodes: 98 | if n["operation"] == "Gemm": 99 | inp = n["input"] 100 | # Get the two operators 101 | # In a normal W*x matrix multiplication, w is the first input 102 | w = inp[0] 103 | x = inp[1] 104 | 105 | #Figure out which one is w 106 | if w not in tensors and x in tensors: 107 | # we have x*w 108 | # minionn expects W*x 109 | # The difference is that for x*w we need to transpose w for 110 | # the precomputation phase (U) 111 | # 
However, in this step this does not matter as we only 112 | # use the Ws and Bs for writing into the model (see fractions) 113 | w = x 114 | elif w not in tensors: 115 | logger.error("Retrieval of bs and ws from matrix multiplications failed! No w found:" + str(inp)) 116 | 117 | # Store them to lists 118 | w_list.append(w) 119 | b_list.append(inp[2]) 120 | 121 | return b_list, w_list 122 | 123 | 124 | def calculate_input_dependent_tensors(nodes, model_input): 125 | input_dependent_tensors = [model_input] 126 | for n in nodes: 127 | input_Set = set(input_dependent_tensors) 128 | operator_Set = set(n["input"]) 129 | set_intersection = input_Set & operator_Set 130 | 131 | if len(set_intersection) > 0: 132 | # Input of node depends on model input 133 | # Add output of node to the dependent set 134 | input_dependent_tensors.append(n["output"][0]) 135 | 136 | return input_dependent_tensors -------------------------------------------------------------------------------- /common/operation_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | # Logging 12 | import logging 13 | logger = logging.getLogger('minionn.NodeOperator') 14 | 15 | # Wrapper for cpp functions and vectors 16 | from . import minionn_helper 17 | 18 | # Helper for onnx functions 19 | from . import onnx_helper 20 | 21 | # Node operations 22 | from common.node_operations import log, gemm, relu, reshape, softmax 23 | 24 | import copy 25 | from operator import mul 26 | from functools import reduce 27 | 28 | class OperationHandler(object): 29 | """ 30 | Class that takes the nodes of the model and transforms them into 31 | node objects that will handle the execution of that layer. 32 | Is called by client and server to execute their respective version of each layer. 33 | """ 34 | 35 | """ 36 | Map of the operations named in the nodes of an ONNX file 37 | to their corresponding function 38 | Defined in: https://github.com/onnx/onnx/blob/master/docs/Operators.md 39 | 40 | Log and Softmax are not supported by MiniONN but are kept here as placeholders. 41 | This allows running models with Softmax and Log in them without needing to change the ONNX model. 42 | """ 43 | _ONNX_OPERATIONS = { 44 | "Log": log.LogNode, # Not supported and ignored 45 | "Gemm": gemm.GeneralMatrixMultiplicationNode, 46 | "Relu": relu.ReluNode, 47 | "Reshape": reshape.ReshapeNode, 48 | "Softmax": softmax.SoftmaxNode, # Not supported and ignored 49 | } 50 | 51 | def __init__(self, nodes, model_input, simulation=False): 52 | # Use a copy of the node dictionary to prevent side effects 53 | self._nodes = copy.deepcopy(nodes) 54 | 55 | # Now, calculate which tensors depend on the input 56 | # This is necessary to know for e.g. 
Gemm where the detection 57 | # of W and x (for W*x or x*W) depends on which tensors 58 | # depend on the input 59 | input_dependent_tensors = onnx_helper.calculate_input_dependent_tensors(self._nodes, model_input) 60 | 61 | # If we are in simulation mode, log more about the model 62 | if simulation: 63 | logger.info("Tensors that depend on the model input are: " 64 | + str(input_dependent_tensors) 65 | ) 66 | 67 | # Build the order of operations 68 | self._operations = [] 69 | self.w_list = [] 70 | gemm_count = 0 71 | for n in self._nodes: 72 | new_operation = self._get_node_operator(n, input_dependent_tensors) 73 | self._operations.append(new_operation) 74 | 75 | # Now depending on the operator: 76 | # - For Gemm, store name of w for precomputation and store the index of this Gemm 77 | # - For Relu, store the index of the previous Gemm (for r detection) 78 | if type(new_operation) is gemm.GeneralMatrixMultiplicationNode: 79 | self.w_list.append((new_operation.get_w(), new_operation.get_dimensions())) 80 | new_operation.set_gemm_count(gemm_count) 81 | gemm_count += 1 82 | 83 | logger.debug("Detected W as " + str(self.w_list[-1][0]) 84 | + " with shape of gemm (m,n,o) " + str(self.w_list[-1][1])) 85 | 86 | if type(new_operation) is relu.ReluNode: 87 | new_operation.set_gemm_count(gemm_count) 88 | 89 | # Sanity check that all w's got detected 90 | # Technically, this should not happen and only happens 91 | # when a weight matrix of a Gemm is modified before being used. 92 | # Then, the input to the Gemm is different than the initializer matrix and 93 | # as such is not detected. 94 | # If you experience this error, go through your node operations and 95 | # make sure that you execute all nodes that change a weight matrix BEFORE 96 | # the MiniONN precomputation (as this requires all W's to be present) 97 | assert len(self.w_list) == gemm_count, \ 98 | "Not all w's were properly detected by the Matrix Multiplications!" 99 | 100 | # Print the order of operations of the network in simulation mode 101 | if simulation: 102 | logger.info("Network has this order:") 103 | for o in self._operations: 104 | logger.info(" - " + str(o)) 105 | logger.info("Network end.") 106 | 107 | def init_server(self, instance_u): 108 | """ 109 | Initializes the NodeOperator in server mode. 110 | """ 111 | self._is_server = True 112 | self._instance_u = instance_u 113 | 114 | # Iterate over operations and: 115 | # - for Gemm: fill in Us 116 | u_counter = 0 117 | for o in self._operations: 118 | if type(o) is gemm.GeneralMatrixMultiplicationNode: 119 | # get dimensions of o 120 | dims = o.get_dimensions() 121 | # fill in U of o 122 | this_u = minionn_helper.extract_sum( 123 | self._instance_u, 124 | dims, 125 | u_counter 126 | ) 127 | o.set_u(this_u) 128 | u_counter += reduce(mul, dims, 1) 129 | 130 | 131 | def init_client(self): 132 | """ 133 | Initialize the NodeOperator in client mode. 134 | """ 135 | self._is_server = False 136 | 137 | """ 138 | Iterate through the operations and revert all changes on R that might happen 139 | between the matrix multiplication and its activation function / beginning. 140 | This is necessary as the r might change between its first usage and 141 | the Gemm. 142 | The simplest example is a transpose in the Gemm. There, the r might 143 | be used as a transposed r but for the preceding activation 144 | function it needs to not be transposed. 145 | The rs are generated as they are used in the Gemms, so we might need 146 | to change some r's here (e.g. 
transpose them) 147 | """ 148 | 149 | mm_counter = 0 150 | for i in range(0,len(self._operations)): 151 | o = self._operations[i] 152 | if type(o) is gemm.GeneralMatrixMultiplicationNode: 153 | # This is a Gemm. Now adjust r based on each operation 154 | # Go back to the beginning or to the last consumer of r 155 | j = i 156 | current_r = "initial_r" + str(mm_counter) 157 | while j >= 0 and not self._operations[j].consumes_r(): 158 | # Call the operation to reverse its effect on r 159 | current_r = self._operations[j].reverse_r(current_r) 160 | 161 | j -= 1 162 | 163 | # Now we have reversed all effects for the r that the activation function needs 164 | # store this in xr where x is the number of the Gemm operation 165 | r_name = "r" + str(mm_counter) 166 | minionn_helper.put_cpp_tensor(r_name, 167 | minionn_helper.get_cpp_tensor(current_r), 168 | minionn_helper.get_cpp_tensor_dim(current_r)) 169 | 170 | logger.debug("Rewrote initial_r" + str(mm_counter) 171 | + " to " + current_r 172 | + " and stored it in " + r_name 173 | + ". Its shape is now " 174 | + str(minionn_helper.get_cpp_tensor_dim(r_name)) 175 | ) 176 | 177 | # increment matrix multiplication counter 178 | mm_counter += 1 179 | 180 | def get_w_list(self): 181 | return self.w_list 182 | 183 | def run_network(self, x_in, in_name, out_name): 184 | logger.info("RUNNING NETWORK") 185 | logger.debug("Desired output: " + out_name) 186 | 187 | # Put x into the tensor dictionary 188 | # it should already be in there as a dimension stub, 189 | # so take the dimension from there 190 | x_dim = minionn_helper.get_cpp_tensor_dim(in_name) 191 | minionn_helper.put_cpp_tensor(in_name, x_in, x_dim) 192 | 193 | # Execute 194 | for o in self._operations: 195 | self._execute_operator(o) 196 | minionn_helper._log_vector_dict() 197 | 198 | # We are done if we have the output 199 | if minionn_helper.has_cpp_tensor(out_name) and \ 200 | minionn_helper.tensor_has_values(out_name): 201 | # Return requested output 202 | return minionn_helper.get_cpp_tensor(out_name) 203 | 204 | # If we did not compute the output until here, we have a problem 205 | logger.error("Requested output was not calculated: " + str(out_name)) 206 | 207 | def _get_node_operator(self, node, input_dependent_tensors): 208 | """ 209 | Returns a new node that is registered as responsible for the given ONNX operator. 
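Raises a TypeError if the node's operation type has no registered handler in _ONNX_OPERATIONS.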
210 | """ 211 | op_type = node["operation"] 212 | if op_type in self._ONNX_OPERATIONS: 213 | return self._ONNX_OPERATIONS[op_type](node, input_dependent_tensors) 214 | else: 215 | raise TypeError("ONNX node of type {} is not supported.".format(op_type)) 216 | 217 | def _execute_operator(self, operator): 218 | """ 219 | Run a given node operator 220 | The executed function depends if we are server or client 221 | """ 222 | if self._is_server: 223 | operator.server() 224 | else: 225 | operator.client() -------------------------------------------------------------------------------- /lib/Makefile: -------------------------------------------------------------------------------- 1 | #Makefile for Miracl, ABY, and SEAL Libraries 2 | 3 | 4 | #relative project folders for ABY 5 | BIN=bin 6 | SRC=ABY/src 7 | CORE=${SRC}/abycore 8 | ARCHIVE_FILE=libaby.a 9 | 10 | # Create bin dir 11 | $(shell mkdir -p $(BIN)) 12 | 13 | # all source files and corresponding object files in abycore 14 | SOURCES_CORE := $(shell find ${CORE} -type f -name '*.cpp' -not -path '*/ENCRYPTO_utils/miracl_lib/*' -not -path '*/ENCRYPTO_utils/Miracl/*' -not -path '*/ot/external/*') 15 | OBJECTS_CORE := $(SOURCES_CORE:.cpp=.o) 16 | #OBJECTS_CORE := $(shell find ${CORE} -type f -name '*.o' -not -path '*/ENCRYPTO_utils/miracl_lib/*' -not -path '*/ot/external/*') 17 | 18 | 19 | # directory for the Miracl submodule and library 20 | MIRACL_LIB_DIR=${CORE}/ENCRYPTO_utils/miracl_lib 21 | SOURCES_MIRACL= ${MIRACL_LIB_DIR}/mrcore.c ${MIRACL_LIB_DIR}/mrarth0.c ${MIRACL_LIB_DIR}/mrarth1.c ${MIRACL_LIB_DIR}/mrarth2.c ${MIRACL_LIB_DIR}/mralloc.c ${MIRACL_LIB_DIR}/mrsmall.c ${MIRACL_LIB_DIR}/mrio1.c ${MIRACL_LIB_DIR}/mrio2.c ${MIRACL_LIB_DIR}/mrgcd.c ${MIRACL_LIB_DIR}/mrjack.c ${MIRACL_LIB_DIR}/mrxgcd.c ${MIRACL_LIB_DIR}/mrarth3.c ${MIRACL_LIB_DIR}/mrbits.c ${MIRACL_LIB_DIR}/mrrand.c ${MIRACL_LIB_DIR}/mrprime.c ${MIRACL_LIB_DIR}/mrcrt.c ${MIRACL_LIB_DIR}/mrscrt.c ${MIRACL_LIB_DIR}/mrmonty.c ${MIRACL_LIB_DIR}/mrpower.c ${MIRACL_LIB_DIR}/mrsroot.c ${MIRACL_LIB_DIR}/mrcurve.c ${MIRACL_LIB_DIR}/mrfast.c ${MIRACL_LIB_DIR}/mrshs.c ${MIRACL_LIB_DIR}/mrshs256.c ${MIRACL_LIB_DIR}/mrshs512.c ${MIRACL_LIB_DIR}/mrsha3.c ${MIRACL_LIB_DIR}/mrfpe.c ${MIRACL_LIB_DIR}/mraes.c ${MIRACL_LIB_DIR}/mrgcm.c ${MIRACL_LIB_DIR}/mrlucas.c ${MIRACL_LIB_DIR}/mrzzn2.c ${MIRACL_LIB_DIR}/mrzzn2b.c ${MIRACL_LIB_DIR}/mrzzn3.c ${MIRACL_LIB_DIR}/mrzzn4.c ${MIRACL_LIB_DIR}/mrecn2.c ${MIRACL_LIB_DIR}/mrstrong.c ${MIRACL_LIB_DIR}/mrbrick.c ${MIRACL_LIB_DIR}/mrebrick.c ${MIRACL_LIB_DIR}/mrec2m.c ${MIRACL_LIB_DIR}/mrgf2m.c ${MIRACL_LIB_DIR}/mrflash.c ${MIRACL_LIB_DIR}/mrfrnd.c ${MIRACL_LIB_DIR}/mrdouble.c ${MIRACL_LIB_DIR}/mrround.c ${MIRACL_LIB_DIR}/mrbuild.c ${MIRACL_LIB_DIR}/mrflsh1.c ${MIRACL_LIB_DIR}/mrpi.c ${MIRACL_LIB_DIR}/mrflsh2.c ${MIRACL_LIB_DIR}/mrflsh3.c ${MIRACL_LIB_DIR}/mrflsh4.c ${MIRACL_LIB_DIR}/mrmuldv.c 22 | SOURCES_MIRACL_CPP= ${MIRACL_LIB_DIR}/big.cpp ${MIRACL_LIB_DIR}/zzn.cpp ${MIRACL_LIB_DIR}/ecn.cpp ${MIRACL_LIB_DIR}/ec2.cpp ${MIRACL_LIB_DIR}/flash.cpp ${MIRACL_LIB_DIR}/crt.cpp 23 | OBJECTS_MIRACL=$(SOURCES_MIRACL:.c=.o) 24 | OBJECTS_MIRACL_CPP=$(SOURCES_MIRACL_CPP:.cpp=.o) 25 | 26 | OTEXT_DIR=${CORE}/ot 27 | OTEXT_SUB_DIR=${OTEXT_DIR}/external/ot 28 | OT_SUBDIR_FILES=${OTEXT_SUB_DIR}/baseOT.h ${OTEXT_SUB_DIR}/iknp-ot-ext-rec.cpp ${OTEXT_SUB_DIR}/iknp-ot-ext-rec.h ${OTEXT_SUB_DIR}/iknp-ot-ext-snd.cpp ${OTEXT_SUB_DIR}/iknp-ot-ext-snd.h ${OTEXT_SUB_DIR}/naor-pinkas.cpp ${OTEXT_SUB_DIR}/naor-pinkas.h ${OTEXT_SUB_DIR}/ot-ext.cpp ${OTEXT_SUB_DIR}/ot-ext.h 
${OTEXT_SUB_DIR}/ot-ext-snd.cpp ${OTEXT_SUB_DIR}/ot-ext-snd.h ${OTEXT_SUB_DIR}/ot-ext-rec.cpp ${OTEXT_SUB_DIR}/ot-ext-rec.h ${OTEXT_SUB_DIR}/xormasking.h ${OTEXT_SUB_DIR}/maskingfunction.h ${OTEXT_SUB_DIR}/kk-ot-ext-snd.h ${OTEXT_SUB_DIR}/kk-ot-ext-snd.cpp ${OTEXT_SUB_DIR}/kk-ot-ext-rec.h ${OTEXT_SUB_DIR}/kk-ot-ext-rec.cpp ${OTEXT_SUB_DIR}/kk-ot-ext.h 29 | OT_FILES=${OTEXT_DIR}/baseOT.h ${OTEXT_DIR}/iknp-ot-ext-rec.cpp ${OTEXT_DIR}/iknp-ot-ext-rec.h ${OTEXT_DIR}/iknp-ot-ext-snd.cpp ${OTEXT_DIR}/iknp-ot-ext-snd.h ${OTEXT_DIR}/naor-pinkas.cpp ${OTEXT_DIR}/naor-pinkas.h ${OTEXT_DIR}/ot-ext.cpp ${OTEXT_DIR}/ot-ext.h ${OTEXT_DIR}/ot-ext-snd.cpp ${OTEXT_DIR}/ot-ext-snd.h ${OTEXT_DIR}/ot-ext-rec.cpp ${OTEXT_DIR}/ot-ext-rec.h ${OTEXT_DIR}/xormasking.h ${OTEXT_DIR}/maskingfunction.h ${OTEXT_DIR}/kk-ot-ext-snd.h ${OTEXT_DIR}/kk-ot-ext-snd.cpp ${OTEXT_DIR}/kk-ot-ext-rec.h ${OTEXT_DIR}/kk-ot-ext-rec.cpp ${OTEXT_DIR}/kk-ot-ext.h 30 | 31 | 32 | # compiler settings 33 | CC=gcc -fPIC 34 | ABY_COMPILER_OPTIONS=-O2 -std=c++14 -march=native 35 | ABY_COMPILER_OPTIONS_C=-O2 -m64 36 | ARCHITECTURE = $(shell uname -m) 37 | ifeq (${ARCHITECTURE},x86_64) 38 | MIRACL_MAKE:=linux64_cpp 39 | GNU_LIB_PATH:=x86_64 40 | ARCH_LIB_PATH:=64 41 | else 42 | MIRACL_MAKE:=linux 43 | GNU_LIB_PATH:=i386 44 | ARCH_LIB_PATH:=32 45 | endif 46 | 47 | 48 | LIBRARIES=-lgmp -lgmpxx -lpthread ${CORE}/ENCRYPTO_utils/miracl_lib/miracl.a -L /usr/lib -lssl -lcrypto -lrt 49 | 50 | 51 | 52 | all: miracl miraclbuild otext core ${BIN}/${ARCHIVE_FILE} 53 | @echo "make all done." 54 | 55 | # this will create a copy of the files in src/ENCRYPTO_utils/Miracl and its sub-directories and put them into src/ENCRYPTO_utils/miracl_lib without sub-directories, then compile it 56 | #miracl: ${MIRACL_LIB_DIR}/miracl.a 57 | 58 | # copy Miracl files to a new directory (${CORE}/ENCRYPTO_utils/miracl_lib/), call the build script and delete everything except the archive, header and object files. 59 | #${MIRACL_LIB_DIR}/miracl.a: 60 | # @find ${CORE}/ENCRYPTO_utils/Miracl/ -type f -exec cp '{}' ${CORE}/ENCRYPTO_utils/miracl_lib \; 61 | # @cp miracl.makefile "${CORE}/ENCRYPTO_utils/miracl_lib/${MIRACL_MAKE}"; cd ${CORE}/ENCRYPTO_utils/miracl_lib/; bash ${MIRACL_MAKE}; 62 | 63 | miracl: FORCE 64 | @find ${CORE}/ENCRYPTO_utils/Miracl/ -type f -exec cp '{}' ${MIRACL_LIB_DIR} \; 65 | @cp ${MIRACL_LIB_DIR}/mirdef.h64 ${MIRACL_LIB_DIR}/mirdef.h 66 | @cp ${MIRACL_LIB_DIR}/mrmuldv.g64 ${MIRACL_LIB_DIR}/mrmuldv.c 67 | 68 | FORCE: ; 69 | 70 | miraclbuild: ${OBJECTS_MIRACL} ${OBJECTS_MIRACL_CPP} 71 | 72 | # this will create a copy of the files in src/abycore/ot/external/ot/ and put them into src/abycore/ot where they are then used for compiling. 73 | # it does not override files in the ot dir! 
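# Note: cp -n is "no clobber", i.e. it only copies files that do not yet exist in ${OTEXT_DIR}, so locally modified OT files are kept.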
74 | otext: 75 | @cp -n ${OT_SUBDIR_FILES} ${OTEXT_DIR} 76 | 77 | 78 | core: otext ${OBJECTS_CORE} 79 | 80 | %.o:%.cpp %.h 81 | ${CC} $< ${ABY_COMPILER_OPTIONS} -c -o $@ 82 | 83 | %.o:%.cpp 84 | ${CC} $< ${ABY_COMPILER_OPTIONS} -c -o $@ 85 | 86 | %.o:%.c 87 | ${CC} $< ${ABY_COMPILER_OPTIONS_C} -c -o $@ 88 | 89 | ${BIN}/${ARCHIVE_FILE}: ${OBJECTS_MIRACL} ${OBJECTS_MIRACL_CPP} ${OBJECTS_CORE} 90 | ar rc $@ $^ 91 | 92 | .PHONY: all clean cleanall 93 | 94 | clean: 95 | rm -f ${OBJECTS_CORE} 96 | rm -f ${OBJECTS_MIRACL} 97 | rm -f ${MIRACL_LIB_DIR}/miracl.a 98 | 99 | cleanall: clean 100 | rm -f ${BIN}/*.a 101 | rmdir --ignore-fail-on-non-empty $(BIN) 102 | -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/lib/__init__.py -------------------------------------------------------------------------------- /lib/aby.makefile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/lib/aby.makefile -------------------------------------------------------------------------------- /lib/minionn.cpp: -------------------------------------------------------------------------------- 1 | /*cppimport 2 | <% 3 | setup_pybind11(cfg) 4 | cfg['dependencies'] = ['minionnCommon.h','minionnMath.h', 'minionnCrypto.h', 'minionnABY.h'] 5 | cfg['libraries'] = [ 6 | #SEAL library 7 | 'seal', 8 | #Aby library 9 | 'bin/aby', 10 | #Utilities required for ABY and Miracl 11 | 'ssl', 'crypto', 'gmp', 'gmpxx', 'pthread' 12 | ] 13 | cfg['sources'] = [ 14 | # Minionn sources 15 | 'minionnMath.cpp', 16 | 'minionnCrypto.cpp', 17 | 'minionnABY.cpp', 18 | ] 19 | cfg['include_dirs'] = ['SEAL', 'ABY/src/abycore', '/usr/lib'] 20 | cfg['parallel'] = True 21 | cfg['compiler_args'] = ['-std=c++17'] 22 | %> 23 | */ 24 | 25 | /* 26 | Author: Fritz Alder 27 | Copyright: 28 | Secure Systems Group, Aalto University 29 | https://ssg.aalto.fi/ 30 | 31 | This code is released under Apache 2.0 license 32 | http://www.apache.org/licenses/LICENSE-2.0 33 | */ 34 | 35 | #include "minionnCommon.h" 36 | #include "minionnMath.h" 37 | #include "minionnCrypto.h" 38 | #include "minionnABY.h" 39 | 40 | PYBIND11_MODULE(minionn, m) { 41 | py::bind_vector<std::vector<Int>>(m, "VectorInt", py::buffer_protocol()); 42 | py::bind_vector<std::vector<uInt>>(m, "VectorUInt", py::buffer_protocol()); 43 | // py::bind_vector<std::vector<py::bytes>>(m, "VectorBytes", py::buffer_protocol()); 44 | // py::bind_vector<std::vector<std::string>>(m, "VectorStr", py::buffer_protocol()); 45 | m.doc() = "C++ module for MiniONN"; 46 | /* 47 | First, the math module 48 | */ 49 | m.def("modulo", &moduloPMAX, "Modulo the given number to PMAX"); 50 | m.def("floor", &Floor, "Floors a number"); 51 | m.def("vector_floor", &vector_floor, "Floors a whole vector (divided by fractional) in place"); 52 | m.def("vector_raise", &vector_raise, "Raises a whole vector by the given fractional in place"); 53 | m.def("vector_div", &vector_div, "Vector scalar division"); 54 | m.def("vector_add", &vector_add, "Vector to vector addition"); 55 | m.def("vector_sub", &vector_sub, "Vector to vector subtraction"); 56 | m.def("vector_mul", &vector_mul, "Vector scalar mult"); 57 | m.def("matrixmul", &matrixmul, "Multiplies the Inputs according to y = Wx + b + U"); 58 | m.def("matrixmul_b_columns", &matrixmul_b_columns, "Multiplies the Inputs according to y = Wx + b 
+ U. b is added column wise"); 59 | m.def("matrixmul_simple", &matrixmul_simple, "Multiplies the Inputs according to y = Wx"); 60 | m.def("generate_random_vector", &generate_random_vector, "Generates a random UNSIGNED int vector of given size"); 61 | m.def("vector_to_int_PMAX", &vector_to_int_PMAX, "Takes a vector of unsigned int and returns it as a vector of signed int, adjusted to PMAX"); 62 | m.def("extract_sum", &extract_sum, "Reduces a part of a 3-dim matrix to 2-dims"); 63 | 64 | /* 65 | Next, the crypto functions 66 | */ 67 | m.def("init", &init, "Initializes important parameters for the crypto lib"); 68 | m.def("gen_keys", &gen_keys, "Generates the keys required for homomorphic encryption"); 69 | m.def("encrypt_w",&encrypt_w, "Takes W and encrypts it with the server pkey"); 70 | m.def("decrypt_w", &decrypt_w, "Takes an encrypted w as string vector and outputs its decryption to given vector"); 71 | m.def("client_precomputation", &client_precomputation, "Client precomputation according to MiniONN. Takes encrypted w, r, and v and returns U"); 72 | 73 | /* 74 | Lastly, the ABYcore dependent functions. 75 | These are mainly functions for all layers of the conv nn 76 | */ 77 | m.def("init_aby", &init_aby, "Initializes important parameters for the ABY MPC"); 78 | m.def("shutdown_aby", &shutdown_aby, "Shuts down the ABY MPC"); 79 | 80 | m.def("relu_server", &relu_server, "Server version of ReLU"); 81 | m.def("relu_client", &relu_client, "Client version of ReLU"); 82 | } 83 | -------------------------------------------------------------------------------- /lib/minionnABY.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | #include "minionnABY.h" 12 | #include "minionnMath.h" 13 | 14 | ABYParty* party; 15 | 16 | void init_aby(string address, uint16_t port, bool role_is_server){ 17 | e_role role; 18 | if(role_is_server){ 19 | role = (e_role) 0; 20 | } else { // client 21 | role = (e_role) 1; 22 | } 23 | seclvl sl = get_sec_lvl(secparam); 24 | party = new ABYParty(role, (char*) address.c_str(), port, sl, bitlen, nthreads, mt_alg); 25 | } 26 | 27 | void shutdown_aby(){ 28 | delete party; 29 | } 30 | 31 | void reset_aby(){ 32 | // Simply reset the ABY party object 33 | party->Reset(); 34 | } 35 | 36 | void relu_server(uint32_t num, vector* x_s, vector* y_s) 37 | { 38 | vector& sharings = party->GetSharings(); 39 | Circuit* y_circ = sharings[S_YAO]->GetCircuitBuildRoutine(); 40 | Circuit* a_circ = sharings[S_ARITH]->GetCircuitBuildRoutine(); 41 | Circuit* b_circ = sharings[S_BOOL]->GetCircuitBuildRoutine(); 42 | 43 | uint64_t* xs = (uint64_t*) malloc(num * sizeof(uint64_t)); 44 | uint64_t* xc = (uint64_t*) malloc(num * sizeof(uint64_t)); 45 | uint64_t* rc = (uint64_t*) malloc(num * sizeof(uint64_t)); 46 | uint64_t* s_out_vec = (uint64_t*) malloc(num * sizeof(uint64_t)); 47 | uint32_t out_bitlen, out_nvals; 48 | 49 | for (int i = 0; i < num; i++) { 50 | if ((*x_s)[i] >= 0) 51 | xs[i] = (*x_s)[i]; 52 | else 53 | xs[i] = PMAX + (*x_s)[i]; 54 | } 55 | 56 | share *xs_share, *xc_share, *rc_share, *x, *sel_share, *N, *halfN, *zero; 57 | 58 | xs_share = y_circ->PutSIMDINGate(num, xs, bitlen, SERVER); 59 | xc_share = y_circ->PutSIMDINGate(num, xc, bitlen, CLIENT); 60 | rc_share = y_circ->PutSIMDINGate(num, rc, bitlen, CLIENT); 61 | N = 
y_circ->PutSIMDCONSGate(num, PMAX, bitlen); 62 | halfN = y_circ->PutSIMDCONSGate(num, PMAX/2, bitlen); 63 | zero = y_circ->PutSUBGate(N, N); 64 | 65 | x = y_circ->PutADDGate(xs_share, xc_share); 66 | sel_share = y_circ->PutGTGate(N, x); 67 | x = y_circ->PutMUXGate(x, y_circ->PutSUBGate(x, N), sel_share); 68 | sel_share = y_circ->PutGTGate(x, halfN); 69 | x = y_circ->PutMUXGate(zero, x, sel_share); 70 | x = y_circ->PutADDGate(x, rc_share); 71 | sel_share = y_circ->PutGTGate(N, x); 72 | x = y_circ->PutMUXGate(x, y_circ->PutSUBGate(x, N), sel_share); 73 | 74 | x = y_circ->PutOUTGate(x, ALL); //TODO 75 | 76 | party->ExecCircuit(); 77 | 78 | x -> get_clear_value_vec(&s_out_vec, &out_bitlen, &out_nvals); 79 | 80 | for(int i = 0; i < num; i ++) 81 | { 82 | y_s->push_back(moduloPMAX(s_out_vec[i])); 83 | // cout << i << " " << x_s[i] << endl; 84 | } 85 | 86 | // Reset party 87 | reset_aby(); 88 | } 89 | 90 | void relu_client(uint32_t num, vector<Int>* x_c, vector<Int>* r, vector<Int>* y_c) 91 | { 92 | vector<Sharing*>& sharings = party->GetSharings(); 93 | Circuit* y_circ = sharings[S_YAO]->GetCircuitBuildRoutine(); 94 | Circuit* a_circ = sharings[S_ARITH]->GetCircuitBuildRoutine(); 95 | Circuit* b_circ = sharings[S_BOOL]->GetCircuitBuildRoutine(); 96 | 97 | uint64_t* xs = (uint64_t*) malloc(num * sizeof(uint64_t)); 98 | uint64_t* xc = (uint64_t*) malloc(num * sizeof(uint64_t)); 99 | uint64_t* rc = (uint64_t*) malloc(num * sizeof(uint64_t)); 100 | uint64_t* s_out_vec = (uint64_t*) malloc(num * sizeof(uint64_t)); 101 | uint32_t out_bitlen, out_nvals; 102 | 103 | for (int i = 0; i < num; i++) { 104 | if ((*x_c)[i] >= 0) 105 | xc[i] = (*x_c)[i]; 106 | else 107 | xc[i] = PMAX + (*x_c)[i]; 108 | 109 | if ((*r)[i] >= 0) //-r 110 | rc[i] = PMAX - (*r)[i]; 111 | else 112 | rc[i] = 0 - (*r)[i]; 113 | } 114 | 115 | share *xs_share, *xc_share, *rc_share, *x, *sel_share, *N, *halfN, *zero; 116 | 117 | xs_share = y_circ->PutSIMDINGate(num, xs, bitlen, SERVER); 118 | xc_share = y_circ->PutSIMDINGate(num, xc, bitlen, CLIENT); 119 | rc_share = y_circ->PutSIMDINGate(num, rc, bitlen, CLIENT); 120 | N = y_circ->PutSIMDCONSGate(num, PMAX, bitlen); 121 | halfN = y_circ->PutSIMDCONSGate(num, PMAX/2, bitlen); 122 | zero = y_circ->PutSUBGate(N, N); 123 | 124 | x = y_circ->PutADDGate(xs_share, xc_share); 125 | sel_share = y_circ->PutGTGate(N, x); 126 | x = y_circ->PutMUXGate(x, y_circ->PutSUBGate(x, N), sel_share); 127 | sel_share = y_circ->PutGTGate(x, halfN); 128 | x = y_circ->PutMUXGate(zero, x, sel_share); 129 | x = y_circ->PutADDGate(x, rc_share); 130 | sel_share = y_circ->PutGTGate(N, x); 131 | x = y_circ->PutMUXGate(x, y_circ->PutSUBGate(x, N), sel_share); 132 | 133 | x = y_circ->PutOUTGate(x, ALL); //TODO 134 | 135 | party->ExecCircuit(); 136 | 137 | x -> get_clear_value_vec(&s_out_vec, &out_bitlen, &out_nvals); 138 | 139 | for(int i = 0; i < num; i ++) 140 | { 141 | // The client's output share is simply r; the server holds ReLU(x) - r (the circuit output above). 142 | y_c->push_back((*r)[i]); 143 | // cout << i << " " << x_s[i] << endl; 144 | } 145 | 146 | // Reset party 147 | reset_aby(); 148 | } 149 | -------------------------------------------------------------------------------- /lib/minionnABY.h: -------------------------------------------------------------------------------- 1 | /* 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | #ifndef MINIONNABY_H 12 | #define MINIONNABY_H 13 | 14 | #include "minionnCommon.h" 15 | 16 | //Utility libs 17 | #include 
"ENCRYPTO_utils/crypto/crypto.h" 18 | #include "ENCRYPTO_utils/parse_options.h" 19 | //ABY Party class 20 | #include "aby/abyparty.h" 21 | #include "circuit/booleancircuits.h" 22 | #include "circuit/arithmeticcircuits.h" 23 | #include "circuit/circuit.h" 24 | 25 | extern ABYParty* party; 26 | 27 | //for ABY 28 | const uint32_t secparam = 128; 29 | const uint32_t nthreads = 1; 30 | const e_mt_gen_alg mt_alg = MT_OT; 31 | const uint32_t bitlen = 64; 32 | 33 | 34 | // core Functions for aby 35 | void init_aby(string address, uint16_t port, bool role_is_server); 36 | void shutdown_aby(); 37 | 38 | // functions for layers 39 | void relu_server(uint32_t num, vector* x_s, vector* y_s); 40 | void relu_client(uint32_t num, vector* x_c, vector* r, vector* y_c); 41 | 42 | 43 | 44 | #endif 45 | -------------------------------------------------------------------------------- /lib/minionnCommon.h: -------------------------------------------------------------------------------- 1 | /* 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | #ifndef MINIONNCOMMON_H 12 | #define MINIONNCOMMON_H 13 | 14 | 15 | #include 16 | #include 17 | #include 18 | #include "seal/seal.h" 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | 26 | 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | typedef int64_t Int; 35 | typedef uint64_t uInt; 36 | 37 | namespace py = pybind11; 38 | using namespace std; 39 | PYBIND11_MAKE_OPAQUE(std::vector); 40 | PYBIND11_MAKE_OPAQUE(std::vector); 41 | //PYBIND11_MAKE_OPAQUE(std::vector); 42 | 43 | 44 | 45 | const Int PMAX = 101285036033; 46 | const Int PMAX_HALF = PMAX / 2; 47 | extern Int PMAXBITS; 48 | extern int SLOTS; 49 | 50 | #endif 51 | -------------------------------------------------------------------------------- /lib/minionnCrypto.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | #include "minionnCrypto.h" 12 | #include "minionnMath.h" 13 | 14 | 15 | Int PMAXBITS; 16 | int SLOTS; 17 | SEALContext *context; 18 | PolyCRTBuilder *crtbuilder; 19 | KeyGenerator *generator; 20 | 21 | /** 22 | Initializes important params. Should be called first. 23 | */ 24 | void init(int slot_count){ 25 | EncryptionParameters parms; 26 | SLOTS = slot_count; 27 | parms.set_poly_modulus("1x^" + to_string(SLOTS) + " + 1"); 28 | //parms.set_coeff_modulus(BigUInt("FFFFFFFFFFFFFFFFFFFFFFFFC0001")) 29 | // parms.set_coeff_modulus(ChooserEvaluator::default_parameter_options().at(SLOTS)); 30 | //TODO: This might not be correct. Is below the 2.3 equivalent of the line above? 
31 |     parms.set_coeff_modulus(coeff_modulus_192(2*SLOTS));
32 |     parms.set_plain_modulus(PMAX);
33 |     PMAXBITS = parms.plain_modulus().bit_count();
34 | 
35 |     // Create context, CRT Builder, and generator
36 |     context = new SEALContext(parms);
37 |     generator = new KeyGenerator(*context);
38 |     crtbuilder = new PolyCRTBuilder(*context);
39 | }
40 | 
41 | /**
42 | Generates a public and secret key and stores them in the given path
43 | */
44 | void gen_keys(string str_pk, string str_sk)
45 | {
46 |     // Generate key and retrieve it from generator
47 |     PublicKey pk = generator->public_key();
48 |     SecretKey sk = generator->secret_key();
49 | 
50 |     // Store keys in file
51 |     ofstream pk_file (str_pk, ofstream::binary);
52 |     pk.save(pk_file);
53 |     ofstream sk_file (str_sk, ofstream::binary);
54 |     sk.save(sk_file);
55 | }
56 | 
57 | /**
58 | Calculate dimensions of W shaped according to SLOT size
59 | n is the number of batches that we store w in
60 | */
61 | int calculate_batches(int w_size){
62 |     int l = w_size % SLOTS;
63 |     int n = 0;
64 |     if (l == 0)
65 |         n = w_size / SLOTS;
66 |     else
67 |         n = w_size / SLOTS + 1;
68 | 
69 |     return n;
70 | }
71 | 
72 | /**
73 | Takes a signed x and returns it as BigUInt that is adjusted to PMAX
74 | */
75 | BigUInt adjust_to_PMAX(Int x){
76 |     /*
77 |     if (x > PMAX_HALF)
78 |         return BigUInt(PMAXBITS, x - PMAX);
79 |     else
80 |         return BigUInt(PMAXBITS, x);
81 |     */
82 |     if (x >= 0)
83 |         return BigUInt(PMAXBITS, static_cast<uint64_t>(x));
84 |     else
85 |         return BigUInt(PMAXBITS, static_cast<uint64_t>(PMAX + x));
86 | }
87 | 
88 | /**
89 | Encrypts a given vector w and returns it as a vector of python bytes
90 | */
91 | vector<py::bytes> encrypt_w(std::vector<Int>* in_w, string pkp){
92 |     // Read PK from file
93 |     ifstream pks_file (pkp, ifstream::binary);
94 |     PublicKey pks;
95 |     pks.load(pks_file);
96 | 
97 |     Encryptor encryptor(*context, pks);
98 | 
99 |     int n = calculate_batches(in_w->size());
100 | 
101 |     // shape w into a n*SLOTS matrix and take entries from in_w adjusted with PMAX
102 |     vector<uint64_t> tmp_W(n*SLOTS, 0);
103 |     for (size_t i = 0; i < in_w->size(); i ++)
104 |         if ((*in_w)[i] >= 0)
105 |             tmp_W[i] = static_cast<uint64_t>((*in_w)[i]);
106 |         else
107 |             tmp_W[i] = static_cast<uint64_t>(PMAX+(*in_w)[i]);
108 | 
109 |     // encrypt w
110 |     ostringstream ostream(stringstream::binary);
111 |     vector<py::bytes> out_w;
112 |     for (int i = 0; i < n; i ++)
113 |     {
114 |         //Divide W into n batches of size SLOTS and encode
115 |         vector<uint64_t> batch(tmp_W.begin() + i*SLOTS, tmp_W.begin() + (i+1)*SLOTS);
116 |         Plaintext encodedBatch(context->parms().poly_modulus().coeff_count(), 0);
117 |         crtbuilder->compose(batch, encodedBatch);
118 | 
119 |         // Encrypt each batch separately
120 |         Ciphertext encryptedBatch(context->parms());
121 |         encryptor.encrypt(encodedBatch,encryptedBatch);
122 |         encryptedBatch.save(ostream);
123 | 
124 |         // add to batch list
125 |         out_w.push_back(py::bytes(ostream.str()));
126 | 
127 |         // clear ostream
128 |         ostream.str("");
129 |     }
130 | 
131 |     return out_w;
132 | }
133 | 
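// Batching recap (informal): encrypt_w above packs w row-major into
// n = ceil(w_size / SLOTS) ciphertexts, zero-padding the last batch. For
// example (hypothetical numbers), w_size = 5000 with SLOTS = 4096 gives
// n = 2 batches and 2*4096 - 5000 = 3192 unused zero slots in the second one.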
134 | /**
135 | Decrypts a vector of bytes (given as string) and returns it into
136 | the out vector as Integer (int64)
137 | */
138 | void decrypt_w(vector<string>* w_in, string skp, vector<Int>* w_out){
139 |     // Read secret key
140 |     ifstream sks_file (skp,std::ifstream::binary);
141 |     SecretKey sks;
142 |     sks.load(sks_file);
143 | 
144 |     // create SEAL objects
145 |     Decryptor decryptor(*context, sks);
146 | 
147 |     // reserve space for SLOTS*batches elements on U (upper bound as last batch might be emptier)
148 |     w_out -> reserve(w_in->size() * SLOTS);
149 | 
150 |     // iterate through batches
151 |     istringstream istream(stringstream::binary);
152 |     for(size_t i = 0; i < w_in->size() ; i++){
153 |         // read encrypted w
154 |         istream.str("");
155 |         istream.str(w_in->at(i));
156 |         Ciphertext encryptedW(context->parms());
157 |         encryptedW.load(istream);
158 | 
159 |         // decrypt w
160 |         Plaintext encodedW(context->parms().poly_modulus().coeff_count(), 0);
161 |         decryptor.decrypt(encryptedW, encodedW);
162 |         //decompose it in batches to a uint64 vector of size slot_count
163 |         vector<uint64_t> tmp(SLOTS, 0);
164 |         crtbuilder->decompose(encodedW, tmp);
165 | 
166 |         // iterate through tmp and push each element to U, adjusted for PMAX
167 |         /*for (size_t j = 0; j < tmp.size(); j ++)
168 |         {
169 |             uint64_t t = (tmp[j].pointer())[0];
170 |             w_out -> push_back(moduloPMAX(t));
171 |         }*/
172 | 
173 |         for(uint64_t const& value: tmp){
174 |             w_out -> push_back(moduloPMAX(value));
175 |         }
176 | 
177 |     }
178 | 
179 | }
180 | 
181 | /**
182 | Performs the precomputation of the client.
183 | This is basically a computation of U = r*w - v for every given w
184 | */
185 | vector<py::bytes> client_precomputation(vector<string>* w_in,
186 |     vector<uInt>* r_in, vector<uInt>* v_in ){
187 | 
188 |     // create SEAL objects
189 |     Evaluator evaluator(*context);
190 | 
191 |     // iterate through batches
192 |     istringstream istream(stringstream::binary);
193 |     ostringstream ostream(stringstream::binary);
194 |     vector<py::bytes> out_w;
195 | 
196 |     // For every batch
197 |     for(size_t i = 0; i < w_in->size() ; i++){
198 |         // read encrypted w
199 |         istream.str("");
200 |         istream.str(w_in->at(i));
201 |         Ciphertext encryptedW(context->parms());
202 |         encryptedW.load(istream);
203 | 
204 |         // take r and v of this batch
205 |         // and put them into vectors of uint64 adjusted to PMAX
206 |         vector<uint64_t> rr(SLOTS, 0);
207 |         vector<uint64_t> vv(SLOTS, 0);
208 | 
209 |         // Check that r (and as such v) has enough size remaining for a whole batch
210 |         size_t r_size = (size_t) SLOTS;
211 |         if(r_in->size() - i * SLOTS < SLOTS){
212 |             r_size = (size_t) (r_in->size() - i * SLOTS);
213 |         }
214 | 
215 |         rr.reserve(r_size);
216 |         vv.reserve(r_size);
217 | 
218 |         // Now iterate over r (or remaining r) and put it into rr and vv
219 |         // If not a whole batch can be filled, it remains 0 (as defined in rr and vv)
220 |         for (size_t j = 0; j < r_size; j ++){
221 |             rr[j] = r_in -> at(SLOTS*i + j);
222 |             vv[j] = v_in -> at(SLOTS*i + j);
223 |         }
224 | 
225 |         Plaintext encodedR(context->parms().poly_modulus().coeff_count(), 1);
226 |         crtbuilder->compose(rr, encodedR);
227 |         Plaintext encodedV(context->parms().poly_modulus().coeff_count(), 0);
228 |         crtbuilder->compose(vv, encodedV);
229 | 
230 |         // calculate U = r*w - v
231 |         evaluator.multiply_plain(encryptedW, encodedR);
232 |         evaluator.sub_plain(encryptedW, encodedV);
233 | 
234 |         // save U back to the list
235 |         encryptedW.save(ostream);
236 |         out_w.push_back(py::bytes(ostream.str()));
237 | 
238 |         // clear ostream
239 |         ostream.str("");
240 |     }
241 | 
242 |     return out_w;
243 | 
244 | }
245 | 
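The identity behind this precomputation, written out in plain modular arithmetic (a sketch with hypothetical scalar values; the real code applies it slot-wise to packed ciphertexts). The server later decrypts U = r*w - v, which lets the two parties cancel the client's randomness during the matrix multiplication:

    PMAX = 101285036033
    w, r, v = 7, 12345, 999       # w: server weight; r, v: client randomness

    u = (w * r - v) % PMAX        # what the server recovers from Enc(w)*r - v
    assert (u + v) % PMAX == (w * r) % PMAX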
-------------------------------------------------------------------------------- /lib/minionnCrypto.h: --------------------------------------------------------------------------------
1 | /*
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | */
10 | 
11 | #ifndef MINIONNCRYPTO_H
12 | #define MINIONNCRYPTO_H
13 | 
14 | #include "minionnCommon.h"
15 | using namespace seal;
16 | 
17 | void init(int slot_count);
18 | void gen_keys(string str_pk, string str_sk);
19 | vector<py::bytes> encrypt_w(std::vector<Int>* in_w, string pkp);
20 | void decrypt_w(vector<string>* w, string skp, vector<Int>* U);
21 | vector<py::bytes> client_precomputation(vector<string>* w_in,
22 |     vector<uInt>* r_in, vector<uInt>* v_in );
23 | 
24 | 
25 | #endif
26 | 
-------------------------------------------------------------------------------- /lib/minionnMath.cpp: --------------------------------------------------------------------------------
1 | /*
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | */
10 | 
11 | #include "minionnMath.h"
12 | 
13 | /*
14 | Takes an Int and
15 | 1) calculates x % PMAX
16 | 2) shifts it into the centered range [-PMAX/2, PMAX/2]
17 | */
18 | Int moduloPMAX(Int x)
19 | {
20 |     x = x % PMAX;
21 |     if(abs(x) <= PMAX_HALF)
22 |     {
23 |         return x;
24 |     }
25 |     else
26 |     {
27 |         if (x > 0)
28 |             return x - PMAX;
29 |         else
30 |             return PMAX + x;
31 |     }
32 | }
33 | 
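// In centered-residue terms, moduloPMAX(x) returns the unique y with
// y == x (mod PMAX) and -PMAX_HALF <= y <= PMAX_HALF. For example (hypothetical
// values): moduloPMAX(PMAX - 1) == -1 and moduloPMAX(PMAX_HALF + 1) == -PMAX_HALF.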
34 | vector<Int> vector_add(vector<Int> x, vector<Int> y)
35 | {
36 |     vector<Int> z;
37 |     for(size_t i = 0; i < x.size(); i ++)
38 |         z.push_back(moduloPMAX(x[i]+y[i]));
39 | 
40 |     return z;
41 | }
42 | 
43 | vector<Int> vector_sub(vector<Int> x, vector<Int> y)
44 | {
45 |     vector<Int> z;
46 |     for(size_t i = 0; i < x.size(); i ++)
47 |         z.push_back(moduloPMAX(x[i]-y[i]));
48 | 
49 |     return z;
50 | }
51 | 
52 | vector<Int> vector_mul(vector<Int> x, Int a)
53 | {
54 |     vector<Int> z;
55 |     for(size_t i = 0; i < x.size(); i ++)
56 |         z.push_back(moduloPMAX(x[i]*a));
57 | 
58 |     return z;
59 | }
60 | 
61 | vector<Int> vector_div(vector<Int> x, Int a)
62 | {
63 |     vector<Int> z;
64 |     for(size_t i = 0; i < x.size(); i ++)
65 |     {
66 |         double d = (double)x[i];
67 |         Int t = round(d / a);
68 |         z.push_back(moduloPMAX(t));
69 |     }
70 | 
71 |     return z;
72 | }
73 | 
74 | void vector_floor(vector<Int>* x, uInt fractional){
75 |     size_t size = x->size();
76 | 
77 |     for(size_t i=0; i<size; i++){
78 |         x->at(i) = Floor(x->at(i), fractional);
79 |     }
80 | }
81 | 
82 | void vector_raise(vector<Int>* x, uInt fractional){
83 |     size_t size = x->size();
84 | 
85 |     for(size_t i=0; i<size; i++){
86 |         x->at(i) = moduloPMAX(x->at(i) * fractional);
87 |     }
88 | }
89 | 
90 | Int Floor(Int x, uInt f)
91 | {
92 |     if (x >= 0)
93 |         return floor((double) x / f);
94 |     else
95 |         return 0 - floor((double) (0 -x) / f);
96 | }
97 | 
98 | /*
99 | b is added column wise
100 | u is added element wise
101 | 
102 | Wx + b + u
103 | W has shape m x n
104 | X has shape n x o
105 | Result of Wx has shape m x o
106 | b has shape o
107 | U has shape m x o
108 | */
109 | void matrixmul(vector<Int> *W, vector<Int> *b, vector<Int> *U,
110 |     vector<Int> *x_s, int nn, int oo, int mm, vector<Int> *y_s)
111 | {
112 |     int c = 0;
113 |     for (int i = 0; i < mm; i ++)
114 |     {
115 |         for (int j = 0; j < oo; j ++)
116 |         {
117 |             Int sum = 0;
118 |             for (int k = 0; k < nn; k ++)
119 |             {
120 |                 sum = sum + moduloPMAX((*W)[i*nn + k] * (*x_s)[k*oo + j]);
121 |                 sum = moduloPMAX(sum);
122 |             }
123 |             sum = sum + (*b)[j];
124 |             sum = sum + (*U)[c++];
125 |             y_s->push_back(moduloPMAX(sum));
126 |         }
127 |     }
128 | }
129 | 
130 | /*
131 | Different version of matmul where b is added row wise
132 | u is added element wise
133 | 
134 | Wx + b + u
135 | W has shape m x n
136 | X has shape n x o
137 | Result of Wx has shape m x o
138 | b has shape m
139 | U has shape m x o
140 | */
141 | void matrixmul_b_columns(vector<Int> *W, vector<Int> *b, vector<Int> *U,
142 |     vector<Int> *x_s, int nn, int oo, int mm, vector<Int> *y_s)
143 | {
144 |     int c = 0;
145 |     for (int i = 0; i < mm; i ++)
146 |     {
147 |         for (int j = 0; j < oo; j ++)
148 |         {
149 |             Int sum = 0;
150 |             for (int k = 0; k < nn; k ++)
151 |             {
152 |                 sum = sum + moduloPMAX((*W)[i*nn + k] * (*x_s)[k*oo + j]);
153 |                 sum = moduloPMAX(sum);
154 |             }
155 |             sum = sum + (*b)[i];
156 |             sum = sum + (*U)[c++];
157 |             y_s->push_back(moduloPMAX(sum));
158 |         }
159 |     }
160 | }
161 | 
162 | /*
163 | 
164 | just sums v
165 | */
166 | void matrixmul_simple(vector<Int> *v, int nn, int oo, int mm, vector<Int> *y_c)
167 | {
168 |     int c = 0;
169 |     for (int i = 0; i < mm; i ++)
170 |     {
171 |         for (int j = 0; j < oo; j ++)
172 |         {
173 |             Int sum = 0;
174 |             for (int k = 0; k < nn; k ++)
175 |             {
176 |                 sum = moduloPMAX(sum + (*v)[c++]);
177 |             }
178 |             y_c->push_back(sum);
179 |             // cout << i << " " << y_c[i] << endl;
180 |         }
181 |     }
182 | }
183 | 
184 | /**
185 | Fills the out vector with random integers adjusted to PMAX
186 | */
187 | void generate_random_vector(vector<Int>* out, int size){
188 |     mt19937_64 rand_gen (std::random_device{}());
189 | 
190 |     //reserve size for performance
191 |     out->reserve(size);
192 | 
193 |     for(int i=0; i<size; i++){
194 |         out -> push_back(rand_gen() % PMAX);
195 |     }
196 | }
197 | 
198 | /**
199 |  * Takes a vector of unsigned Ints (uint64) and returns it as
200 |  * Int vector with every number taken as modulo PMAX.
201 |  * Note: see the documentation of moduloPMAX.
202 |  */
203 | void vector_to_int_PMAX(vector<uInt>* in, vector<Int>* out){
204 |     size_t size = in->size();
205 | 
206 |     out-> reserve(size);
207 | 
208 |     for(int i=0; i<size; i++){
209 |         Int tmp = in->at(i) % PMAX;
210 |         out -> push_back(moduloPMAX(tmp));
211 |     }
212 | }
213 | 
214 | 
215 | /**
216 | Extracts the sum of a U matrix.
217 | This is done by reducing the srow x ccol x crow matrix to a srow x ccol matrix
218 | Only works on a submatrix of the whole array
219 | for m x n x o matrix this can also be read as
220 | extract_sum(in,out, o, n, m, offset)
221 | */
222 | void extract_sum(vector<Int>* in_u, vector<Int>* out_u,
223 |     int crow, int ccol, int srow, int start_pos)
224 | {
225 |     int pos = start_pos;
226 |     for (int i = 0; i < srow; i ++)
227 |     {
228 |         for (int j = 0; j < ccol; j ++)
229 |         {
230 |             Int sum = 0;
231 |             for (int k = 0; k < crow; k ++)
232 |             {
233 |                 sum = moduloPMAX(sum + (*in_u)[pos++]);
234 |             }
235 |             out_u->push_back(sum);
236 |         }
237 |     }
238 | }
239 | 
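A NumPy view of extract_sum for intuition (a sketch; the helper names are hypothetical and center() mirrors moduloPMAX): starting at start_pos, the next srow*ccol*crow values are read as an (srow, ccol, crow) block and summed over the innermost axis:

    import numpy as np

    PMAX = 101285036033

    def center(x):
        x %= PMAX
        return x if x <= PMAX // 2 else x - PMAX

    def extract_sum_np(u, crow, ccol, srow, start):
        block = np.array(u[start:start + srow * ccol * crow], dtype=object)
        sums = block.reshape(srow, ccol, crow).sum(axis=2)
        return [center(int(s)) for s in sums.ravel()]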
-------------------------------------------------------------------------------- /lib/minionnMath.h: --------------------------------------------------------------------------------
1 | /*
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | */
10 | 
11 | #ifndef MINIONNMATH_H
12 | #define MINIONNMATH_H
13 | 
14 | #include "minionnCommon.h"
15 | 
16 | const int fractional = 1000;
17 | 
18 | Int moduloPMAX(Int x);
19 | Int Floor(Int x, uInt div);
20 | void vector_floor(vector<Int>* x, uInt fractional);
21 | void vector_raise(vector<Int>* x, uInt fractional);
22 | 
23 | vector<Int> vector_add(vector<Int> x, vector<Int> y);
24 | vector<Int> vector_sub(vector<Int> x, vector<Int> y);
25 | vector<Int> vector_mul(vector<Int> x, Int a);
26 | vector<Int> vector_div(vector<Int> x, Int a);
27 | 
28 | void matrixmul(vector<Int> *W, vector<Int> *b, vector<Int> *U,
29 |     vector<Int> *x_s, int crow, int ccol, int srow, vector<Int> *y_s);
30 | void matrixmul_b_columns(vector<Int> *W, vector<Int> *b, vector<Int> *U,
31 |     vector<Int> *x_s, int nn, int oo, int mm, vector<Int> *y_s);
32 | 
33 | void matrixmul_simple(vector<Int> *v, int nn, int oo, int mm, vector<Int> *y_s);
34 | 
35 | void generate_random_vector(vector<Int>* out, int size);
36 | void vector_to_int_PMAX(vector<uInt>* in, vector<Int>* out);
37 | 
38 | void extract_sum(vector<Int>* in_u, vector<Int>* out_u,
39 |     int crow, int ccol, int srow, int start_pos);
40 | 
41 | #endif
42 | 
-------------------------------------------------------------------------------- /lib/miracl.makefile: --------------------------------------------------------------------------------
1 | cp mirdef.h64 mirdef.h
2 | gcc -c -m64 -O2 mrcore.c
3 | gcc -c -m64 -O2 mrarth0.c
4 | gcc -c -m64 -O2 mrarth1.c
5 | gcc -c -m64 -O2 mrarth2.c
6 | gcc -c -m64 -O2 mralloc.c
7 | gcc -c -m64 -O2 mrsmall.c
8 | gcc -c -m64 -O2 mrio1.c
9 | gcc -c -m64 -O2 mrio2.c
10 | gcc -c -m64 -O2 mrgcd.c
11 | gcc -c -m64 -O2 mrjack.c
12 | gcc -c -m64 -O2 mrxgcd.c
13 | gcc -c -m64 -O2 mrarth3.c
14 | gcc -c -m64 -O2 mrbits.c
15 | gcc -c -m64 -O2 mrrand.c
16 | gcc -c -m64 -O2 mrprime.c
17 | gcc -c -m64 -O2 mrcrt.c
18 | gcc -c -m64 -O2 mrscrt.c
19 | gcc -c -m64 -O2 mrmonty.c
20 | gcc -c -m64 -O2 mrpower.c
21 | gcc -c -m64 -O2 mrsroot.c
22 | gcc -c -m64 -O2 mrcurve.c
23 | gcc -c -m64 -O2 mrfast.c
24 | gcc -c -m64 -O2 mrshs.c
25 | gcc -c -m64 -O2 mrshs256.c
26 | gcc -c -m64 -O2 mrshs512.c
27 | gcc -c -m64 -O2 mrsha3.c
28 | gcc -c -m64 -O2 mrfpe.c
29 | gcc -c -m64 -O2 mraes.c
30 | gcc -c -m64 -O2 mrgcm.c
31 | gcc -c -m64 -O2 mrlucas.c
32 | gcc -c -m64 -O2 mrzzn2.c
33 | gcc -c -m64 -O2 mrzzn2b.c
34 | gcc -c -m64 -O2 mrzzn3.c
35 | gcc -c -m64 -O2 mrzzn4.c
36 | gcc -c -m64 -O2 mrecn2.c
37 | gcc -c -m64 -O2 mrstrong.c
38 | gcc -c -m64 -O2 mrbrick.c
39 | gcc -c -m64 -O2 mrebrick.c
40 | gcc -c -m64 -O2 mrec2m.c
41 | gcc -c -m64 -O2 mrgf2m.c
42 | gcc -c -m64 -O2 mrflash.c
43 | gcc -c -m64 -O2 mrfrnd.c
44 | gcc -c -m64 -O2 mrdouble.c
45 | gcc -c -m64 -O2 mrround.c
46 | gcc -c -m64 -O2 mrbuild.c
47 | gcc -c -m64 -O2 mrflsh1.c
48 | gcc -c -m64 -O2 mrpi.c
49 | gcc -c -m64 -O2 mrflsh2.c
50 | gcc -c -m64 -O2 mrflsh3.c
51 | gcc -c -m64 -O2 mrflsh4.c
52 | cp mrmuldv.g64 mrmuldv.c
53 | gcc -c -m64 -O2 mrmuldv.c
54 | gcc -c -m64 -O2 big.cpp
55 | gcc -c -m64 -O2 zzn.cpp
56 | gcc -c -m64 -O2 ecn.cpp
57 | gcc -c -m64 -O2 ec2.cpp
58 | gcc -c -m64 -O2 flash.cpp
59 | gcc -c -m64 -O2 crt.cpp
60 | ar rc miracl.a mrcore.o mrarth0.o mrarth1.o mrarth2.o mralloc.o mrsmall.o mrzzn2.o mrzzn3.o
61 | ar r miracl.a mrio1.o mrio2.o mrjack.o mrgcd.o mrxgcd.o mrarth3.o mrbits.o mrecn2.o mrzzn4.o
62 | ar r miracl.a mrrand.o mrprime.o mrcrt.o mrscrt.o mrmonty.o mrcurve.o mrsroot.o mrzzn2b.o
63 | ar r miracl.a mrpower.o mrfast.o mrshs.o mrshs256.o mraes.o mrlucas.o mrstrong.o mrgcm.o
64 | ar r miracl.a mrflash.o mrfrnd.o mrdouble.o mrround.o mrbuild.o
65 | ar r miracl.a mrflsh1.o mrpi.o mrflsh2.o mrflsh3.o mrflsh4.o
66 | ar r miracl.a mrbrick.o mrebrick.o mrec2m.o mrgf2m.o mrmuldv.o mrshs512.o mrsha3.o mrfpe.o
67 | ar r miracl.a big.o zzn.o ecn.o ec2.o flash.o crt.o
-------------------------------------------------------------------------------- /lib/test.py: --------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | 
11 | import cppimport
12 | #This will pause for a moment to compile the module
13 | cppimport.set_quiet(False)
14 | m = cppimport.imp("minionn")
15 | #import minionn as m
16 | print("\nSuccessfully imported c++ code\n")
17 | 
18 | SLOTS = 4096
19 | PMAX = 101285036033
20 | 
21 | import numpy as np
22 | import os
23 | from operator import mul
24 | from functools import reduce
25 | 
26 | def run_test(shape):
27 |     """
28 |     Here, we just test if the homomorphic encryption works.
29 |     As such, we only test if Dec(Enc(w)*r - v) = w*r - v for every element of w
30 |     """
31 | 
32 |     # Generate w and encrypt
33 |     w_np = np.random.randint(10000,None,size=shape,dtype='int64')
34 |     w_cpp = m.VectorInt(w_np.flatten().tolist())
35 |     #w_cpp = m.VectorInt([i for i in range(0,100)])  # fixed debug input; left commented so the random w above is actually used
36 |     encW = m.encrypt_w(w_cpp,pkey)
37 | 
38 |     length = reduce(mul, shape, 1)
39 | 
40 |     r_np = np.random.randint(PMAX, None, size=length, dtype='uint64')
41 |     r = m.VectorUInt(r_np.flatten().tolist())
42 | 
43 |     v_np = np.random.randint(PMAX,None,size=length, dtype='uint64')
44 |     v = m.VectorUInt(v_np.flatten().tolist())
45 | 
46 |     # Do client precomputation
47 |     encU = m.client_precomputation(encW, r, v)
48 | 
49 |     # Decrypt w again
50 |     decrypted_u = m.VectorInt([])
51 |     m.decrypt_w(encU, skey, decrypted_u)
52 | 
53 |     # check if values match with expected value
54 |     ww = list(w_cpp)
55 |     vv = list(v)
56 |     rr = list(r)
57 |     dd = list(decrypted_u)[:length]
58 | 
59 |     """
60 |     print("W")
61 |     print(ww)
62 |     print("R")
63 |     print(rr[:length])
64 |     print("V")
65 |     print(vv[:length])
66 |     print("D")
67 |     print(dd)
68 |     """
69 |     print("Testing for correctness")
70 |     for i in range(0,length):
71 |         assert dd[i] == m.modulo((ww[i] * rr[i]) - vv[i])
72 |     print("Testing done.")
73 | 
74 | def test_two_vectors(vector, expected_list):
75 |     assert len(list(vector)) == len(expected_list), "Length wrong: " + str(len(list(vector))) + " instead of " + str(len(expected_list))
76 |     assert list(vector) == expected_list, "Wrong result: " + str(list(vector)) + " instead of expected " + str(expected_list)
77 | 
78 | 
79 | ## Maths tests
80 | print("### Basic maths tests")
81 | a = m.VectorInt([1,2])
82 | b = m.VectorInt([3,4])
83 | c = m.VectorInt([4,6])
84 | d = m.VectorInt([10000000000,20000000000,30000000000,35000000000,-21000000000])
85 | e = m.VectorInt([1,2,-2])
86 | null_matrix = m.VectorInt([0,0,0,0])
87 | null_vector = m.VectorInt([0,0])
88 | print("Testing vector operations")
89 | test_two_vectors(m.vector_add(a,b), [4,6])
90 | test_two_vectors(m.vector_sub(a,b), [-2,-2])
91 | test_two_vectors(m.vector_mul(b,3), [9,12])
92 | test_two_vectors(m.vector_div(c,2), [2,3])
93 | m.vector_floor(d,10000000000)
94 | test_two_vectors(d,[1,2,3,3,-2])
95 | m.vector_raise(e,10000000000)
96 | test_two_vectors(e,[10000000000,20000000000,-20000000000])
97 | 
98 | w = m.VectorInt([1,2,3,4])
99 | x = m.VectorInt([4,3,2,1])
100 | u = m.VectorInt([2,5,0,7])
101 | b = m.VectorInt([20,10])
102 | y = m.VectorInt([])
103 | print("Testing matrix multiplication")
104 | print("Normal matmul (b broadcasted)")
105 | m.matrixmul(w,b,u,x,2,2,2,y)
106 | test_two_vectors(y, [30,20,40,30])
107 | print("Row wise matmul (b.T broadcasted)")
108 | y = m.VectorInt([])
109 | m.matrixmul_b_columns(w,b,u,x,2,2,2,y)
110 | test_two_vectors(y, [30,30,30,30])
111 | 
112 | print("Testing extract sum")
113 | dim_m = 10
114 | dim_n = 5
115 | dim_o = 6
116 | a = [i%(dim_m*dim_n) for i in range(0,dim_m*dim_n*dim_o)]
117 | a = sorted(a)
118 | a_vec = m.VectorInt(a)
119 | b_vec = m.VectorInt([])
120 | 
121 | #Test all
122 | m.extract_sum(a_vec, b_vec, dim_o, dim_n, dim_m, 0)
123 | b_baseline = [dim_o * i for i in range(0,dim_m*dim_n)]
124 | test_two_vectors(b_vec, b_baseline)
125 | 
126 | #Create subset behind a and test it
127 | new_m = 2
128 | new_n = 2
129 | new_o = 3
130 | a.extend(sorted([i%(new_m*new_n) for i in range(0,new_m*new_n*new_o)]))
131 | b_baseline = [new_o * i for i in range(0,new_m*new_n)]
132 | a_vec = m.VectorInt(a)
133 | b_vec = m.VectorInt([])
134 | m.extract_sum(a_vec, b_vec, new_o, new_n, new_m, dim_m*dim_n*dim_o)
135 | test_two_vectors(b_vec, b_baseline)
136 | 
137 | ## Crypto tests
138 | #crypto operations return a list of bytes
139 | print("### Homomorphic + precomputation tests")
140 | asset_folder = "assets/"
141 | 
142 | if not os.path.exists(asset_folder):
143 |     os.makedirs(asset_folder)
144 |     print("Created directory " + asset_folder)
145 | 
146 | pkey = asset_folder + "s.pkey"
147 | skey = asset_folder + "s.skey"
148 | 
149 | shape = (10,10)
150 | 
151 | # Init library and generate keys
152 | m.init(SLOTS)
153 | m.gen_keys(pkey, skey)
154 | 
155 | print("Running simple encrypt/decrypt example")
156 | sample = m.VectorInt([1,2,3,4,5,6,7,8,7,6,5,4,-12,-14])
157 | encW = m.encrypt_w(sample,pkey)
158 | decrypted = m.VectorInt([])
159 | m.decrypt_w(encW, skey, decrypted)
160 | test_two_vectors(sample, list(decrypted)[:len(list(sample))])
161 | 
162 | print("Running homomorphic test with random r and v")
163 | run_test(shape)
164 | 
165 | print("Cleanup")
166 | os.remove(pkey)
167 | os.remove(skey)
168 | try:
169 |     os.rmdir(asset_folder)
170 | except OSError:
171 |     print("Not removing non-empty directory " + asset_folder)
172 | 
173 | print("### All tests passed")
174 | 
-------------------------------------------------------------------------------- /lib/test_mpc_client.py: --------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | def test_two_vectors(vector, expected_list):
11 |     assert len(list(vector)) == len(expected_list), "Length wrong: " + str(len(list(vector))) + " instead of " + str(len(expected_list))
12 |     assert list(vector) == expected_list, "Wrong result: " + str(list(vector)) + " instead of expected " + str(expected_list)
13 | 
14 | import cppimport
15 | #This will pause for a moment to compile the module
16 | cppimport.set_quiet(False)
17 | m = cppimport.imp("minionn")
18 | print("Successfully imported c++ code\n")
19 | 
20 | print("Testing MPC functions, client side...")
21 | m.init_aby("127.0.0.1", 5000, False)
22 | print("Connected to server, testing ReLu.")
23 | 
24 | num = 5
25 | xc = m.VectorInt([1,2,5,0,0])
26 | rc = m.VectorInt([1,1,1,1,1])
27 | 
28 | yc = m.VectorInt([])
29 | m.relu_client(num, xc, rc, yc)
30 | 
31 | print("Relu done, testing correctness.")
32 | #print("Num is " + str(num))
33 | #print("Xc is " + str(xc))
34 | #print("Rc is " + str(rc))
35 | #print("After relu, yc is " + str(yc))
36 | test_two_vectors(yc, [1, 1, 1, 1, 1])
37 | print("Correct")
38 | print("Testing second run")
39 | 
40 | num = 5
41 | xc = m.VectorInt([2,2,3,3,3])
42 | rc = m.VectorInt([2,2,2,2,2])
43 | 
44 | yc = m.VectorInt([])
45 | m.relu_client(num, xc, rc, yc)
46 | 
47 | #print("Num is " + str(num))
48 | #print("Xc is " + str(xc))
49 | #print("Rc is " + str(rc))
50 | #print("After relu, yc is " + str(yc))
51 | 
52 | test_two_vectors(yc, [2, 2, 2, 2, 2])
53 | print("Second run correct. Shutting down...")
Shutting down...") 54 | 55 | m.shutdown_aby() 56 | 57 | print("All done, test successful.") -------------------------------------------------------------------------------- /lib/test_mpc_server.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | def test_two_vectors(vector, expected_list): 11 | assert len(list(vector)) == len(expected_list), "Length wrong" + str(len(list(vector))) + " instead of " + str(len(expected_list)) 12 | assert list(vector) == expected_list, "Wrong result: " + str(list(vector)) + " instead of expected " + str(expected_list) 13 | 14 | import cppimport 15 | #This will pause for a moment to compile the module 16 | cppimport.set_quiet(False) 17 | m = cppimport.imp("minionn") 18 | print("Successfuly imported c++ code\n") 19 | 20 | print("Testing MPC functions, server side...") 21 | m.init_aby("127.0.0.1", 5000, True) 22 | print("Connected to client, testing ReLu.") 23 | 24 | num = 5 25 | xs = m.VectorInt([-5,-4,-3,-2,1]) 26 | 27 | ys = m.VectorInt([]) 28 | m.relu_server(num, xs, ys) 29 | 30 | print("Relu done, testing correctness.") 31 | #print("Num is " + str(num)) 32 | #print("Xs is " + str(xs)) 33 | #print("After relu, ys is " + str(ys)) 34 | 35 | test_two_vectors(ys, [-1, -1, 1, -1, 0]) 36 | print("Correct") 37 | print("Testing second run") 38 | 39 | num = 5 40 | xs = m.VectorInt([-5,-5,-5,5,5]) 41 | 42 | ys = m.VectorInt([]) 43 | m.relu_server(num, xs, ys) 44 | 45 | #print("Num is " + str(num)) 46 | #print("Xs is " + str(xs)) 47 | #print("After relu, ys is " + str(ys)) 48 | 49 | test_two_vectors(ys, [-2, -2, -2, 6, 6]) 50 | print("Second run correct. 
Shutting down...") 51 | 52 | m.shutdown_aby() 53 | 54 | print("All done, test successful.") -------------------------------------------------------------------------------- /models/R2_S.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/models/R2_S.onnx -------------------------------------------------------------------------------- /models/R_S.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/models/R_S.onnx -------------------------------------------------------------------------------- /models/S.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/models/S.onnx -------------------------------------------------------------------------------- /models/S.tensor: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/models/S.tensor -------------------------------------------------------------------------------- /models/check_r2s.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | import onnx 12 | import struct 13 | import random 14 | import numpy as np 15 | import onnx 16 | from onnx import numpy_helper 17 | model = onnx.load("R2_S.onnx") 18 | 19 | def build(inp, shape): 20 | return np.array(inp).reshape(shape) 21 | 22 | tensor_dict = {} 23 | for t in model.graph.initializer: 24 | tensor_dict[str(t.name)] = onnx.numpy_helper.to_array(t) 25 | 26 | 27 | input_tensor = onnx.TensorProto() 28 | with open('S.tensor', 'rb') as fid: 29 | content = fid.read() 30 | input_tensor.ParseFromString(content) 31 | 32 | tensor_dict["1"] = onnx.numpy_helper.to_array(input_tensor) 33 | tensor_dict["8"] = np.reshape(tensor_dict["1"], (10,784)) 34 | 35 | # do fractionals 36 | fractional = 1000 37 | downscale = 1 38 | 39 | single = ["2","4","6","8"] 40 | double = ["3","5","7"] 41 | for s in single: 42 | tensor_dict[s] = np.multiply(tensor_dict[s], fractional) 43 | 44 | for s in double: 45 | tensor_dict[s] = np.multiply(tensor_dict[s], fractional*fractional) 46 | 47 | """ 48 | for s in tensor_dict: 49 | tensor_dict[s] = np.array([int(d) for d in tensor_dict[s].flatten().tolist()]).reshape(tensor_dict[s].shape) 50 | """ 51 | 52 | # compute 53 | tensor_dict["11temp"] = np.matmul(tensor_dict["8"], tensor_dict["2"].T) 54 | tensor_dict["11add"] = np.add(tensor_dict["11temp"], tensor_dict["3"]) 55 | tensor_dict["11"] = np.divide(tensor_dict["11add"],fractional*downscale) 56 | 57 | tensor_dict["12"] = np.maximum(tensor_dict["11"],0) 58 | 59 | tensor_dict["15temp"] = np.matmul(tensor_dict["12"], tensor_dict["4"].T) 60 | tensor_dict["15add"] = np.add(tensor_dict["15temp"], tensor_dict["5"]) 61 | tensor_dict["15"] = np.divide(tensor_dict["15add"],fractional*downscale) 62 | 63 | tensor_dict["16"] = np.maximum(tensor_dict["15"],0) 64 | 65 | tensor_dict["19temp"] = np.matmul(tensor_dict["16"], tensor_dict["6"].T) 66 | tensor_dict["19add"] = np.add(tensor_dict["19temp"], 
tensor_dict["7"]) 67 | tensor_dict["19"] = np.divide(tensor_dict["19add"],fractional*downscale) 68 | 69 | 70 | given = np.loadtxt("out.txt", delimiter=",").astype(int) 71 | diff = np.subtract(given, tensor_dict["19"]) 72 | 73 | """ 74 | print("Reshaped Input (6)") 75 | print(tensor_dict["6"]) 76 | 77 | print("W1 (2)") 78 | print(tensor_dict["2"]) 79 | 80 | print("b1 (3)") 81 | print(tensor_dict["3"]) 82 | 83 | print("Before Relu (9)") 84 | print(tensor_dict["9"]) 85 | 86 | print("After Relu (10)") 87 | print(tensor_dict["10"]) 88 | 89 | print("W2 (4)") 90 | print(tensor_dict["4"]) 91 | 92 | print("b2 (5)") 93 | print(tensor_dict["5"]) 94 | """ 95 | 96 | print("expected (19)") 97 | print(tensor_dict["19"]) 98 | 99 | print("given") 100 | print(given) 101 | 102 | print("Difference between expected and given result:") 103 | print(diff) 104 | 105 | print("\n") 106 | print("Prediction expected: " + str(np.amax(tensor_dict["19"])) + " at index " + str(np.argmax(tensor_dict["19"])) ) 107 | print("Prediction have: " + str(np.amax(given))+ " at index " + str(np.argmax(given))) 108 | print("\n") 109 | 110 | #np.testing.assert_array_equal(tensor_dict["19"], given, err_msg="Result is not the same as expected result!", verbose=True) 111 | 112 | if np.argmax(tensor_dict["19"]) == np.argmax(given): 113 | print("Prediction result equal. Test passed.") 114 | else: 115 | print("Prediction differs. TEST FAILED!") -------------------------------------------------------------------------------- /models/check_rs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | import onnx 12 | import struct 13 | import random 14 | import numpy as np 15 | import onnx 16 | from onnx import numpy_helper 17 | model = onnx.load("R_S.onnx") 18 | 19 | def build(inp, shape): 20 | return np.array(inp).reshape(shape) 21 | 22 | tensor_dict = {} 23 | for t in model.graph.initializer: 24 | tensor_dict[str(t.name)] = onnx.numpy_helper.to_array(t) 25 | 26 | 27 | input_tensor = onnx.TensorProto() 28 | with open('S.tensor', 'rb') as fid: 29 | content = fid.read() 30 | input_tensor.ParseFromString(content) 31 | 32 | tensor_dict["1"] = onnx.numpy_helper.to_array(input_tensor) 33 | tensor_dict["6"] = np.reshape(tensor_dict["1"], (10,784)) 34 | 35 | # do fractionals 36 | fractional = 1000 37 | downscale = 1 38 | 39 | single = ["2","4","6"] 40 | double = ["3","5"] 41 | for s in single: 42 | tensor_dict[s] = np.multiply(tensor_dict[s], fractional) 43 | 44 | for s in double: 45 | tensor_dict[s] = np.multiply(tensor_dict[s], fractional*fractional) 46 | 47 | """ 48 | for s in tensor_dict: 49 | tensor_dict[s] = np.array([int(d) for d in tensor_dict[s].flatten().tolist()]).reshape(tensor_dict[s].shape) 50 | """ 51 | 52 | # compute 53 | tensor_dict["9temp"] = np.matmul(tensor_dict["6"], tensor_dict["2"].T) 54 | tensor_dict["9add"] = np.add(tensor_dict["9temp"], tensor_dict["3"]) 55 | tensor_dict["9"] = np.divide(tensor_dict["9add"],fractional*downscale) 56 | 57 | tensor_dict["10"] = np.maximum(tensor_dict["9"],0) 58 | 59 | tensor_dict["13temp"] = np.matmul(tensor_dict["10"], tensor_dict["4"].T) 60 | tensor_dict["13add"] = np.add(tensor_dict["13temp"], tensor_dict["5"]) 61 | tensor_dict["13"] = np.divide(tensor_dict["13add"],fractional*downscale) 62 | 63 | given = np.loadtxt("out.txt", 
delimiter=",").astype(int) 64 | diff = np.subtract(given, tensor_dict["13"]) 65 | 66 | """ 67 | print("Reshaped Input (6)") 68 | print(tensor_dict["6"]) 69 | 70 | print("W1 (2)") 71 | print(tensor_dict["2"]) 72 | 73 | print("b1 (3)") 74 | print(tensor_dict["3"]) 75 | 76 | print("Before Relu (9)") 77 | print(tensor_dict["9"]) 78 | 79 | print("After Relu (10)") 80 | print(tensor_dict["10"]) 81 | 82 | print("W2 (4)") 83 | print(tensor_dict["4"]) 84 | 85 | print("b2 (5)") 86 | print(tensor_dict["5"]) 87 | """ 88 | 89 | print("expected (13)") 90 | print(tensor_dict["13"]) 91 | print("have") 92 | print(given) 93 | 94 | print("Difference between expected and given result:") 95 | print(diff) 96 | 97 | print("\n") 98 | print("Prediction expected: " + str(np.amax(tensor_dict["13"])) + " at index " + str(np.argmax(tensor_dict["13"])) ) 99 | print("Prediction have: " + str(np.amax(given))+ " at index " + str(np.argmax(given))) 100 | print("\n") 101 | 102 | #np.testing.assert_array_equal(tensor_dict["13"], given, err_msg="Result is not the same as expected result!", verbose=True) 103 | 104 | if np.argmax(tensor_dict["13"]) == np.argmax(given): 105 | print("Prediction result equal. Test passed.") 106 | else: 107 | print("Prediction differs. TEST FAILED!") -------------------------------------------------------------------------------- /models/check_s.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | import onnx 12 | import struct 13 | import random 14 | import numpy as np 15 | import onnx 16 | from onnx import numpy_helper 17 | model = onnx.load("S.onnx") 18 | 19 | def build(inp, shape): 20 | return np.array(inp).reshape(shape) 21 | 22 | tensor_dict = {} 23 | for t in model.graph.initializer: 24 | tensor_dict[str(t.name)] = onnx.numpy_helper.to_array(t) 25 | 26 | 27 | input_tensor = onnx.TensorProto() 28 | with open('S.tensor', 'rb') as fid: 29 | content = fid.read() 30 | input_tensor.ParseFromString(content) 31 | 32 | tensor_dict["1"] = onnx.numpy_helper.to_array(input_tensor) 33 | tensor_dict["4"] = np.reshape(tensor_dict["1"], (10,784)) 34 | 35 | # do fractionals 36 | fractional = 1000 37 | downscale = 1 38 | 39 | single = ["2","4"] 40 | double = ["3"] 41 | for s in single: 42 | tensor_dict[s] = np.multiply(tensor_dict[s], fractional) 43 | 44 | for s in double: 45 | tensor_dict[s] = np.multiply(tensor_dict[s], fractional*fractional) 46 | 47 | """ 48 | for s in tensor_dict: 49 | tensor_dict[s] = np.array([int(d) for d in tensor_dict[s].flatten().tolist()]).reshape(tensor_dict[s].shape) 50 | """ 51 | 52 | # compute 53 | tensor_dict["7temp"] = np.matmul(tensor_dict["4"], tensor_dict["2"].T) 54 | tensor_dict["7added"] = np.add(tensor_dict["7temp"], tensor_dict["3"]) 55 | tensor_dict["7"] = np.divide(tensor_dict["7added"],fractional*downscale) 56 | 57 | given = np.loadtxt("out.txt", delimiter=",").astype(int) 58 | diff = np.subtract(given, tensor_dict["7"]) 59 | 60 | print("Expected") 61 | print(tensor_dict["7"]) 62 | 63 | print("Have") 64 | print(given) 65 | 66 | print("Difference between expected and given result:") 67 | print(diff) 68 | 69 | print("\n") 70 | print("Prediction expected: " + str(np.amax(tensor_dict["7"])) + " at index " + str(np.argmax(tensor_dict["7"])) ) 71 | print("Prediction have: " + str(np.amax(given))+ " at index " 
71 | print("Prediction have: " + str(np.amax(given)) + " at index " + str(np.argmax(given)))
72 | print("\n")
73 | 
74 | #np.testing.assert_array_equal(tensor_dict["7"], given, err_msg="Result is not the same as expected result!", verbose=True)
75 | 
76 | if np.argmax(tensor_dict["7"]) == np.argmax(given):
77 |     print("Prediction result equal. Test passed.")
78 | else:
79 |     print("Prediction differs. TEST FAILED!")
-------------------------------------------------------------------------------- /proto/README.md: --------------------------------------------------------------------------------
1 | [Code generation]
2 | 
3 | General protobuf code generation:
4 | protoc --proto_path=common --python_out=bin/generated common/minionn-onnx.proto common/onnx-tensor.proto
5 | 
6 | For grpc generation use:
7 | python3 -m grpc_tools.protoc -I./ --python_out=../common/ --grpc_python_out=../common/ minionn-onnx.proto onnx.proto
8 | 
9 | BUT:
10 | The problem with onnx is that we want to include the ModelProto and TensorProto objects from
11 | the native onnx python library in our protobuf files.
12 | 
13 | The easiest way to do that was (for me) to make the following changes to the
14 | generated minionn_onnx_pb2 file:
15 | 1) import onnx as onnx__pb2
16 | instead of importing the actual onnx_pb2 file
17 | 2) In the \_PRECOMPUTATIONRESPONSE.fields and \_COMPUTATIONREQUEST.fields,
18 | change the \_MODELPROTO and \_TENSORPROTO references in onnx to the
19 | names used in the onnx library: ModelProto and TensorProto
20 | 3) Possibly apply this to all other onnx object references you need.
21 | This means, for all messages, exchange the generated name for the onnx name
22 | 
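As a concrete illustration of step 1) (a hypothetical excerpt of the generated file; only the import line changes):

    # common/minionn_onnx_pb2.py (generated by grpc_tools)
    # import onnx_pb2 as onnx__pb2     # line as emitted by the generator
    import onnx as onnx__pb2           # patched: reuse the native onnx package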
-------------------------------------------------------------------------------- /proto/minionn-onnx.proto: --------------------------------------------------------------------------------
1 | /*
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | */
10 | 
11 | 
12 | syntax = "proto3";
13 | 
14 | import "onnx.proto";
15 | 
16 | package minionn;
17 | 
18 | // The service definition.
19 | service MinioNN {
20 |   // Precomputation service - requests ONNX format, ~w
21 |   rpc Precomputation(PrecomputationRequest) returns (PrecomputationResponse) {}
22 | 
23 |   // Computation message - sends ~u and x_s and receives y_s
24 |   rpc Computation(ComputationRequest) returns (ComputationResponse) {}
25 | }
26 | 
27 | /*
28 |  * Precomputation:
29 |  * Input(message view): Nothing
30 |  * Output: ONNX model for client and encrypted w
31 |  */
32 | // The request for the precomputation data of the Server
33 | // Technically, this is only required once per connection
34 | message PrecomputationRequest {
35 |   bool request_model = 1;
36 |   bool request_w = 2;
37 | }
38 | 
39 | // The response sent by the Server
40 | // Contains the onnx model and/or the precomputed SIMD ~w
41 | message PrecomputationResponse{
42 |   onnx.ModelProto model = 1;
43 |   repeated bytes w = 2;
44 | }
45 | 
46 | 
47 | /*
48 |  * Computation:
49 |  * Input: x_s, encrypted U
50 |  * Output: y_s
51 |  */
52 | // The request message containing the client's u and x_s
53 | message ComputationRequest {
54 |   repeated bytes u = 1;
55 |   repeated int64 xs = 2;
56 | }
57 | 
58 | // The response message containing y_s
59 | message ComputationResponse {
60 |   repeated int64 ys = 1;
61 | }
62 | 
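A minimal client-side sketch of calling this service from Python (the port is a placeholder and the real client logic lives in client.py; stub and message names follow from the generated code):

    import grpc
    from common import minionn_onnx_pb2, minionn_onnx_pb2_grpc

    channel = grpc.insecure_channel("localhost:8998")   # port is an assumption
    stub = minionn_onnx_pb2_grpc.MinioNNStub(channel)
    response = stub.Precomputation(minionn_onnx_pb2.PrecomputationRequest(
        request_model=True, request_w=True))
    # response.model is an onnx.ModelProto, response.w the encrypted ~w batches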
-------------------------------------------------------------------------------- /proto/onnx.proto: --------------------------------------------------------------------------------
1 | //
2 | // WARNING: This file is automatically generated!  Please edit onnx.in.proto.
3 | //
4 | 
5 | 
6 | // Copyright (c) Facebook Inc. and Microsoft Corporation.
7 | // Licensed under the MIT license.
8 | 
9 | syntax = "proto3";
10 | 
11 | package onnx;
12 | 
13 | // Note [Release]
14 | // We are still in the very early stage of defining ONNX. The current
15 | // version of ONNX is a starting point. While we are actively working
16 | // towards a complete spec, we would like to get the community involved
17 | // by sharing our working version of ONNX.
18 | 
19 | // Note [Protobuf compatibility]
20 | // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21 | // Based on experience working with downstream vendors, we generally can't
22 | // assume recent versions of protobufs. This means that we do not use any
23 | // protobuf features that are only available in proto3.
24 | //
25 | // Here are the most notable contortions we have to carry out to work around
26 | // these limitations:
27 | //
28 | //   - No 'map' (added protobuf 3.0). We instead represent mappings as lists
29 | //     of key-value pairs, where order does not matter and duplicates
30 | //     are not allowed.
31 | 
32 | // Note [Namespaces]
33 | // ~~~~~~~~~~~~~~~~~
34 | // ONNX gives explicit names to graphs, intermediate values and
35 | // serialized tensors. To make it easier to generate names, we organize
36 | // these into separate namespaces (so, e.g., a graph can have the same
37 | // name as a serialized tensor.) The namespaces are as follows:
38 | //
39 | // - Node: These names identify specific nodes in the graph (but not, necessarily
40 | //   any particular input or output of the node).
41 | // - Graph: These names identify graphs in the protobuf.
42 | // - Attribute: These names identify attribute names for extra attributes that
43 | //   are passed to operators.
44 | // - Operator: These names identify particular operators.
45 | // - Value: These names identify intermediate values (typically tensors) flowing through
46 | //   the computation of a graph.
47 | // - Shape: These names represent parameters for unknown shape dimensions.
48 | //
49 | // We specify the namespace of a name in ONNX as comments in the form
50 | // of "namespace {Node,Graph,Operator,Attribute,Value,Shape}". Framework is responsible
51 | // for supporting the namespaces.
52 | //
53 | // Naming things is hard. Every element with a name has an optional doc_string associated
54 | // with it, providing a human-readable description in text markdown.
55 | 
56 | // To be compatible with both proto2 and proto3, we will use a version number
57 | // that is not defined by the default value but an explicit enum number.
58 | enum Version {
59 |   // proto3 requires the first enum value to be zero.
60 |   // We add this just to appease the compiler.
61 |   _START_VERSION = 0;
62 |   // The version field is always serialized and we will use it to store the
63 |   // version that the graph is generated from. This helps us set up version
64 |   // control. We should use version as
65 |   //     xx(major) - xx(minor) - xxxx(bugfix)
66 |   // and we are starting with 0x00000001 (0.0.1), which was the
67 |   // version we published on Oct 10, 2017.
68 |   IR_VERSION_2017_10_10 = 0x00000001;
69 | 
70 |   // IR_VERSION 0.0.2 published on Oct 30, 2017
71 |   // - Added type discriminator to AttributeProto to support proto3 users
72 |   IR_VERSION_2017_10_30 = 0x00000002;
73 | 
74 |   // IR VERSION 0.0.3 published on Nov 3, 2017
75 |   // - For operator versioning:
76 |   //    - Added new message OperatorSetIdProto
77 |   //    - Added opset_import in ModelProto
78 |   // - For vendor extensions, added domain in NodeProto
79 |   IR_VERSION = 0x00000003;
80 | }
81 | 
82 | // A named attribute containing either singular float, integer, string
83 | // and tensor values, or repeated float, integer, string and tensor values.
84 | // An AttributeProto MUST contain the name field, and *only one* of the
85 | // following content fields, effectively enforcing a C/C++ union equivalent.
86 | message AttributeProto {
87 | 
88 |   // Note: this enum is structurally identical to the OpSchema::AttrType
89 |   // enum defined in schema.h. If you rev one, you likely need to rev the other.
90 |   enum AttributeType {
91 |     UNDEFINED = 0;
92 |     FLOAT = 1;
93 |     INT = 2;
94 |     STRING = 3;
95 |     TENSOR = 4;
96 |     GRAPH = 5;
97 | 
98 |     FLOATS = 6;
99 |     INTS = 7;
100 |     STRINGS = 8;
101 |     TENSORS = 9;
102 |     GRAPHS = 10;
103 |   }
104 | 
105 |   // The name field MUST be present for this version of the IR.
106 |   string name = 1;           // namespace Attribute
107 | 
108 |   // A human-readable documentation for this attribute. Markdown is allowed.
109 |   string doc_string = 13;
110 | 
111 |   // The type field MUST be present for this version of the IR.
112 |   // For 0.0.1 versions of the IR, this field was not defined, and
113 |   // implementations needed to use has_field heuristics to determine
114 |   // which value field was in use.  For IR_VERSION 0.0.2 or later, this
115 |   // field MUST be set and match the f|i|s|t|... field in use.  This
116 |   // change was made to accommodate proto3 implementations.
117 |   AttributeType type = 20;   // discriminator that indicates which field below is in use
118 | 
119 |   // Exactly ONE of the following fields must be present for this version of the IR
120 |   float f = 2;               // float
121 |   int64 i = 3;               // int
122 |   bytes s = 4;               // UTF-8 string
123 |   TensorProto t = 5;         // tensor value
124 |   GraphProto g = 6;          // graph
125 |   // Do not use field below, it's deprecated.
126 |   // optional ValueProto v = 12;  // value - subsumes everything but graph
127 | 
128 |   repeated float floats = 7;          // list of floats
129 |   repeated int64 ints = 8;            // list of ints
130 |   repeated bytes strings = 9;         // list of UTF-8 strings
131 |   repeated TensorProto tensors = 10;  // list of tensors
132 |   repeated GraphProto graphs = 11;    // list of graph
133 | }
134 | 
135 | // Defines information on value, including the name, the type, and
136 | // the shape of the value.
137 | message ValueInfoProto {
138 |   // This field MUST be present in this version of the IR.
139 |   string name = 1;     // namespace Value
140 |   // This field MUST be present in this version of the IR.
141 |   TypeProto type = 2;
142 |   // A human-readable documentation for this value. Markdown is allowed.
143 |   string doc_string = 3;
144 | }
145 | 
146 | // NodeProto stores a node that is similar to the notion of "layer"
147 | // or "operator" in many deep learning frameworks. For example, it can be a
148 | // node of type "Conv" that takes in an image, a filter tensor and a bias
149 | // tensor, and produces the convolved output.
150 | message NodeProto {
151 |   repeated string input = 1;    // namespace Value
152 |   repeated string output = 2;   // namespace Value
153 | 
154 |   // An optional identifier for this node in a graph.
155 |   // This field MAY be absent in this version of the IR.
156 |   string name = 3;     // namespace Node
157 | 
158 |   // The symbolic identifier of the Operator to execute.
159 |   string op_type = 4;  // namespace Operator
160 |   // The domain of the OperatorSet that specifies the operator named by op_type.
161 |   string domain = 7;   // namespace Domain
162 | 
163 |   // Additional named attributes.
164 |   // NOTE: Simply using ValueProto.NameValuePairProto is the most general
165 |   // solution. I kept AttributeProto to minimize churn on CI results.
166 |   repeated AttributeProto attribute = 5;
167 | 
168 |   // A human-readable documentation for this node. Markdown is allowed.
169 |   string doc_string = 6;
170 | }
171 | 
172 | // ModelProto is a top-level file/container format for bundling a ML model.
173 | // The semantics of the model are described by the GraphProto that represents
174 | // a parameterized computation graph against a set of named operators that are
175 | // defined independently from the graph.
176 | message ModelProto {
177 |   // The version of the IR this model targets. See Version enum above.
178 |   // This field MUST be present.
179 |   int64 ir_version = 1;
180 | 
181 |   // The OperatorSets this model relies on.
182 |   // All ModelProtos MUST have at least one entry that
183 |   // specifies which version of the ONNX OperatorSet is
184 |   // being imported.
185 |   //
186 |   // All nodes in the ModelProto's graph will bind against the operator
187 |   // with the same-domain/same-op_type operator with the HIGHEST version
188 |   // in the referenced operator sets.
189 |   repeated OperatorSetIdProto opset_import = 8;
190 | 
191 |   // The name of the framework or tool used to generate this model.
192 |   // This field SHOULD be present to indicate which implementation/tool/framework
193 |   // emitted the model.
194 |   string producer_name = 2;
195 | 
196 |   // The version of the framework or tool used to generate this model.
197 |   // This field SHOULD be present to indicate which implementation/tool/framework
198 |   // emitted the model.
199 |   string producer_version = 3;
200 | 
201 |   // Domain name of the model.
202 |   // We use reverse domain names as name space indicators. For example:
203 |   // `com.facebook.fair` or `com.microsoft.cognitiveservices`
204 |   //
205 |   // Together with `model_version` and GraphProto.name, this forms the unique identity of
206 |   // the graph.
207 |   string domain = 4;
208 | 
209 |   // The version of the graph encoded. See Version enum below.
210 |   int64 model_version = 5;
211 | 
212 |   // A human-readable documentation for this model. Markdown is allowed.
213 |   string doc_string = 6;
214 | 
215 |   // The parameterized graph that is evaluated to execute the model.
216 |   GraphProto graph = 7;
217 | 
218 |   // Named metadata values; keys should be distinct.
219 |   repeated StringStringEntryProto metadata_props = 14;
220 | };
221 | 
222 | // StringStringEntryProto follows the pattern for cross-proto-version maps.
223 | // See https://developers.google.com/protocol-buffers/docs/proto3#maps
224 | message StringStringEntryProto {
225 |   string key = 1;
226 |   string value = 2;
227 | };
228 | 
229 | // GraphProto defines a parameterized series of nodes to form a directed acyclic graph.
230 | // This is the equivalent of the "network" and "graph" in many deep learning
231 | // frameworks.
232 | message GraphProto {
233 |   // The nodes in the graph.
234 |   repeated NodeProto node = 1;
235 | 
236 |   // The name of the graph.
237 |   string name = 2;   // namespace Graph
238 | 
239 |   // A list of named tensor values (constants), used to specify default
240 |   // values for some of the inputs of the graph.
241 |   // Each TensorProto entry must have a distinct name (within the list) that
242 |   // also appears in the input list.
243 |   // In an evaluation, the default value specified here is used if and only if
244 |   // user specifies no value for the corresponding input parameter.
245 |   // May be used to pass serialized parameters for networks.
246 |   repeated TensorProto initializer = 5;
247 | 
248 |   // A human-readable documentation for this graph. Markdown is allowed.
249 |   string doc_string = 10;
250 | 
251 |   // The inputs and outputs of the graph.
252 |   repeated ValueInfoProto input = 11;
253 |   repeated ValueInfoProto output = 12;
254 | 
255 |   // Information for the values in the graph. The ValueInfoProto.name's
256 |   // must be distinct. It is optional for a value to appear in value_info list.
257 |   repeated ValueInfoProto value_info = 13;
258 | 
259 |   // DO NOT USE the following fields, they were deprecated before
260 |   // repeated string input = 3;
261 |   // repeated string output = 4;
262 |   // optional int64 ir_version = 6;
263 |   // optional int64 producer_version = 7;
264 |   // optional string producer_tag = 8;
265 |   // optional string domain = 9;
266 | }
267 | 
268 | // A message defined to store a tensor in its serialized format.
269 | message TensorProto {
270 |   enum DataType {
271 |     UNDEFINED = 0;
272 |     // Basic types.
273 |     FLOAT = 1;   // float
274 |     UINT8 = 2;   // uint8_t
275 |     INT8 = 3;    // int8_t
276 |     UINT16 = 4;  // uint16_t
277 |     INT16 = 5;   // int16_t
278 |     INT32 = 6;   // int32_t
279 |     INT64 = 7;   // int64_t
280 |     STRING = 8;  // string
281 |     BOOL = 9;    // bool
282 | 
283 |     // Advanced types
284 |     FLOAT16 = 10;
285 |     DOUBLE = 11;
286 |     UINT32 = 12;
287 |     UINT64 = 13;
288 |     COMPLEX64 = 14;     // complex with float32 real and imaginary components
289 |     COMPLEX128 = 15;    // complex with float64 real and imaginary components
290 |     // Future extensions go here.
291 |   }
292 | 
293 |   // The shape of the tensor.
294 |   repeated int64 dims = 1;
295 | 
296 |   // The data type of the tensor.
297 |   DataType data_type = 2;
298 | 
299 |   // For very large tensors, we may want to store them in chunks, in which
300 |   // case the following fields will specify the segment that is stored in
301 |   // the current TensorProto.
302 |   message Segment {
303 |     int64 begin = 1;
304 |     int64 end = 2;
305 |   }
306 |   Segment segment = 3;
307 | 
308 |   // Tensor content must be in the row major order.
309 |   //
310 |   // Depending on the data_type field, exactly one of the fields below with
311 |   // name ending in _data is used to store the elements of the tensor.
312 | 
313 |   // For float and complex64 values
314 |   // Complex64 tensors are encoded as a single array of floats,
315 |   // with the real components appearing in odd numbered positions,
316 |   // and the corresponding imaginary component appearing in the
317 |   // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
318 |   // is encoded as [1.0, 2.0, 3.0, 4.0])
319 |   // When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
320 |   repeated float float_data = 4 [packed = true];
321 | 
322 |   // For int32, uint8, int8, uint16, int16, bool, and float16 values
323 |   // float16 values must be bit-wise converted to an uint16_t prior
324 |   // to writing to the buffer.
325 |   // When this field is present, the data_type field MUST be
326 |   // INT32, INT16, INT8, UINT16, UINT8, BOOL, or FLOAT16
327 |   repeated int32 int32_data = 5 [packed = true];
328 | 
329 |   // For strings.
330 |   // Each element of string_data is a UTF-8 encoded Unicode
331 |   // string. No trailing null, no leading BOM. The protobuf "string"
332 |   // scalar type is not used to match ML community conventions.
333 |   // When this field is present, the data_type field MUST be STRING
334 |   repeated bytes string_data = 6;
335 | 
336 |   // For int64.
337 |   // When this field is present, the data_type field MUST be INT64
338 |   repeated int64 int64_data = 7 [packed = true];
339 | 
340 |   // Optionally, a name for the tensor.
341 |   string name = 8; // namespace Value
342 | 
343 |   // A human-readable documentation for this tensor. Markdown is allowed.
344 |   string doc_string = 12;
345 | 
346 |   // Serializations can either use one of the fields above, or use this
347 |   // raw bytes field. The only exception is the string case, where one is
348 |   // required to store the content in the repeated bytes string_data field.
349 |   //
350 |   // When this raw_data field is used to store tensor value, elements MUST
351 |   // be stored as fixed-width, little-endian order.
352 |   // Floating-point data types MUST be stored in IEEE 754 format.
353 |   // Complex64 elements must be written as two consecutive FLOAT values, real component first.
354 |   // Complex128 elements must be written as two consecutive DOUBLE values, real component first.
355 |   // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
356 |   //
357 |   // Note: the advantage of specific field rather than the raw_data field is
358 |   // that in some cases (e.g. int data), protobuf does a better packing via
359 |   // variable length storage, and may lead to smaller binary footprint.
360 |   // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
361 |   bytes raw_data = 9;
362 | 
363 |   // For double
364 |   // Complex128 tensors are encoded as a single array of doubles,
365 |   // with the real components appearing in odd numbered positions,
366 |   // and the corresponding imaginary component appearing in the
367 |   // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
368 |   // is encoded as [1.0, 2.0, 3.0, 4.0])
369 |   // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
370 |   repeated double double_data = 10 [packed = true];
371 | 
372 |   // For uint64 and uint32 values
373 |   // When this field is present, the data_type field MUST be
374 |   // UINT32 or UINT64
375 |   repeated uint64 uint64_data = 11 [packed = true];
376 | }
377 | 
378 | // Defines a tensor shape. A dimension can be either an integer value
379 | // or a symbolic variable. A symbolic variable represents an unknown
380 | // dimension.
381 | message TensorShapeProto {
382 |   message Dimension {
383 |     oneof value {
384 |       int64 dim_value = 1;
385 |       string dim_param = 2;   // namespace Shape
386 |     };
387 |   };
388 |   repeated Dimension dim = 1;
389 | }
390 | 
391 | // Define the types.
392 | message TypeProto {
393 | 
394 |   message Tensor {
395 |     // This field MUST NOT have the value of UNDEFINED
396 |     // This field MUST be present for this version of the IR.
397 |     TensorProto.DataType elem_type = 1;
398 |     TensorShapeProto shape = 2;
399 |   }
400 | 
401 | 
402 |   oneof value {
403 |     // The type of a tensor.
404 |     Tensor tensor_type = 1;
405 | 
406 |   }
407 | }
408 | 
409 | // OperatorSets are uniquely identified by a (domain, opset_version) pair.
410 | message OperatorSetIdProto {
411 |   // The domain of the operator set being identified.
412 |   // The empty string ("") or absence of this field implies the operator
413 |   // set that is defined as part of the ONNX specification.
414 |   // This field MUST be present in this version of the IR when referring to any other operator set.
415 |   string domain = 1;
416 | 
417 |   // The version of the operator set being identified.
418 |   // This field MUST be present in this version of the IR.
419 |   int64 version = 2;
420 | }
421 | 
-------------------------------------------------------------------------------- /server.py: --------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | 
11 | import argparse
12 | from concurrent import futures
13 | import sys, os, time
14 | from operator import mul
15 | from functools import reduce
16 | 
17 | # cpp
18 | import cppimport
19 | import cppimport.import_hook
20 | cppimport.set_quiet(True)
21 | 
22 | #onnx
23 | import onnx
24 | 
25 | #gRPC for client-server communication
26 | import grpc
27 | 
28 | #project imports
29 | from common import minionn_onnx_pb2_grpc as minionn_grpc
30 | from common import minionn_onnx_pb2
31 | from common import onnx_helper, minionn_helper, operation_handler, config
32 | 
33 | # Logging
34 | import logging
35 | import logging.config
36 | logging.config.fileConfig('common/logging.conf')
37 | logger = logging.getLogger('minionn')
38 | 
39 | 
40 | class MinioNNServicer(object):
41 |     """
42 |     The service definition for GRPC.
43 |     """
44 |     def __init__(self, model, w, nodes, ip, mpc_port):
45 |         self.model_client = model
46 |         self.w_precomputed = w
47 |         self.nodes = nodes
48 |         self.ip = ip
49 |         self.mpc_port = mpc_port
50 | 
51 | 
52 |     def Precomputation(self, request, context):
53 |         """
54 |         Precomputation service - returns ONNX client model and ~w
55 |         """
Responding...") 57 | return minionn_onnx_pb2.PrecomputationResponse(model=self.model_client, w=self.w_precomputed) 58 | 59 | def Computation(self, request, context): 60 | """ 61 | Computation message - receives ~u and x_s and returns y_s 62 | """ 63 | logger.info("Got computation request.") 64 | logger.debug("xs has length: " + str((len(request.xs)))) 65 | 66 | logger.info("Opening MPC server port. Waiting for client to connect...") 67 | minionn_helper.init_mpc(self.ip, self.mpc_port, True) 68 | 69 | # Perform last precomputation step on U 70 | decU = minionn_helper.server_decrypt_u(request.u, config.server_skey) 71 | logger.debug("U has length: " + str(len(list(decU))) ) 72 | 73 | # Now system is ready to start NN 74 | handler = operation_handler.OperationHandler(self.nodes, self.model_client.graph.input[0].name) 75 | handler.init_server(decU) 76 | result = handler.run_network(x_in = request.xs, 77 | in_name = self.model_client.graph.input[0].name, 78 | out_name = self.model_client.graph.output[0].name) 79 | 80 | logger.info("Shutting down MPC server again.") 81 | minionn_helper.shutdown_mpc() 82 | 83 | logger.info("Computation response:" + str(result)) 84 | 85 | return minionn_onnx_pb2.ComputationResponse(ys=result) 86 | 87 | def main(): 88 | parser = argparse.ArgumentParser(description="MiniONN - ONNX compatible version") 89 | parser.add_argument( 90 | "-i","--input", 91 | type=str, required=True, 92 | help="The input protobuf file.", 93 | ) 94 | parser.add_argument( 95 | "-p","--port", 96 | type=int, required=False, default=config.port_rpc, 97 | help="Server port.", 98 | ) 99 | parser.add_argument( 100 | "-m","--mpc_port", 101 | type=int, required=False, default=config.port_aby, 102 | help="Server port for MPC.", 103 | ) 104 | parser.add_argument( 105 | "-v", "--verbose", 106 | required=False, default=False, action='store_true', 107 | help="Log verbosely.", 108 | ) 109 | 110 | args = parser.parse_args() 111 | 112 | """ 113 | Create and set up Logger 114 | """ 115 | loglevel = (logging.DEBUG if args.verbose else logging.INFO) 116 | logger.setLevel(loglevel) 117 | logger.info("MiniONN SERVER") 118 | 119 | """ 120 | First, read the model from input and strip it down for client 121 | """ 122 | model = onnx.load(args.input) 123 | if len(model.graph.node) == 0: 124 | logger.error("Error reading the ONNX model. Aborting.") 125 | sys.exit() 126 | 127 | # Now we have properly read the model. 
87 | def main():
88 |     parser = argparse.ArgumentParser(description="MiniONN - ONNX compatible version")
89 |     parser.add_argument(
90 |         "-i","--input",
91 |         type=str, required=True,
92 |         help="The input protobuf file.",
93 |     )
94 |     parser.add_argument(
95 |         "-p","--port",
96 |         type=int, required=False, default=config.port_rpc,
97 |         help="Server port.",
98 |     )
99 |     parser.add_argument(
100 |         "-m","--mpc_port",
101 |         type=int, required=False, default=config.port_aby,
102 |         help="Server port for MPC.",
103 |     )
104 |     parser.add_argument(
105 |         "-v", "--verbose",
106 |         required=False, default=False, action='store_true',
107 |         help="Log verbosely.",
108 |     )
109 | 
110 |     args = parser.parse_args()
111 | 
112 |     """
113 |     Create and set up Logger
114 |     """
115 |     loglevel = (logging.DEBUG if args.verbose else logging.INFO)
116 |     logger.setLevel(loglevel)
117 |     logger.info("MiniONN SERVER")
118 | 
119 |     """
120 |     First, read the model from input and strip it down for the client
121 |     """
122 |     model = onnx.load(args.input)
123 |     if len(model.graph.node) == 0:
124 |         logger.error("Error reading the ONNX model. Aborting.")
125 |         sys.exit()
126 | 
127 |     # Now we have properly read the model.
128 |     # Next, prepare a model without sensitive information
129 |     model_client = onnx_helper.stripModelFromPrivateData(model)
130 |     logger.info("Read ONNX model and generated client version.")
131 |     logger.debug("Graph Input:\n" + str(model.graph.input[0].name))
132 |     logger.debug("Graph Output:\n" + str(model.graph.output[0].name))
133 | 
134 |     """
135 |     With the two models loaded, we now prepare the model for local computation.
136 |     This includes:
137 |     - loading tensors from the model as python lists
138 |     - loading the model to C++
139 |     - generating keys
140 |     - precomputing ~w
141 |     """
142 |     logger.info("Parsing model into python and C++...")
143 | 
144 |     # Get tensors and dimensions from onnx
145 |     tensors = onnx_helper.retrieveTensorsFromModel(model)
146 |     tensors_dims = onnx_helper.retrieveTensorDimensionsFromModel(model)
147 | 
148 |     # Get nodes from the model and parse them for ws and bs
149 |     nodes = onnx_helper.retrieveNodesFromModel(model)
150 |     tensors_b, tensors_w = onnx_helper.get_bs_and_ws(nodes, tensors)
151 | 
152 |     logger.debug("Retrieved ws and bs:")
153 |     logger.debug("ws are:" + str(tensors_w))
154 |     logger.debug("bs are:" + str(tensors_b))
155 | 
156 |     # Do a sanity test on the detected Ws
157 |     # If a W gets reshaped before being used, we would not detect it
158 |     # as an input to a Gemm
159 |     # NOTE: This might be a problem in the future
160 |     assert len(tensors_w) == len(tensors_b), "Not all W matrices detected! Do some Ws change before being used? (e.g. reshape)"
161 | 
162 |     # Put tensors into cpp vector dict
163 |     # We use fractions to shift the tensors from floats to integers
164 |     # This means we multiply every w and b with a fraction
165 |     # The w gets multiplied with the fractional
166 |     # The b gets multiplied with the fractional*fractional
167 |     # This is because the client also multiplies his input with the fractional and
168 |     # W*x results in fractional*fractional for b
169 | 
170 |     # Iterate over tensor dimensions because there might be tensors
171 |     # that do not exist yet (have no tensor entry) but whose dimension is known
172 |     for name,dim in tensors_dims.items():
173 |         fractional = 1
174 | 
175 |         # Get value that belongs to this dim
176 |         # It might not exist, then the dimension is an output or input
177 |         # Keep the value at None then but still register it
178 |         value = None
179 |         if name in tensors:
180 |             value = tensors[name]
181 | 
182 |         # Adjust the fractional for bs (see above)
183 |         if name in tensors_b:
184 |             fractional = pow(config.fractional_base, 2)
185 | 
186 |         # Same for w
187 |         if name in tensors_w:
188 |             fractional = pow(config.fractional_base, 1)
189 | 
190 |         # And call put
191 |         minionn_helper.put_cpp_tensor(name, value, dim, fractional)
192 | 
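    # Worked example (editorial, assuming fractional_base = 1000):
    #   w = 0.5 is stored as 0.5 * 1000   = 500
    #   x = 0.2 is sent   as 0.2 * 1000   = 200     (scaled by the client)
    #   b = 0.1 is stored as 0.1 * 1000^2 = 100000
    # so w*x + b = 500*200 + 100000 = 200000 = (0.5*0.2 + 0.1) * 1000^2,
    # i.e. both summands live at the same fractional_base^2 scale.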
parsing model done.") 194 | 195 | logger.info("Calculatung ~w") 196 | 197 | # First, generate keys 198 | if not os.path.exists(config.asset_folder): 199 | os.makedirs(config.asset_folder) 200 | logger.info("Created directory " + config.asset_folder) 201 | 202 | logger.info("Generating keys") 203 | minionn_helper.init(config.SLOTS) 204 | minionn_helper.generate_keys(config.server_pkey,config.server_skey) 205 | 206 | # Prepare w for precomputation 207 | # For this, first create a NodeOperator stub that parses the model 208 | # and can give us the list of Ws (already transposed etc) 209 | # The minionn helper then puts together the w correctly 210 | logger.info("Parsing network") 211 | tmp = operation_handler.OperationHandler(nodes, model.graph.input[0].name, simulation=True) 212 | logger.info("Performing precomputation on W") 213 | w = minionn_helper.server_prepare_w(tmp.get_w_list(), config.server_pkey) 214 | 215 | """ 216 | We are now ready for incoming connections. Open the server 217 | """ 218 | logger.info("Done with server precomputations. Starting server.") 219 | servicer = MinioNNServicer(model_client, w, nodes, config.ip, args.mpc_port) 220 | 221 | logger.info("Starting to listen on port " + str(args.port)) 222 | server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=config.grpc_options) 223 | minionn_grpc.add_MinioNNServicer_to_server(servicer, server) 224 | server.add_insecure_port('[::]:' + str(args.port)) 225 | server.start() 226 | 227 | _ONE_DAY_IN_SECONDS = 60 * 60 * 24 228 | try: 229 | while True: 230 | time.sleep(_ONE_DAY_IN_SECONDS) 231 | except KeyboardInterrupt: 232 | server.stop(0) 233 | 234 | if __name__ == '__main__': 235 | main() 236 | -------------------------------------------------------------------------------- /tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SSGAalto/minionn/49bc403a7d91ba4c466843952d53a815cf10d78a/tools/__init__.py -------------------------------------------------------------------------------- /tools/csv_to_tensor.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | """ 12 | This file takes a csv file (e.g. written by numpy) and puts the values 13 | as an array into a TensorProto that is stored as a file. 14 | Such a TensorProto is required to run MiniONN as a client. 15 | Obviously, there are many ways to give such a TensorProto as input but 16 | this example file should be enough to get the idea of how to work with TensorProto. 17 | """ 18 | 19 | import onnx 20 | import struct 21 | import numpy as np 22 | 23 | filename = "array.txt" 24 | delimiter = "," 25 | tensor_name = "1" 26 | 27 | # Load values and convert to list 28 | values = np.loadtxt(filename, delimiter=delimiter) 29 | values_list = values.flatten().tolist() 30 | 31 | # Pack the input into raw bytes 32 | values_raw = struct.pack('%sf' % len(values_list), *values_list) 33 | 34 | # export the raw data to a tensor proto. 
33 | 
34 | # export the raw data to a tensor proto.
35 | # We use FLOAT type here but pack it in bytes
36 | t_type = onnx.TensorProto.FLOAT
37 | tensor = onnx.helper.make_tensor(tensor_name, t_type, list(values.shape), values_raw, True)
38 | 
39 | # Write to file
40 | f = open(filename + '.tensor', 'wb')
41 | f.write(tensor.SerializeToString())
42 | f.close()
--------------------------------------------------------------------------------
/tools/make_model.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | 
11 | 
12 | import onnx
13 | import struct
14 | import random
15 | import numpy as np
16 | 
17 | """
18 | Define the shape of the input
19 | Also define the operators with their shapes.
20 | Example:
21 | ["Gemm", (5,10,20), ["1","2","3"], ["4"]]
22 | means a Gemm onnx operator with 1*2+3=4
23 | with the shapes
24 | 1 : 5,10
25 | 2 : 10,20
26 | 3 : 20
27 | 4 : 5, 20
28 | Operators see https://github.com/onnx/onnx/blob/master/docs/Operators.md
29 | 
30 | This script always assumes that the input is multiplied first: x * W + b
31 | This can obviously be changed but the auto generation below assumes W to be
32 | at second place etc.
33 | """
34 | model_name = "manual_model"
35 | operators = [
36 |     ["Gemm", (2,3,4), ["1", "2", "3"], ["4"]],
37 |     ["Relu", (2,4), ["4"], ["5"]],
38 |     ["Gemm", (2,4,2), ["5", "6", "7"], ["8"]]
39 | ]
40 | 
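# Editorial shape walk-through for the operator list above:
#   "1"(2,3) x "2"(3,4) + "3"(4)  ->  "4"(2,4)
#   Relu("4")                     ->  "5"(2,4)
#   "5"(2,4) x "6"(4,2) + "7"(2)  ->  "8"(2,2)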
85 | """ 86 | initializers = [] 87 | initializers_value_info = [] 88 | for o in operators: 89 | if o[0] == "Gemm": 90 | # We need two randoms, W and b 91 | w_name = o[2][1] 92 | b_name = o[2][2] 93 | w_shape = (o[1][1], o[1][2]) 94 | b_shape = [o[1][2]] 95 | initializers.append(generate_random_tensor(w_name, w_shape)) 96 | initializers.append(generate_random_tensor(b_name, b_shape)) 97 | 98 | initializers_value_info.append( 99 | onnx.helper.make_tensor_value_info( 100 | w_name, onnx.TensorProto.FLOAT,w_shape 101 | )) 102 | initializers_value_info.append( 103 | onnx.helper.make_tensor_value_info( 104 | b_name,onnx.TensorProto.FLOAT,b_shape 105 | )) 106 | 107 | """ 108 | Lastly, graph and model 109 | """ 110 | graph_inputs = [ 111 | onnx.helper.make_tensor_value_info( 112 | operators[0][2][0], 113 | onnx.TensorProto.FLOAT, 114 | (operators[0][1][0],operators[0][1][1]) 115 | ) 116 | ] 117 | graph_inputs.extend(initializers_value_info) 118 | 119 | 120 | graph_outputs = [ 121 | onnx.helper.make_tensor_value_info( 122 | operators[-1][3][0], 123 | onnx.TensorProto.FLOAT, 124 | (operators[-1][1][0],operators[-1][1][-1]) 125 | ) 126 | ] 127 | 128 | 129 | graph = onnx.helper.make_graph(nodes, name, graph_inputs, graph_outputs, initializer=initializers) 130 | model = onnx.helper.make_model(graph) 131 | 132 | # Write to file 133 | f = open(model_name + '.onnx', 'wb') 134 | f.write(model.SerializeToString()) 135 | f.close() -------------------------------------------------------------------------------- /tools/make_model_non_reversed.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | import onnx 12 | import struct 13 | import random 14 | import numpy as np 15 | 16 | """ 17 | Lets define the shape of our input 18 | Also define the operators with their shapes. 
/tools/make_model_non_reversed.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | 
11 | import onnx
12 | import struct
13 | import random
14 | import numpy as np
15 | 
16 | """
17 | Let's define the shape of our input.
18 | Also define the operators with their shapes.
19 | Example:
20 | ["Gemm", (5,10,20), ["1","2","3"], ["4"]]
21 | means a Gemm onnx operator with 1*2+3=4
22 | with the shapes
23 | 1 : 5,10
24 | 2 : 10,20
25 | 3 : 20
26 | 4 : 5, 20
27 | Operators see https://github.com/onnx/onnx/blob/master/docs/Operators.md
28 | """
29 | model_name = "manual_model_normal"
30 | operators = [
31 |     ["Gemm", (20,30,40), ["2", "1", "3"], ["4"]],
32 |     ["Relu", (20,40), ["4"], ["5"]],
33 |     ["Gemm", (50,20,40), ["6", "5", "7"], ["8"]]
34 | ]
35 | 
36 | 
37 | """
38 | Helper functions
39 | """
40 | def generate_random_tensor(name, shape):
41 |     # Create random numpy input
42 |     x = np.random.randint(10000,None,size=shape,dtype='int64')
43 |     # x = np.array([[random.randint(0,10000) for n in range(0,cols) ] for m in range(0,rows)])
44 |     x_l = x.flatten().tolist()
45 | 
46 |     # Pack the input into raw bytes
47 |     x_raw = struct.pack('%sf' % len(x_l), *x_l)
48 | 
49 |     # export the raw data to a tensor proto
50 |     t_type = onnx.TensorProto.FLOAT
51 |     t = onnx.helper.make_tensor(name, t_type, list(x.shape), x_raw, True)
52 | 
53 |     return t
54 | 
55 | """
56 | First, create the input
57 | """
58 | x_t = generate_random_tensor(operators[0][2][1], (operators[0][1][1], operators[0][1][2]))
59 | 
60 | # Write to file
61 | f = open(model_name + '.onnx.tensor', 'wb')
62 | f.write(x_t.SerializeToString())
63 | f.close()
64 | 
65 | """
66 | Second, create the nodes
67 | Here, we have Gemm, Relu, Gemm
68 | """
69 | nodes = []
70 | 
71 | for o in operators:
72 |     name = o[0]
73 |     inputs = o[2]
74 |     outputs = o[3]
75 |     nodes.append(onnx.helper.make_node(name, inputs, outputs))
76 | 
77 | """
78 | Next, initializers (List of initial tensors)
79 | Create a list of tensors. Helpful for this is the shape tuple in operators.
80 | """
81 | initializers = []
82 | initializers_value_info = []
83 | for o in operators:
84 |     if o[0] == "Gemm":
85 |         # We need two randoms, W and b
86 |         w_name = o[2][0]
87 |         b_name = o[2][2]
88 |         w_shape = (o[1][0], o[1][1])
89 |         b_shape = [o[1][2]]
90 |         initializers.append(generate_random_tensor(w_name, w_shape))
91 |         initializers.append(generate_random_tensor(b_name, b_shape))
92 | 
93 |         initializers_value_info.append(
94 |             onnx.helper.make_tensor_value_info(
95 |                 w_name, onnx.TensorProto.FLOAT, w_shape
96 |             ))
97 |         initializers_value_info.append(
98 |             onnx.helper.make_tensor_value_info(
99 |                 b_name, onnx.TensorProto.FLOAT, b_shape
100 |             ))
101 | 
102 | """
103 | Lastly, graph and model
104 | """
105 | graph_inputs = [
106 |     onnx.helper.make_tensor_value_info(
107 |         "1",
108 |         onnx.TensorProto.FLOAT,
109 |         (operators[0][1][1],operators[0][1][2])
110 |     )
111 | ]
112 | graph_inputs.extend(initializers_value_info)
113 | 
114 | 
115 | graph_outputs = [
116 |     onnx.helper.make_tensor_value_info(
117 |         operators[-1][3][0],
118 |         onnx.TensorProto.FLOAT,
119 |         (operators[-1][1][0],operators[-1][1][-1])
120 |     )
121 | ]
122 | 
123 | 
124 | graph = onnx.helper.make_graph(nodes, model_name, graph_inputs, graph_outputs, initializer=initializers)
125 | model = onnx.helper.make_model(graph)
126 | 
127 | # Write to file
128 | f = open(model_name + '.onnx', 'wb')
129 | f.write(model.SerializeToString())
130 | f.close()
--------------------------------------------------------------------------------
/tools/make_model_only_gemm.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | 
11 | import onnx
12 | import struct
13 | import random
14 | import numpy as np
15 | 
16 | """
17 | Let's define the shape of our input.
18 | Also define the operators with their shapes.
19 | Example:
20 | ["Gemm", (5,10,20), ["1","2","3"], ["4"]]
21 | means a Gemm onnx operator with 1*2+3=4
22 | with the shapes
23 | 1 : 5,10
24 | 2 : 10,20
25 | 3 : 20
26 | 4 : 5, 20
27 | Operators see https://github.com/onnx/onnx/blob/master/docs/Operators.md
28 | """
29 | model_name = "manual_model_only_gemm"
30 | operators = [
31 |     ["Gemm", (2,1,3), ["2", "1", "3"], ["4"]]
32 | ]
33 | 
34 | w = []
35 | b = []
36 | x = []
37 | 
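# Editorial example: to pin the Gemm to known values instead of random ones,
# fill the lists above to match the shapes W"2"(2,1), x"1"(1,3), b"3"(3), e.g.
#   w = [5, 7]        -> W = [[5], [7]]
#   x = [1, 2, 3]     -> x = [[1, 2, 3]]
#   b = [10, 20, 30]
# generate_random_tensor() below then reshapes these instead of sampling.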
38 | """
39 | Helper functions
40 | """
41 | def generate_random_tensor(name, shape, predefined=None):
42 |     # Use the predefined values if given, otherwise
43 |     # create a random numpy input
44 |     if predefined is None or len(predefined) == 0:
45 |         x = np.random.randint(10000,None,size=shape,dtype='int64')
46 |     else:
47 |         x = np.array(predefined).reshape(shape)
48 | 
49 |     print(name + " is:")
50 |     print(x)
51 | 
52 |     x_l = x.flatten().tolist()
53 | 
54 |     # Pack the input into raw bytes
55 |     x_raw = struct.pack('%sf' % len(x_l), *x_l)
56 | 
57 |     # export the raw data to a tensor proto
58 |     t_type = onnx.TensorProto.FLOAT
59 |     t = onnx.helper.make_tensor(name, t_type, list(x.shape), x_raw, True)
60 | 
61 |     return t
62 | 
63 | 
64 | """
65 | First, create the input
66 | """
67 | x_t = generate_random_tensor(operators[0][2][1], (operators[0][1][1], operators[0][1][2]), predefined=x)
68 | 
69 | # Write to file
70 | f = open(model_name + '.onnx.tensor', 'wb')
71 | f.write(x_t.SerializeToString())
72 | f.close()
73 | 
74 | """
75 | Second, create the nodes
76 | Here, we have a single Gemm
77 | """
78 | nodes = []
79 | 
80 | for o in operators:
81 |     name = o[0]
82 |     inputs = o[2]
83 |     outputs = o[3]
84 |     nodes.append(onnx.helper.make_node(name, inputs, outputs))
85 | 
86 | """
87 | Next, initializers (List of initial tensors)
88 | Create a list of tensors. Helpful for this is the shape tuple in operators.
89 | """ 90 | initializers = [] 91 | initializers_value_info = [] 92 | for o in operators: 93 | if o[0] == "Gemm": 94 | # We need two randoms, W and b 95 | w_name = o[2][0] 96 | b_name = o[2][2] 97 | w_shape = (o[1][0], o[1][1]) 98 | b_shape = [o[1][2]] 99 | initializers.append(generate_random_tensor(w_name, w_shape, predefined = w)) 100 | initializers.append(generate_random_tensor(b_name, b_shape, predefined = b)) 101 | 102 | initializers_value_info.append( 103 | onnx.helper.make_tensor_value_info( 104 | w_name, onnx.TensorProto.FLOAT,w_shape 105 | )) 106 | initializers_value_info.append( 107 | onnx.helper.make_tensor_value_info( 108 | b_name,onnx.TensorProto.FLOAT,b_shape 109 | )) 110 | 111 | """ 112 | Lastly, graph and model 113 | """ 114 | graph_inputs = [ 115 | onnx.helper.make_tensor_value_info( 116 | "1", 117 | onnx.TensorProto.FLOAT, 118 | (operators[0][1][1],operators[0][1][2]) 119 | ) 120 | ] 121 | graph_inputs.extend(initializers_value_info) 122 | 123 | 124 | graph_outputs = [ 125 | onnx.helper.make_tensor_value_info( 126 | operators[-1][3][0], 127 | onnx.TensorProto.FLOAT, 128 | (operators[-1][1][0],operators[-1][1][-1]) 129 | ) 130 | ] 131 | 132 | 133 | graph = onnx.helper.make_graph(nodes, name, graph_inputs, graph_outputs, initializer=initializers) 134 | model = onnx.helper.make_model(graph) 135 | 136 | # Write to file 137 | f = open(model_name + '.onnx', 'wb') 138 | f.write(model.SerializeToString()) 139 | f.close() 140 | 141 | print("Wrote W*x+b model to file " + model_name + '.onnx') 142 | -------------------------------------------------------------------------------- /tools/out.txt: -------------------------------------------------------------------------------- 1 | 4.421662200000000000e+07,4.961658100000000000e+07 2 | 5.006131100000000000e+07,1.231823700000000000e+07 3 | -------------------------------------------------------------------------------- /tools/test_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Fritz Alder 3 | Copyright: 4 | Secure Systems Group, Aalto University 5 | https://ssg.aalto.fi/ 6 | 7 | This code is released under Apache 2.0 license 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | """ 10 | 11 | import onnx 12 | from onnx import numpy_helper 13 | import numpy as np 14 | 15 | print("These tests work with a fractional of 1 and a downscale of 10,000. 
/tools/test_model.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | 
11 | import onnx
12 | from onnx import numpy_helper
13 | import numpy as np
14 | 
15 | print("These tests work with a fractional of 1 and a downscale of 10,000. Set this in config.py accordingly before running MiniONN.")
16 | 
17 | model = onnx.load("manual_model.onnx")
18 | 
19 | tensor_dict = {}
20 | for t in model.graph.initializer:
21 |     tensor_dict[str(t.name)] = onnx.numpy_helper.to_array(t)
22 | 
23 | 
24 | input_tensor = onnx.TensorProto()
25 | with open('manual_model.onnx.tensor', 'rb') as fid:
26 |     content = fid.read()
27 | input_tensor.ParseFromString(content)
28 | 
29 | tensor_dict["1"] = onnx.numpy_helper.to_array(input_tensor)
30 | 
31 | # do fractionals
32 | fractional = 1
33 | downscale = 10000
34 | 
35 | single = ["1", "2", "6"]
36 | double = ["3", "7"]
37 | for s in single:
38 |     tensor_dict[s] = np.multiply(tensor_dict[s], fractional)
39 | 
40 | for s in double:
41 |     tensor_dict[s] = np.multiply(tensor_dict[s], fractional*fractional)
42 | 
43 | for s in tensor_dict:
44 |     tensor_dict[s] = np.array([int(d) for d in tensor_dict[s].flatten().tolist()]).reshape(tensor_dict[s].shape)
45 | 
46 | tensor_dict["4temp"] = np.matmul(tensor_dict["1"], tensor_dict["2"])
47 | tensor_dict["4added"] = np.add(tensor_dict["4temp"], tensor_dict["3"])
48 | tensor_dict["4"] = np.divide(tensor_dict["4added"],fractional*downscale).astype(int)
49 | 
50 | tensor_dict["5"] = np.maximum(tensor_dict["4"],0)
51 | 
52 | tensor_dict["8temp"] = np.matmul(tensor_dict["5"], tensor_dict["6"])
53 | tensor_dict["8added"] = np.add(tensor_dict["8temp"], tensor_dict["7"])
54 | tensor_dict["8"] = np.divide(np.maximum(tensor_dict["8added"],0),fractional*downscale).astype(int)
55 | 
56 | """
57 | print("W1")
58 | print(tensor_dict["2"])
59 | 
60 | print("b1")
61 | print(tensor_dict["3"])
62 | 
63 | print("Before Relu")
64 | print(tensor_dict["4"])
65 | 
66 | print("After Relu")
67 | print(tensor_dict["5"])
68 | 
69 | print("W2")
70 | print(tensor_dict["6"])
71 | 
72 | print("b2")
73 | print(tensor_dict["7"])
74 | """
75 | 
76 | print("Expected result")
77 | print(tensor_dict["8"])
78 | 
79 | # now check that the result matches the expected result exactly
80 | given = np.loadtxt("out.txt", delimiter=",")
81 | diff = np.subtract(given, tensor_dict["8"])
82 | 
83 | print("Given result")
84 | print(given)
85 | 
86 | print("Diff")
87 | print(diff)
88 | 
89 | 
90 | np.testing.assert_array_equal(tensor_dict["8"], given, err_msg="Result is not the same as expected result!", verbose=True)
91 | 
92 | print("All numbers equal. Test passed")
93 | 
94 | 
--------------------------------------------------------------------------------
/tools/test_non_reversed.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | 
11 | import onnx
12 | from onnx import numpy_helper
13 | import numpy as np
14 | 
15 | print("These tests work with a fractional of 1 and a downscale of 10,000. Set this in config.py accordingly before running MiniONN.")
16 | 
17 | model = onnx.load("manual_model_normal.onnx")
18 | 
19 | tensor_dict = {}
20 | for t in model.graph.initializer:
21 |     tensor_dict[str(t.name)] = onnx.numpy_helper.to_array(t)
22 | 
23 | 
24 | input_tensor = onnx.TensorProto()
25 | with open('manual_model_normal.onnx.tensor', 'rb') as fid:
26 |     content = fid.read()
27 | input_tensor.ParseFromString(content)
28 | 
29 | tensor_dict["1"] = onnx.numpy_helper.to_array(input_tensor)
30 | 
31 | # do fractionals
32 | fractional = 1
33 | downscale = 10000
34 | 
35 | single = ["1", "2", "6"]
36 | double = ["3", "7"]
37 | for s in single:
38 |     tensor_dict[s] = np.multiply(tensor_dict[s], fractional)
39 | 
40 | for s in double:
41 |     tensor_dict[s] = np.multiply(tensor_dict[s], fractional*fractional)
42 | 
43 | for s in tensor_dict:
44 |     tensor_dict[s] = np.array([int(d) for d in tensor_dict[s].flatten().tolist()]).reshape(tensor_dict[s].shape)
45 | 
46 | 
47 | tensor_dict["4temp"] = np.matmul(tensor_dict["2"], tensor_dict["1"])
48 | tensor_dict["4added"] = np.add(tensor_dict["4temp"], tensor_dict["3"])
49 | tensor_dict["4"] = np.divide(tensor_dict["4added"],fractional*downscale).astype(int)
50 | 
51 | tensor_dict["5"] = np.maximum(tensor_dict["4"],0)
52 | 
53 | tensor_dict["8temp"] = np.matmul(tensor_dict["6"], tensor_dict["5"])
54 | tensor_dict["8added"] = np.add(tensor_dict["8temp"], tensor_dict["7"])
55 | tensor_dict["8"] = np.divide(np.maximum(tensor_dict["8added"],0),fractional*downscale).astype(int)
56 | 
57 | """
58 | print("Input")
59 | print(tensor_dict["1"])
60 | 
61 | print("W1")
62 | print(tensor_dict["2"])
63 | 
64 | print("b1")
65 | print(tensor_dict["3"])
66 | 
67 | print("Before Relu")
68 | print(tensor_dict["4"])
69 | 
70 | print("After Relu")
71 | print(tensor_dict["5"])
72 | 
73 | print("W2")
74 | print(tensor_dict["6"])
75 | 
76 | print("b2")
77 | print(tensor_dict["7"])
78 | """
79 | 
80 | print("Expected result")
81 | print(tensor_dict["8"])
82 | 
83 | # now check that the result matches the expected result exactly
84 | given = np.loadtxt("out.txt", delimiter=",")
85 | diff = np.subtract(given, tensor_dict["8"])
86 | 
87 | print("Given result")
88 | print(given)
89 | 
90 | print("Diff")
91 | print(diff)
92 | 
93 | 
94 | np.testing.assert_array_equal(tensor_dict["8"], given, err_msg="Result is not the same as expected result!", verbose=True)
95 | 
96 | print("All numbers equal. Test passed")
--------------------------------------------------------------------------------
/tools/test_only_gemm.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Fritz Alder
3 | Copyright:
4 | Secure Systems Group, Aalto University
5 | https://ssg.aalto.fi/
6 | 
7 | This code is released under Apache 2.0 license
8 | http://www.apache.org/licenses/LICENSE-2.0
9 | """
10 | 
11 | import onnx
12 | from onnx import numpy_helper
13 | import numpy as np
14 | 
15 | print("These tests work with a fractional of 1 and a downscale of 10,000. Set this in config.py accordingly before running MiniONN.")
16 | 
17 | model = onnx.load("manual_model_only_gemm.onnx")
18 | 
19 | tensor_dict = {}
20 | for t in model.graph.initializer:
21 |     tensor_dict[str(t.name)] = onnx.numpy_helper.to_array(t)
22 | 
23 | 
24 | input_tensor = onnx.TensorProto()
25 | with open('manual_model_only_gemm.onnx.tensor', 'rb') as fid:
26 |     content = fid.read()
27 | input_tensor.ParseFromString(content)
28 | 
29 | tensor_dict["1"] = onnx.numpy_helper.to_array(input_tensor)
30 | #tensor_dict["1"] = np.reshape(tensor_dict["1temp"], (1,3))
31 | 
32 | 
33 | # do fractionals
34 | fractional = 1
35 | downscale = 10000
36 | 
37 | single = ["1", "2"]
38 | double = ["3"]
39 | for s in single:
40 |     tensor_dict[s] = np.multiply(tensor_dict[s], fractional)
41 | 
42 | for s in double:
43 |     tensor_dict[s] = np.multiply(tensor_dict[s], fractional*fractional)
44 | 
45 | for s in tensor_dict:
46 |     tensor_dict[s] = np.array([int(d) for d in tensor_dict[s].flatten().tolist()]).reshape(tensor_dict[s].shape)
47 | 
48 | tensor_dict["4temp"] = np.matmul(tensor_dict["2"], tensor_dict["1"])
49 | tensor_dict["4added"] = np.add(tensor_dict["4temp"], tensor_dict["3"])
50 | tensor_dict["4"] = np.divide(tensor_dict["4added"],fractional*downscale).astype(int)
51 | 
52 | """
53 | print("Input")
54 | print(tensor_dict["1"])
55 | 
56 | print("W1")
57 | print(tensor_dict["2"])
58 | 
59 | print("b1")
60 | print(tensor_dict["3"])
61 | """
62 | 
63 | print("Expected result")
64 | print(tensor_dict["4"])
65 | 
66 | # now check that the result matches the expected result exactly
67 | given = np.loadtxt("out.txt", delimiter=",").astype(int)
68 | diff = np.subtract(given, tensor_dict["4"])
69 | 
70 | print("Given result")
71 | print(given)
72 | 
73 | print("Diff")
74 | print(diff)
75 | 
76 | 
77 | np.testing.assert_array_equal(tensor_dict["4"], given, err_msg="Result is not the same as expected result!", verbose=True)
78 | 
79 | print("All numbers equal. Test passed")
--------------------------------------------------------------------------------