├── .circleci └── config.yml ├── .gitignore ├── BUILD ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── WORKSPACE ├── artifacts.py ├── build.sh ├── bundle.sh ├── configure.sh ├── docker ├── Makefile ├── build │ └── Dockerfile ├── deploy │ └── Dockerfile └── whltest │ └── Dockerfile ├── examples ├── graph_mode.py └── intro.py ├── pytest.sh ├── requirements-dev.txt ├── requirements-test.txt ├── setup.cfg ├── setup.py ├── tf_big ├── BUILD ├── __init__.py ├── cc │ ├── big_tensor.cc │ ├── big_tensor.h │ ├── kernels │ │ └── big_kernels.cc │ └── ops │ │ └── big_ops.cc └── python │ ├── __init__.py │ ├── ops │ ├── __init__.py │ ├── big_ops.py │ └── big_ops_test.py │ ├── tensor.py │ ├── tensor_test.py │ └── test │ ├── BUILD │ ├── __init__.py │ ├── execution_context.py │ └── execution_context_test.py └── third_party ├── gmp └── libgmp.BUILD └── tf ├── BUILD ├── BUILD.tpl └── tf_configure.bzl /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | commands: 4 | 5 | bootstrap-macos: 6 | steps: 7 | - run: 8 | name: Bootstrap macOS 9 | command: | 10 | HOMEBREW_NO_AUTO_UPDATE=1 brew tap bazelbuild/tap >> build.log 11 | HOMEBREW_NO_AUTO_UPDATE=1 brew install \ 12 | bazelbuild/tap/bazel gmp mmv tree >> build.log 13 | 14 | curl -O https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh --silent 15 | bash Miniconda3-latest-MacOSX-x86_64.sh -b -f >> build.log 16 | ~/miniconda3/bin/conda create -n py3.5 python=3.5 -y 17 | ln -s ~/miniconda3/envs/py3.5/bin/python ~/python3.5 18 | ~/miniconda3/bin/conda create -n py3.6 python=3.6 -y 19 | ln -s ~/miniconda3/envs/py3.6/bin/python ~/python3.6 20 | 21 | create-pyenv: 22 | # Create new Python virtual environment 23 | parameters: 24 | python-version: 25 | type: string 26 | python-environment: 27 | type: string 28 | steps: 29 | - run: 30 | name: Create Python << parameters.python-version >> environment '<< parameters.python-environment >>' 31 | 
command: | 32 | ~/python<< parameters.python-version >> -m venv << parameters.python-environment >> 33 | 34 | build: 35 | parameters: 36 | python-version: 37 | type: string 38 | tensorflow-package: 39 | type: string 40 | tensorflow-identifier: 41 | type: string 42 | # next parameter should be derived 43 | python-environment: 44 | type: string 45 | steps: 46 | - create-pyenv: 47 | python-environment: << parameters.python-environment >> 48 | python-version: << parameters.python-version >> 49 | - run: 50 | name: Install requirements-dev.txt in '<< parameters.python-environment >>' 51 | command: | 52 | . << parameters.python-environment >>/bin/activate 53 | pip install -q -U -r requirements-dev.txt 54 | pip freeze 55 | - run: 56 | name: Install << parameters.tensorflow-package >> in '<< parameters.python-environment >>' 57 | command: | 58 | . << parameters.python-environment >>/bin/activate 59 | make clean 60 | pip install -q -U << parameters.tensorflow-package >> 61 | make .bazelrc 62 | # reduce Bazel output to logs 63 | echo 'test --noshow_progress --noshow_loading_progress' >> .bazelrc 64 | echo 'build --noshow_progress --noshow_loading_progress' >> .bazelrc 65 | - run: 66 | name: Test in '<< parameters.python-environment >>' 67 | command: | 68 | . << parameters.python-environment >>/bin/activate 69 | python --version 70 | pip freeze 71 | make test 72 | - run: 73 | name: Build in '<< parameters.python-environment >>' 74 | command: | 75 | . 
<< parameters.python-environment >>/bin/activate 76 | python --version 77 | pip freeze 78 | DIR_TAGGED=./out/builds/py<< parameters.python-version >>-<< parameters.tensorflow-identifier >> make build 79 | - persist_to_workspace: 80 | root: ./out 81 | paths: 82 | - builds/py<< parameters.python-version >>-<< parameters.tensorflow-identifier >> 83 | 84 | bundle: 85 | parameters: 86 | python-version: 87 | type: string 88 | # next parameter should be derived 89 | python-environment: 90 | type: string 91 | steps: 92 | - create-pyenv: 93 | python-environment: << parameters.python-environment >> 94 | python-version: << parameters.python-version >> 95 | - run: 96 | name: Install requirements-dev.txt in '<< parameters.python-environment >>' 97 | command: | 98 | . << parameters.python-environment >>/bin/activate 99 | pip install -q -U -r requirements-dev.txt 100 | pip freeze 101 | - attach_workspace: 102 | at: ./out 103 | - run: 104 | name: Merge builds 105 | command: | 106 | tree ./out 107 | rsync -avm ./out/builds/*/ ./out/merged 108 | tree ./out 109 | - run: 110 | name: Bundle package in '<< parameters.python-environment >>' 111 | command: | 112 | . << parameters.python-environment >>/bin/activate 113 | python --version 114 | pip freeze 115 | DIR_TAGGED=./out/merged DIR_WHEEL=./out/wheelhouse make bundle 116 | tree ./out 117 | - persist_to_workspace: 118 | root: ./out 119 | paths: 120 | - 'wheelhouse' 121 | 122 | whltest: 123 | parameters: 124 | python-version: 125 | type: string 126 | tensorflow-package: 127 | type: string 128 | python-environment: 129 | type: string 130 | steps: 131 | - create-pyenv: 132 | python-version: << parameters.python-version >> 133 | python-environment: << parameters.python-environment >> 134 | - attach_workspace: 135 | at: ./out 136 | - run: 137 | name: Configure '<< parameters.python-environment >>' to use << parameters.tensorflow-package >> 138 | command: | 139 | set -e 140 | set -x 141 | tree ./out/wheelhouse 142 | . 
<< parameters.python-environment >>/bin/activate 143 | # we want to make sure that tests are run against whatever is 144 | # in the wheelhouse; for this we'd like to use --no-index but 145 | # that will also block dependencies from being installed. 146 | # as a result we first install dependencies by installing the 147 | # package and then immediately remove it again 148 | pip install -r requirements-test.txt 149 | pip install -q -U tf-big --find-links ./out/wheelhouse 150 | pip uninstall tf-big -y 151 | # install the package, but forced to only use the wheelhouse 152 | pip install -U tf-big --no-deps --no-cache-dir --no-index --find-links ./out/wheelhouse 153 | # make sure we are testing against the right version of TensorFlow 154 | pip install -q -U << parameters.tensorflow-package >> 155 | - run: 156 | name: Test wheel in '<< parameters.python-environment >>' 157 | command: | 158 | . << parameters.python-environment >>/bin/activate 159 | python --version 160 | pip freeze 161 | make pytest 162 | 163 | jobs: 164 | 165 | build-linux: 166 | parameters: 167 | python-version: 168 | type: string 169 | tensorflow-package: 170 | type: string 171 | tensorflow-identifier: 172 | type: string 173 | docker: 174 | - image: tfencrypted/tf-big:build 175 | working_directory: ~/repo 176 | steps: 177 | - checkout 178 | - build: 179 | python-version: << parameters.python-version >> 180 | tensorflow-package: << parameters.tensorflow-package >> 181 | tensorflow-identifier: << parameters.tensorflow-identifier >> 182 | python-environment: build-linux-py<< parameters.python-version >>-<< parameters.tensorflow-identifier >> 183 | 184 | build-macos: 185 | parameters: 186 | python-version: 187 | type: string 188 | tensorflow-package: 189 | type: string 190 | tensorflow-identifier: 191 | type: string 192 | macos: 193 | xcode: "10.0.0" 194 | working_directory: ~/repo 195 | steps: 196 | - checkout 197 | - bootstrap-macos 198 | - build: 199 | python-version: << parameters.python-version >> 200 
| tensorflow-package: << parameters.tensorflow-package >> 201 | tensorflow-identifier: << parameters.tensorflow-identifier >> 202 | python-environment: build-macos-py<< parameters.python-version >>-<< parameters.tensorflow-identifier >> 203 | 204 | bundle-linux: 205 | parameters: 206 | python-version: 207 | type: string 208 | docker: 209 | - image: tfencrypted/tf-big:build 210 | working_directory: ~/repo 211 | steps: 212 | - checkout 213 | - bundle: 214 | python-version: << parameters.python-version >> 215 | python-environment: bundle-linux-py<< parameters.python-version >> 216 | 217 | bundle-macos: 218 | parameters: 219 | python-version: 220 | type: string 221 | macos: 222 | xcode: "10.0.0" 223 | working_directory: ~/repo 224 | steps: 225 | - checkout 226 | - bootstrap-macos 227 | - bundle: 228 | python-version: << parameters.python-version >> 229 | python-environment: bundle-macos-py<< parameters.python-version >> 230 | 231 | whltest-linux: 232 | parameters: 233 | python-version: 234 | type: string 235 | tensorflow-package: 236 | type: string 237 | docker: 238 | - image: tfencrypted/tf-big:whltest 239 | working_directory: ~/repo 240 | steps: 241 | - checkout 242 | - whltest: 243 | python-version: << parameters.python-version >> 244 | python-environment: test-linux-py<< parameters.python-version >> 245 | tensorflow-package: << parameters.tensorflow-package >> 246 | 247 | whltest-macos: 248 | parameters: 249 | python-version: 250 | type: string 251 | tensorflow-package: 252 | type: string 253 | macos: 254 | xcode: "10.0.0" 255 | working_directory: ~/repo 256 | steps: 257 | - checkout 258 | - bootstrap-macos 259 | - whltest: 260 | python-version: << parameters.python-version >> 261 | python-environment: test-macos-py<< parameters.python-version >> 262 | tensorflow-package: << parameters.tensorflow-package >> 263 | 264 | store: 265 | docker: 266 | - image: tfencrypted/tf-big:deploy 267 | working_directory: ~/repo 268 | steps: 269 | - checkout 270 | - 
attach_workspace: 271 | at: ./out 272 | - run: 273 | name: List content to be stored 274 | command: | 275 | tree ./out/wheelhouse 276 | - store_artifacts: 277 | path: ./out/wheelhouse 278 | destination: wheelhouse 279 | 280 | deploy: 281 | docker: 282 | - image: tfencrypted/tf-big:deploy 283 | working_directory: ~/repo 284 | steps: 285 | - checkout 286 | - create-pyenv: 287 | python-version: "3.6" 288 | python-environment: "deploy-py3.6" 289 | - attach_workspace: 290 | at: ./out 291 | - run: 292 | name: Configure 'deploy-3.6' 293 | command: | 294 | . deploy-py3.6/bin/activate 295 | pip install -q -U -r requirements-dev.txt 296 | - run: 297 | name: Upload to PyPI 298 | command: | 299 | tree ./out/wheelhouse 300 | . deploy-py3.6/bin/activate 301 | DIR_WHEEL=./out/wheelhouse make push-wheels 302 | 303 | workflows: 304 | version: 2 305 | 306 | # these workflows implement the following logic: 307 | # - non-master branch: run quick tests 308 | # - master branch: build, test, and store wheels 309 | # - non-semver tag: build, test, and store wheels 310 | # - semver tag: build, test, store, and deploy wheels 311 | 312 | quicktest: 313 | jobs: 314 | - build-linux: 315 | name: build-linux-py3.6-tfnightly 316 | python-version: "3.6" 317 | tensorflow-package: "tf-nightly" 318 | tensorflow-identifier: "tfnightly" 319 | filters: 320 | branches: 321 | ignore: master 322 | tags: 323 | ignore: /.*/ 324 | 325 | - bundle-linux: 326 | name: bundle-linux-py3.6 327 | python-version: "3.6" 328 | requires: 329 | - build-linux-py3.6-tfnightly 330 | filters: 331 | branches: 332 | ignore: master 333 | tags: 334 | ignore: /.*/ 335 | 336 | - whltest-linux: 337 | name: whltest-linux-py3.6-tfnightly 338 | python-version: "3.6" 339 | tensorflow-package: "tf-nightly" 340 | requires: 341 | - bundle-linux-py3.6 342 | filters: 343 | branches: 344 | ignore: master 345 | tags: 346 | ignore: /.*/ 347 | 348 | linux-py3.5: 349 | jobs: 350 | - build-linux: 351 | name: build-linux-py3.5-tfnightly 352 | 
python-version: "3.5" 353 | tensorflow-package: "tf-nightly" 354 | tensorflow-identifier: "tfnightly" 355 | filters: 356 | branches: 357 | only: master 358 | tags: 359 | only: /.*/ 360 | 361 | - bundle-linux: 362 | name: bundle-linux-py3.5 363 | python-version: "3.5" 364 | requires: 365 | - build-linux-py3.5-tfnightly 366 | filters: 367 | branches: 368 | only: master 369 | tags: 370 | only: /.*/ 371 | 372 | - whltest-linux: 373 | name: whltest-linux-py3.5-tfnightly 374 | python-version: "3.5" 375 | tensorflow-package: "tf-nightly" 376 | requires: 377 | - bundle-linux-py3.5 378 | filters: 379 | branches: 380 | only: master 381 | tags: 382 | only: /.*/ 383 | 384 | - store: 385 | name: store-linux-py3.5 386 | requires: 387 | - whltest-linux-py3.5-tfnightly 388 | filters: 389 | branches: 390 | only: master 391 | tags: 392 | only: /.*/ 393 | 394 | - hold: 395 | type: approval 396 | name: hold-linux-py3.5 397 | requires: 398 | - store-linux-py3.5 399 | filters: 400 | branches: 401 | ignore: /.*/ 402 | tags: 403 | only: /^(?:[0-9]+)\.(?:[0-9]+)\.(?:[0-9]+)(?:(\-rc[0-9]+)?)$/ 404 | 405 | - deploy: 406 | name: deploy-linux-py3.5 407 | requires: 408 | - hold-linux-py3.5 409 | filters: 410 | branches: 411 | ignore: /.*/ 412 | tags: 413 | only: /^(?:[0-9]+)\.(?:[0-9]+)\.(?:[0-9]+)(?:(\-rc[0-9]+)?)$/ 414 | 415 | linux-py3.6: 416 | jobs: 417 | - build-linux: 418 | name: build-linux-py3.6-tfnightly 419 | python-version: "3.6" 420 | tensorflow-package: "tf-nightly" 421 | tensorflow-identifier: "tfnightly" 422 | filters: 423 | branches: 424 | only: master 425 | tags: 426 | only: /.*/ 427 | 428 | - bundle-linux: 429 | name: bundle-linux-py3.6 430 | python-version: "3.6" 431 | requires: 432 | - build-linux-py3.6-tfnightly 433 | filters: 434 | branches: 435 | only: master 436 | tags: 437 | only: /.*/ 438 | 439 | - whltest-linux: 440 | name: whltest-linux-py3.6-tfnightly 441 | python-version: "3.6" 442 | tensorflow-package: "tf-nightly" 443 | requires: 444 | - bundle-linux-py3.6 445 | 
filters: 446 | branches: 447 | only: master 448 | tags: 449 | only: /.*/ 450 | 451 | - store: 452 | name: store-linux-py3.6 453 | requires: 454 | - whltest-linux-py3.6-tfnightly 455 | filters: 456 | branches: 457 | only: master 458 | tags: 459 | only: /.*/ 460 | 461 | - hold: 462 | type: approval 463 | name: hold-linux-py3.6 464 | requires: 465 | - store-linux-py3.6 466 | filters: 467 | branches: 468 | ignore: /.*/ 469 | tags: 470 | only: /^(?:[0-9]+)\.(?:[0-9]+)\.(?:[0-9]+)(?:(\-rc[0-9]+)?)$/ 471 | 472 | - deploy: 473 | name: deploy-linux-py3.6 474 | requires: 475 | - hold-linux-py3.6 476 | filters: 477 | branches: 478 | ignore: /.*/ 479 | tags: 480 | only: /^(?:[0-9]+)\.(?:[0-9]+)\.(?:[0-9]+)(?:(\-rc[0-9]+)?)$/ 481 | 482 | macos-py3.6: 483 | jobs: 484 | - build-macos: 485 | name: build-macos-py3.6-tfnightly 486 | python-version: "3.6" 487 | tensorflow-package: "tf-nightly" 488 | tensorflow-identifier: "tfnightly" 489 | filters: 490 | branches: 491 | only: master 492 | tags: 493 | only: /.*/ 494 | 495 | - bundle-macos: 496 | name: bundle-macos-py3.6 497 | python-version: "3.6" 498 | requires: 499 | - build-macos-py3.6-tfnightly 500 | filters: 501 | branches: 502 | only: master 503 | tags: 504 | only: /.*/ 505 | 506 | - whltest-macos: 507 | name: whltest-macos-py3.6-tfnightly 508 | python-version: "3.6" 509 | tensorflow-package: "tf-nightly" 510 | requires: 511 | - bundle-macos-py3.6 512 | filters: 513 | branches: 514 | only: master 515 | tags: 516 | only: /.*/ 517 | 518 | - store: 519 | name: store-macos-py3.6 520 | requires: 521 | - whltest-macos-py3.6-tfnightly 522 | filters: 523 | branches: 524 | only: master 525 | tags: 526 | only: /.*/ 527 | 528 | - hold: 529 | type: approval 530 | name: hold-macos-py3.6 531 | requires: 532 | - store-macos-py3.6 533 | filters: 534 | branches: 535 | ignore: /.*/ 536 | tags: 537 | only: /^(?:[0-9]+)\.(?:[0-9]+)\.(?:[0-9]+)(?:(\-rc[0-9]+)?)$/ 538 | 539 | - deploy: 540 | name: deploy-macos-py3.6 541 | requires: 542 | - 
hold-macos-py3.6 543 | filters: 544 | branches: 545 | ignore: /.*/ 546 | tags: 547 | only: /^(?:[0-9]+)\.(?:[0-9]+)\.(?:[0-9]+)(?:(\-rc[0-9]+)?)$/ 548 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bazel* 2 | .bazelrc 3 | .vscode 4 | 5 | **.pyc 6 | .mypy* 7 | __pycache__ 8 | **/*egg-info 9 | 10 | artifacts/* 11 | wheelhouse/* 12 | tagged/* 13 | 14 | **.DS_Store 15 | -------------------------------------------------------------------------------- /BUILD: -------------------------------------------------------------------------------- 1 | 2 | sh_binary( 3 | name = "build_sh", 4 | srcs = ["build.sh"], 5 | data = [ 6 | "README.md", 7 | "MANIFEST.in", 8 | "setup.py", 9 | "//tf_big:tf_big_py", 10 | ], 11 | ) 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include tf_big/ *.so -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | DIR_TAGGED ?= ./tagged 2 | DIR_WHEEL ?= ./wheelhouse 3 | 4 | .bazelrc: 5 | TF_NEED_CUDA=0 ./configure.sh 6 | 7 | clean: 8 | bazel clean 9 | rm -f .bazelrc 10 | 11 | test: .bazelrc 12 | bazel test ... --test_output=all 13 | 14 | build: .bazelrc 15 | mkdir -p $(DIR_TAGGED) 16 | ./build.sh $(DIR_TAGGED) 17 | 18 | bundle: 19 | mkdir -p $(DIR_WHEEL) 20 | ./bundle.sh $(DIR_TAGGED) $(DIR_WHEEL) 21 | 22 | pytest: 23 | ./pytest.sh 24 | 25 | fmt: 26 | cd tf_big && find . 
-iname *.h -o -iname *.cc | xargs clang-format -i -style=google 27 | isort --atomic --recursive tf_big examples 28 | black tf_big examples 29 | 30 | lint: 31 | cd tf_big && find . -iname *.h -o -iname *.cc | xargs cpplint --filter=-legal/copyright 32 | flake8 tf_big examples 33 | 34 | download-wheels: 35 | rm -rf $(DIR_WHEEL) 36 | mkdir -p $(DIR_WHEEL) 37 | DESTDIR=$(DIR_WHEEL) python ./artifacts.py 38 | 39 | push-wheels: 40 | python -m twine upload $(DIR_WHEEL)/*.whl 41 | 42 | .PHONY: clean test build bundle pytest fmt lint download-wheels push-wheels 43 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TF Big 2 | 3 | TF Big adds big number support to TensorFlow, allowing computations to be performed on arbitrary precision integers. Internally these are represented as variant tensors of [GMP](https://gmplib.org/) values, and exposed in Python through the `tf_big.Tensor` wrapper for convenience. For importing and exporting, numbers are typically expressed as strings. 
4 | 5 | [![PyPI](https://img.shields.io/pypi/v/tf-big.svg)](https://pypi.org/project/tf-big/) [![CircleCI Badge](https://circleci.com/gh/tf-encrypted/tf-big/tree/master.svg?style=svg)](https://circleci.com/gh/tf-encrypted/tf-big/tree/master) 6 | 7 | ## Usage 8 | 9 | ```python 10 | import tensorflow as tf 11 | import tf_big 12 | 13 | # load large values as strings 14 | x = tf_big.constant([["100000000000000000000", "200000000000000000000"]]) 15 | 16 | # load ordinary TensorFlow tensors 17 | y = tf_big.import_tensor(tf.constant([[3, 4]])) 18 | 19 | # perform computation as usual 20 | z = x * y 21 | 22 | # export result back into a TensorFlow tensor 23 | tf_res = tf_big.export_tensor(z) 24 | print(tf_res) 25 | ``` 26 | 27 | ## Installation 28 | 29 | Python 3 packages are available from [PyPI](https://pypi.org/project/tf-big/): 30 | 31 | ``` 32 | pip install tf-big 33 | ``` 34 | 35 | See below for further instructions for setting up a development environment. 36 | 37 | ## Development 38 | 39 | ### Requirements 40 | 41 | We recommend using [Miniconda](https://docs.conda.io/en/latest/miniconda.html) or [Anaconda](https://www.anaconda.com/distribution/) to set up and use a Python 3.5 or 3.6 environment for all instructions below: 42 | 43 | ``` 44 | conda create -n tfbig-dev python=3.6 45 | source activate tfbig-dev 46 | ``` 47 | 48 | #### Ubuntu 49 | 50 | The only requirement for Ubuntu is to have [docker installed](https://docs.docker.com/install/linux/docker-ce/ubuntu/). This is the recommended way to [build custom operations for TensorFlow](https://github.com/tensorflow/custom-op). We provide a custom development container for TF Big with all dependencies already installed. 51 | 52 | #### macOS 53 | 54 | Setting up a development environment on macOS is a little more involved since we cannot use a docker container. 
We need four things: 55 | 56 | - Python (>= 3.5) 57 | - [Bazel](https://www.bazel.build/) (>= 0.15.0) 58 | - [GMP](https://gmplib.org/) (>= 6.1.2) 59 | - [TensorFlow](https://www.tensorflow.org/) (see setup.py for version requirements for your TF Big version) 60 | 61 | Using [Homebrew](https://brew.sh/) we first make sure that both [Bazel](https://docs.bazel.build/versions/master/install-os-x.html#install-with-installer-mac-os-x) and GMP are installed. We recommend using a Bazel version earlier than 1.0.0, e.g.: 62 | 63 | ``` 64 | brew tap bazelbuild/tap 65 | brew extract bazel bazelbuild/tap --version 0.26.1 66 | brew install gmp 67 | brew install mmv 68 | ``` 69 | 70 | The remaining PyPI packages can then be installed using: 71 | 72 | ``` 73 | pip install -r requirements-dev.txt 74 | ``` 75 | 76 | ### Testing 77 | 78 | #### Ubuntu 79 | 80 | Run the tests on Ubuntu by running the `make test` command inside of a docker container. Right now, the docker container doesn't exist on docker hub yet so we must first build it: 81 | 82 | ``` 83 | docker build -t tf-encrypted/tf-big:build . 84 | ``` 85 | 86 | Then we can run `make test`: 87 | 88 | ``` 89 | sudo docker run -it \ 90 | -v `pwd`:/opt/my-project -w /opt/my-project \ 91 | tf-encrypted/tf-big:build /bin/bash -c "make test" 92 | ``` 93 | 94 | #### macOS 95 | 96 | Once the development environment is set up we can simply run: 97 | 98 | ``` 99 | make test 100 | ``` 101 | 102 | This will install TensorFlow if not previously installed and build and run the tests. 103 | 104 | ### Building pip package 105 | 106 | Just run: 107 | 108 | ``` 109 | make build && make bundle 110 | ``` 111 | 112 | For linux, doing it inside the tensorflow/tensorflow:custom-op container is recommended. Note that [CircleCI](#circle-ci) is currently used to build the official pip packages. 113 | 114 | ## Circle CI 115 | 116 | We use [Circle CI](https://circleci.com/gh/tf-encrypted/workflows/tf-big) for integration testing and deployment of TF Big. 
117 | 118 | ### Releasing 119 | 120 | 1. update version number in setup.py and push to master; this will build and tests wheels 121 | 2. iterate 1. until happy with the release, having potentially tested the wheel manually 122 | 3. when happy, tag a commit with semver label and push; this will build, test, and deploy wheels 123 | -------------------------------------------------------------------------------- /WORKSPACE: -------------------------------------------------------------------------------- 1 | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") 2 | 3 | load("//third_party/tf:tf_configure.bzl", "tf_configure") 4 | 5 | http_archive( 6 | name = "com_google_googletest", 7 | url = "https://github.com/google/googletest/archive/release-1.8.1.zip", 8 | strip_prefix = "googletest-release-1.8.1", 9 | sha256 = "927827c183d01734cc5cfef85e0ff3f5a92ffe6188e0d18e909c5efebf28a0c7", 10 | ) 11 | 12 | tf_configure(name = "local_config_tf") 13 | 14 | new_local_repository( 15 | name = "libgmp", 16 | path = "/usr/local/", 17 | build_file = "third_party/gmp/libgmp.BUILD" 18 | ) 19 | -------------------------------------------------------------------------------- /artifacts.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | import json 3 | import os 4 | 5 | from circleci.api import Api 6 | 7 | 8 | DESTDIR = os.environ.get('DESTDIR', None) 9 | CIRCLE_TOKEN = os.environ.get('CIRCLE_TOKEN', None) 10 | assert len(CIRCLE_TOKEN) >= 1, "Missing CIRCLE_TOKEN environment variable." 
circleci = Api(CIRCLE_TOKEN)


def find_most_recent_store_builds(store_job_name_prefix='store-'):
    """Return the ``store-*`` builds belonging to the newest commit on master.

    Fetches up to 50 recent builds of tf-encrypted/tf-big from CircleCI,
    keeps only those whose workflow job name starts with
    `store_job_name_prefix`, and further restricts them to the commit of the
    first such build encountered (the most recent commit with store jobs).
    """
    summaries = circleci.get_project_build_summary(
        username='tf-encrypted',
        project='tf-big',
        limit=50,
        status_filter=None,
        branch='master',
        vcs_type='github',
    )

    selected = []
    target_commit = None  # fixed by the first store build we encounter

    for summary in summaries:
        job_name = summary['workflows']['job_name']
        commit = summary['all_commit_details'][0]['commit']

        # Ignore every job that is not a store job.
        if not job_name.startswith(store_job_name_prefix):
            continue

        if target_commit is None:
            # First store build seen: its commit defines "most recent".
            target_commit = commit
        elif commit != target_commit:
            # Store build from an older commit; drop it.
            continue

        selected.append(summary)

    return selected


def download_artifacts_from_builds(builds, destdir=None):
    """Download every artifact of each build in `builds`.

    Passing ``destdir=None`` defers to the circleci client's default
    download location.
    """
    for build in builds:
        build_num = build['build_num']
        artifacts = circleci.get_artifacts(
            username='tf-encrypted',
            project='tf-big',
            build_num=build_num,
            vcs_type='github',
        )
        print("Processing build {build_num} ({build_url}) committed at {committer_date} and stopped at {stop_time}".format(
            build_num=build_num,
            committer_date=build['committer_date'],
            stop_time=build['stop_time'],
            build_url=build['build_url'],
        ))
        for artifact in artifacts:
            print(" - Downloading artifact '{pretty_part}'".format(
                pretty_part=artifact['pretty_path'],
            ))
            circleci.download_artifact(
                url=artifact['url'],
                destdir=destdir,
            )


store_builds = find_most_recent_store_builds()
download_artifacts_from_builds(store_builds, destdir=DESTDIR)
-------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script builds all .so files using the currently installed version of 4 | # TensorFlow and tags these accordingly using pattern '_.so'. 5 | # The resulting package is copied to '${1}', ie including Python files. 6 | 7 | set -e 8 | set -x 9 | 10 | if [[ -z ${1} ]]; then 11 | echo "No output directory provided" 12 | exit 1 13 | fi 14 | OUT=${1} 15 | 16 | # build all files via `build_sh` Bazel target 17 | bazel clean 18 | bazel build :build_sh 19 | 20 | # copy out files to destination 21 | rsync -avm \ 22 | --exclude "_solib*" \ 23 | --exclude "build_sh*" \ 24 | -L ./bazel-bin/build_sh.runfiles/__main__/ ${OUT} 25 | -------------------------------------------------------------------------------- /bundle.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script bundles up all files into a pip package, including the 4 | # various versions of the .so files. 5 | 6 | set -e 7 | set -x 8 | 9 | if [[ -z ${1} ]]; then 10 | echo "No input directory provided" 11 | exit 1 12 | fi 13 | TMP=${1} 14 | 15 | if [[ -z ${2} ]]; then 16 | echo "No output directory provided" 17 | exit 1 18 | fi 19 | OUT=${2} 20 | 21 | OS_NAME="$(uname -s | tr A-Z a-z)" 22 | 23 | if [[ $OS_NAME == "darwin" || $OS_NAME == "linux" ]]; then 24 | pushd ${TMP} 25 | python setup.py bdist_wheel > /dev/null 26 | popd 27 | python -m twine check ${TMP}/dist/*.whl 28 | cp ${TMP}/dist/*.whl ${OUT} 29 | 30 | else 31 | echo "Don't know how to bundle package for '$OS_NAME'" 32 | exit 1 33 | fi 34 | -------------------------------------------------------------------------------- /configure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ============================================================================== 16 | PLATFORM="$(uname -s | tr 'A-Z' 'a-z')" 17 | 18 | function write_to_bazelrc() { 19 | echo "$1" >> .bazelrc 20 | } 21 | 22 | function write_action_env_to_bazelrc() { 23 | write_to_bazelrc "build --action_env $1=\"$2\"" 24 | } 25 | 26 | function is_linux() { 27 | [[ "${PLATFORM}" == "linux" ]] 28 | } 29 | 30 | function is_macos() { 31 | [[ "${PLATFORM}" == "darwin" ]] 32 | } 33 | 34 | function is_windows() { 35 | # On windows, the shell script is actually running in msys 36 | [[ "${PLATFORM}" =~ msys_nt*|mingw*|cygwin*|uwin* ]] 37 | } 38 | 39 | function is_ppc64le() { 40 | [[ "$(uname -m)" == "ppc64le" ]] 41 | } 42 | 43 | 44 | # Remove .bazelrc if it already exist 45 | [ -e .bazelrc ] && rm .bazelrc 46 | 47 | TF_NEED_CUDA=0 48 | 49 | if is_windows; then 50 | PIP_MANYLINUX2010=0 51 | else 52 | PIP_MANYLINUX2010=1 53 | fi 54 | 55 | TF_CUDA_VERSION=10.1 56 | 57 | 58 | # CPU 59 | if [[ "$TF_NEED_CUDA" == "0" ]]; then 60 | # Check if it's installed 61 | if [[ $(pip show tensorflow) == *tensorflow* ]] || [[ $(pip show tf-nightly) == *tf-nightly* ]] ; then 62 | echo 'Using installed tensorflow' 63 | else 64 | echo "Error: expected tensorflow to be installed before running configure.sh." 
65 | exit 1 66 | fi 67 | fi 68 | 69 | 70 | TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') ) 71 | TF_LFLAGS="$(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))')" 72 | 73 | # write_to_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true" 74 | # if [[ "$PIP_MANYLINUX2010" == "0" ]]; then 75 | # write_to_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain" 76 | # fi 77 | # # Add Ubuntu toolchain flags 78 | # if is_linux; then 79 | # write_to_bazelrc "build:manylinux2010cuda100 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0:toolchain" 80 | # write_to_bazelrc "build:manylinux2010cuda101 --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain" 81 | # fi 82 | write_to_bazelrc "build --spawn_strategy=standalone" 83 | write_to_bazelrc "build --strategy=Genrule=standalone" 84 | write_to_bazelrc "build -c opt" 85 | 86 | 87 | if is_windows; then 88 | # Use pywrap_tensorflow instead of tensorflow_framework on Windows 89 | SHARED_LIBRARY_DIR=${TF_CFLAGS:2:-7}"python" 90 | else 91 | SHARED_LIBRARY_DIR=${TF_LFLAGS:2} 92 | fi 93 | SHARED_LIBRARY_NAME=$(echo $TF_LFLAGS | rev | cut -d":" -f1 | rev) 94 | if ! [[ $TF_LFLAGS =~ .*:.* ]]; then 95 | if is_macos; then 96 | SHARED_LIBRARY_NAME="libtensorflow_framework.dylib" 97 | elif is_windows; then 98 | # Use pywrap_tensorflow's import library on Windows. It is in the same dir as the dll/pyd. 
99 | SHARED_LIBRARY_NAME="_pywrap_tensorflow_internal.lib" 100 | else 101 | SHARED_LIBRARY_NAME="libtensorflow_framework.so" 102 | fi 103 | fi 104 | 105 | HEADER_DIR=${TF_CFLAGS:2} 106 | if is_windows; then 107 | SHARED_LIBRARY_DIR=${SHARED_LIBRARY_DIR//\\//} 108 | SHARED_LIBRARY_NAME=${SHARED_LIBRARY_NAME//\\//} 109 | HEADER_DIR=${HEADER_DIR//\\//} 110 | fi 111 | write_action_env_to_bazelrc "TF_HEADER_DIR" ${HEADER_DIR} 112 | write_action_env_to_bazelrc "TF_SHARED_LIBRARY_DIR" ${SHARED_LIBRARY_DIR} 113 | write_action_env_to_bazelrc "TF_SHARED_LIBRARY_NAME" ${SHARED_LIBRARY_NAME} 114 | write_action_env_to_bazelrc "TF_NEED_CUDA" ${TF_NEED_CUDA} 115 | 116 | -------------------------------------------------------------------------------- /docker/Makefile: -------------------------------------------------------------------------------- 1 | build-image: 2 | docker build ./build -t tfencrypted/tf-big:build 3 | 4 | whltest-image: 5 | docker build ./whltest -t tfencrypted/tf-big:whltest 6 | 7 | deploy-image: 8 | docker build ./deploy -t tfencrypted/tf-big:deploy 9 | 10 | push-build-image: 11 | docker push tfencrypted/tf-big:build 12 | 13 | push-whltest-image: 14 | docker push tfencrypted/tf-big:whltest 15 | 16 | push-deploy-image: 17 | docker push tfencrypted/tf-big:deploy 18 | 19 | .PHONY: build-image whltest-image deploy-image push-build-image push-whltest-image push-deploy-image 20 | -------------------------------------------------------------------------------- /docker/build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:custom-op-ubuntu16 2 | 3 | # Install tools needed for building 4 | RUN apt update && \ 5 | apt install -y \ 6 | curl git python3 tree rsync mmv \ 7 | pkg-config g++ cmake \ 8 | zip unzip zlib1g-dev 9 | 10 | # Install Python versions needed 11 | RUN curl -OL https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ 12 | bash Miniconda3-latest-Linux-x86_64.sh 
-b -f && \ 13 | ~/miniconda3/bin/conda create -n py3.5 python=3.5 -y && \ 14 | ln -s ~/miniconda3/envs/py3.5/bin/python ~/python3.5 && \ 15 | ~/miniconda3/bin/conda create -n py3.6 python=3.6 -y && \ 16 | ln -s ~/miniconda3/envs/py3.6/bin/python ~/python3.6 17 | 18 | # Install extra dependencies; in case of TF Big this is just GMP 19 | RUN wget https://gmplib.org/download/gmp/gmp-6.1.2.tar.xz && \ 20 | tar -xf gmp-6.1.2.tar.xz && \ 21 | cd gmp-6.1.2 && \ 22 | ./configure --with-pic --enable-cxx --enable-static --disable-shared && \ 23 | make && \ 24 | make check && \ 25 | make install && \ 26 | make clean 27 | -------------------------------------------------------------------------------- /docker/deploy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:19.04 2 | 3 | # Install tools needed for deployment 4 | RUN apt update && \ 5 | apt install -y \ 6 | ca-certificates tree make git python3 curl 7 | 8 | # Install Python versions needed 9 | RUN curl -OL https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ 10 | bash Miniconda3-latest-Linux-x86_64.sh -b -f && \ 11 | ~/miniconda3/bin/conda create -n py3.6 python=3.6 -y && \ 12 | ln -s ~/miniconda3/envs/py3.6/bin/python ~/python3.6 13 | -------------------------------------------------------------------------------- /docker/whltest/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | 3 | # Install tools needed for wheel testing 4 | RUN apt update && \ 5 | apt install -y \ 6 | curl git make python3 tree 7 | 8 | # Install Python versions needed 9 | RUN curl -OL https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ 10 | bash Miniconda3-latest-Linux-x86_64.sh -b -f && \ 11 | ~/miniconda3/bin/conda create -n py3.5 python=3.5 -y && \ 12 | ln -s ~/miniconda3/envs/py3.5/bin/python ~/python3.5 && \ 13 | ~/miniconda3/bin/conda create -n py3.6 python=3.6 -y && \ 14 | 
ln -s ~/miniconda3/envs/py3.6/bin/python ~/python3.6 15 | -------------------------------------------------------------------------------- /examples/graph_mode.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | import tf_big 4 | 5 | with tf.compat.v1.Session() as sess: 6 | x = tf_big.constant([[1, 2, 3, 4]]) 7 | y = tf_big.constant([[1, 2, 3, 4]]) 8 | z = x + y 9 | 10 | res = sess.run(z) 11 | print(res) 12 | -------------------------------------------------------------------------------- /examples/intro.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import tf_big 3 | 4 | # load large values as strings 5 | x = tf_big.constant([["100000000000000000000", "200000000000000000000"]]) 6 | 7 | # load ordinary TensorFlow tensors 8 | y = tf_big.import_tensor(tf.constant([[3, 4]])) 9 | 10 | # perform computation as usual 11 | z = x * y 12 | 13 | # export result back into a TensorFlow tensor 14 | tf_res = tf_big.export_tensor(z) 15 | print(tf_res) 16 | -------------------------------------------------------------------------------- /pytest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script runs alls tests and examples against the currently 4 | # installed version of TensorFlow. 
5 | 6 | set -e 7 | set -x 8 | 9 | # run all test files 10 | find ./tf_big -name '*_test.py' | xargs -I {} python {} 11 | 12 | # run all examples 13 | find ./examples -name '*.py' | xargs -I {} python {} 14 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.9.0 2 | black==19.10b 3 | cpplint==1.4.4 4 | flake8-black==0.1.1 5 | flake8-isort==3.0.0 6 | flake8==3.7.9 7 | isort==4.3.21 8 | numpy==1.16.4 9 | pip==20.1.1 10 | setuptools==41.2.0 11 | twine==1.13.0 12 | wheel==0.33.6 13 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | pip==20.1.1 2 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [tool:isort] 2 | line_length=88 3 | force_single_line=True 4 | 5 | [flake8] 6 | max-line-length=88 7 | extend-ignore= 8 | E203 # okay: black 9 | T484 # TODO: should not be ignored 10 | T499 # TODO: status only? 
11 | D10,D20,D40 # TODO 12 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """Installing with setuptools.""" 2 | import setuptools 3 | 4 | from setuptools.dist import Distribution 5 | 6 | class BinaryDistribution(Distribution): 7 | """This class is needed in order to create OS specific wheels.""" 8 | 9 | def is_pure(self): 10 | return False 11 | 12 | def has_ext_modules(self): 13 | return True 14 | 15 | with open("README.md", "r") as fh: 16 | long_description = fh.read() 17 | 18 | setuptools.setup( 19 | name="tf-big", 20 | version="0.2.1", 21 | packages=setuptools.find_packages(), 22 | package_data={ 23 | '': ['*.so'], 24 | }, 25 | python_requires=">=3.5", 26 | install_requires=[ 27 | "pip>=20.1.1", 28 | "numpy>=1.14", 29 | ], 30 | extras_require={ 31 | "tf": ["tensorflow~=2.2.0"], 32 | "nightly": ["tf-nightly"], 33 | }, 34 | license="Apache License 2.0", 35 | url="https://github.com/tf-encrypted/tf-big", 36 | description="Arbitrary precision integers in TensorFlow.", 37 | long_description=long_description, 38 | long_description_content_type="text/markdown", 39 | author="The TF Encrypted Authors", 40 | author_email="contact@tf-encrypted.io", 41 | include_package_data=True, 42 | zip_safe=False, 43 | distclass=BinaryDistribution, 44 | classifiers=[ 45 | "Programming Language :: Python :: 3", 46 | "License :: OSI Approved :: Apache Software License", 47 | "Development Status :: 2 - Pre-Alpha", 48 | "Operating System :: OS Independent", 49 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 50 | "Topic :: Security :: Cryptography", 51 | ] 52 | ) 53 | -------------------------------------------------------------------------------- /tf_big/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) # Apache 2.0 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 
| cc_binary( 6 | name = 'python/ops/_big_ops.so', 7 | srcs = [ 8 | "cc/big_tensor.h", 9 | "cc/big_tensor.cc", 10 | "cc/ops/big_ops.cc", 11 | "cc/kernels/big_kernels.cc", 12 | ], 13 | linkshared = 1, 14 | deps = [ 15 | "@local_config_tf//:libtensorflow_framework", 16 | "@local_config_tf//:tf_header_lib", 17 | "@libgmp//:lib" 18 | ], 19 | copts = ["-pthread", "-std=c++11", "-D_GLIBCXX_USE_CXX11_ABI=0", "-fPIC"], 20 | ) 21 | 22 | py_library( 23 | name = "big_ops_py", 24 | srcs = ([ 25 | "python/tensor.py", 26 | "python/ops/big_ops.py", 27 | ]), 28 | data = [ 29 | ":python/ops/_big_ops.so" 30 | ], 31 | srcs_version = "PY2AND3", 32 | ) 33 | 34 | py_test( 35 | name = "big_ops_py_test", 36 | srcs = [ 37 | "python/ops/big_ops_test.py", 38 | ], 39 | main = "python/ops/big_ops_test.py", 40 | deps = [ 41 | ":big_ops_py", 42 | "//tf_big/python/test:test_py", 43 | ], 44 | srcs_version = "PY2AND3", 45 | ) 46 | 47 | py_test( 48 | name = "tensor_test", 49 | srcs = [ 50 | "python/tensor_test.py", 51 | ], 52 | main = "python/tensor_test.py", 53 | deps = [ 54 | ":big_ops_py", 55 | "//tf_big/python/test:test_py", 56 | ], 57 | srcs_version = "PY2AND3", 58 | ) 59 | 60 | py_library( 61 | name = "tf_big_py", 62 | srcs = ([ 63 | "__init__.py", 64 | "python/__init__.py", 65 | "python/ops/__init__.py", 66 | ]), 67 | deps = [ 68 | ":big_ops_py", 69 | "//tf_big/python/test:test_py", 70 | ], 71 | srcs_version = "PY2AND3", 72 | ) 73 | -------------------------------------------------------------------------------- /tf_big/__init__.py: -------------------------------------------------------------------------------- 1 | from tf_big.python.tensor import Tensor 2 | from tf_big.python.tensor import add 3 | from tf_big.python.tensor import constant 4 | from tf_big.python.tensor import export_limbs_tensor 5 | from tf_big.python.tensor import export_tensor 6 | from tf_big.python.tensor import get_secure_default 7 | from tf_big.python.tensor import import_limbs_tensor 8 | from tf_big.python.tensor import 
import_tensor 9 | from tf_big.python.tensor import inv 10 | from tf_big.python.tensor import matmul 11 | from tf_big.python.tensor import mod 12 | from tf_big.python.tensor import mul 13 | from tf_big.python.tensor import pow 14 | from tf_big.python.tensor import random_rsa_modulus 15 | from tf_big.python.tensor import random_uniform 16 | from tf_big.python.tensor import set_secure_default 17 | from tf_big.python.tensor import sub 18 | 19 | __all__ = [ 20 | "set_secure_default", 21 | "get_secure_default", 22 | "Tensor", 23 | "constant", 24 | "export_limbs_tensor", 25 | "export_tensor", 26 | "import_limbs_tensor", 27 | "import_tensor", 28 | "random_uniform", 29 | "randon_rsa_modulus", 30 | "add", 31 | "sub", 32 | "mul", 33 | "pow", 34 | "matmul", 35 | "mod", 36 | "inv", 37 | ] 38 | -------------------------------------------------------------------------------- /tf_big/cc/big_tensor.cc: -------------------------------------------------------------------------------- 1 | #include "tf_big/cc/big_tensor.h" 2 | 3 | #include 4 | 5 | #include 6 | 7 | namespace tf_big { 8 | BigTensor::BigTensor(const MatrixXm& mat) { value = mat; } 9 | 10 | BigTensor::BigTensor(const BigTensor& other) { value = other.value; } 11 | 12 | BigTensor::BigTensor(mpz_class m) { 13 | value = MatrixXm(1, 1); 14 | value(0, 0) = m; 15 | } 16 | 17 | void BigTensor::Encode(VariantTensorData* data) const { 18 | auto rows = value.rows(); 19 | auto cols = value.cols(); 20 | 21 | auto shape = TensorShape{rows, cols}; 22 | Tensor t(DT_STRING, shape); 23 | 24 | auto mat = t.matrix(); 25 | for (int i = 0; i < rows; i++) { 26 | for (int j = 0; j < cols; j++) { 27 | size_t count_p; 28 | 29 | char* p = reinterpret_cast(mpz_export( 30 | NULL, &count_p, 1, sizeof(int32), 0, 0, value(i, j).get_mpz_t())); 31 | 32 | int total_size = count_p * sizeof(int32); 33 | 34 | mat(i, j) = tstring(p, total_size); 35 | } 36 | } 37 | 38 | *data->add_tensors() = t; 39 | 40 | data->set_type_name(TypeName()); 41 | } 42 | 43 | bool 
BigTensor::Decode(const VariantTensorData& data) { 44 | if (!TensorShapeUtils::IsMatrix(data.tensors()[0].shape())) { 45 | return false; 46 | } 47 | 48 | auto mat = data.tensors()[0].matrix(); 49 | 50 | auto rows = data.tensors()[0].dim_size(0); 51 | auto cols = data.tensors()[0].dim_size(1); 52 | 53 | value = MatrixXm(rows, cols); 54 | 55 | for (int i = 0; i < rows; i++) { 56 | for (int j = 0; j < cols; j++) { 57 | mpz_import(value(i, j).get_mpz_t(), 1, 1, sizeof(int32), 0, 0, 58 | mat(i, j).c_str()); 59 | } 60 | } 61 | 62 | return true; 63 | } 64 | 65 | const char BigTensor::kTypeName[] = "BigTensor"; 66 | 67 | } // namespace tf_big 68 | -------------------------------------------------------------------------------- /tf_big/cc/big_tensor.h: -------------------------------------------------------------------------------- 1 | #ifndef TF_BIG_CC_BIG_TENSOR_H_ 2 | #define TF_BIG_CC_BIG_TENSOR_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | #include "Eigen/Core" 12 | #include "Eigen/Dense" 13 | #include "tensorflow/core/framework/op.h" 14 | #include "tensorflow/core/framework/op_kernel.h" 15 | #include "tensorflow/core/framework/shape_inference.h" 16 | #include "tensorflow/core/framework/variant.h" 17 | #include "tensorflow/core/framework/variant_encode_decode.h" 18 | #include "tensorflow/core/framework/variant_op_registry.h" 19 | #include "tensorflow/core/framework/variant_tensor_data.h" 20 | 21 | using Eigen::Dynamic; 22 | using Eigen::Index; 23 | using Eigen::Matrix; 24 | 25 | using namespace tensorflow; // NOLINT 26 | 27 | namespace Eigen { 28 | template <> 29 | struct NumTraits : GenericNumTraits { 30 | typedef mpz_class Real; 31 | typedef mpz_class NonInteger; 32 | typedef mpz_class Nested; 33 | static inline Real epsilon() { return 0; } 34 | static inline Real dummy_precision() { return 0; } 35 | static inline int digits10() { return 0; } 36 | 37 | enum { 38 | IsInteger = 0, 39 | IsSigned = 1, 40 | IsComplex = 0, 41 | 
RequireInitialization = 1, 42 | ReadCost = 6, 43 | AddCost = 150, 44 | MulCost = 100 45 | }; 46 | }; 47 | } // namespace Eigen 48 | 49 | typedef Matrix MatrixXm; 50 | 51 | namespace tf_big { 52 | 53 | namespace gmp_utils { 54 | inline void reseed(unsigned long& seed) { 55 | int file; 56 | /* re-seed random number generator */ 57 | if ((file = open("/dev/urandom", O_RDONLY)) == -1) { 58 | (void)fprintf(stderr, "Error opening /dev/urandom\n"); 59 | } else { 60 | if (read(file, &seed, sizeof seed) == -1) { 61 | (void)fprintf(stderr, "Error reading from /dev/urandom\n"); 62 | (void)close(file); 63 | } 64 | } 65 | } 66 | inline void init_randstate(gmp_randstate_t& state) { 67 | gmp_randinit_mt(state); 68 | unsigned long seed; 69 | reseed(seed); 70 | gmp_randseed_ui(state, seed); 71 | } 72 | } // namespace gmp_utils 73 | 74 | inline void encode_length(uint8_t* buffer, unsigned int len) { 75 | buffer[0] = len & 0xFF; 76 | buffer[1] = (len >> 8) & 0xFF; 77 | buffer[2] = (len >> 16) & 0xFF; 78 | buffer[3] = (len >> 24) & 0xFF; 79 | } 80 | inline unsigned int decode_length(const uint8_t* buffer) { 81 | return buffer[0] + 0x100 * buffer[1] + 0x10000 * buffer[2] + 82 | 0x1000000 * buffer[3]; 83 | } 84 | 85 | struct BigTensor { 86 | BigTensor() {} 87 | BigTensor(const BigTensor& other); 88 | explicit BigTensor(mpz_class m); 89 | explicit BigTensor(const MatrixXm& mat); 90 | 91 | static const char kTypeName[]; 92 | string TypeName() const { return kTypeName; } 93 | 94 | void Encode(VariantTensorData* data) const; 95 | 96 | bool Decode(const VariantTensorData& data); 97 | 98 | string DebugString() const { return "BigTensor"; } 99 | 100 | template 101 | void FromTensor(const Tensor& t) { 102 | auto rows = t.dim_size(0); 103 | auto cols = t.dim_size(1); 104 | 105 | value = MatrixXm(rows, cols); 106 | 107 | auto mat = t.matrix(); 108 | for (int i = 0; i < rows; i++) { 109 | for (int j = 0; j < cols; j++) { 110 | value(i, j) = mpz_class(mat(i, j)); 111 | } 112 | } 113 | } 114 | 115 | 
template 116 | void ToTensor(Tensor* t) const { 117 | auto rows = value.rows(); 118 | auto cols = value.cols(); 119 | 120 | if ((rows == 1) && (cols == 1)) { 121 | auto mat = t->scalar(); 122 | mat(0) = value(0, 0).get_str(); 123 | } else { 124 | auto mat = t->matrix(); 125 | for (int i = 0; i < rows; i++) { 126 | for (int j = 0; j < cols; j++) { 127 | mat(i, j) = value(i, j).get_str(); 128 | } 129 | } 130 | } 131 | } 132 | template 133 | void LimbsFromTensor(const Tensor& t) { 134 | int rows = t.dim_size(0); 135 | int cols = t.dim_size(1); 136 | size_t num_real_limbs = 137 | t.dim_size(2) * sizeof(T) - 4; // get rid of header length 138 | 139 | value = MatrixXm(rows, cols); 140 | 141 | auto input_tensor = t.flat(); 142 | const uint8_t* buffer = 143 | reinterpret_cast(input_tensor.data()); 144 | 145 | size_t pointer = 0; 146 | for (int i = 0; i < rows; i++) { 147 | for (int j = 0; j < cols; j++) { 148 | unsigned int length = decode_length(buffer + pointer); 149 | pointer += 4; 150 | mpz_import(value(i, j).get_mpz_t(), length, 1, sizeof(uint8_t), 0, 0, 151 | buffer + pointer); 152 | pointer += num_real_limbs; 153 | } 154 | } 155 | } 156 | 157 | BigTensor& operator+=(const BigTensor& rhs) { 158 | this->value += rhs.value; 159 | return *this; 160 | } 161 | 162 | // friend makes this a non-member 163 | friend BigTensor operator+(BigTensor lhs, const BigTensor& rhs) { 164 | lhs += rhs; 165 | return lhs; 166 | } 167 | 168 | BigTensor& operator-=(const BigTensor& rhs) { 169 | this->value -= rhs.value; 170 | return *this; 171 | } 172 | 173 | // friend makes this a non-member 174 | friend BigTensor operator-(BigTensor lhs, const BigTensor& rhs) { 175 | lhs -= rhs; 176 | return lhs; 177 | } 178 | 179 | BigTensor& operator*=(const BigTensor& rhs) { 180 | this->value *= rhs.value; 181 | return *this; 182 | } 183 | 184 | // friend makes this a non-member 185 | friend BigTensor operator*(BigTensor lhs, const BigTensor& rhs) { 186 | lhs *= rhs; 187 | return lhs; 188 | } 189 | 190 
| mpz_class operator()(Index i, Index j) const { return value(i, j); } 191 | 192 | BigTensor cwiseProduct(const BigTensor& rhs) const { 193 | return BigTensor(this->value.cwiseProduct(rhs.value)); 194 | } 195 | 196 | BigTensor cwiseQuotient(const BigTensor& rhs) const { 197 | return BigTensor(this->value.cwiseQuotient(rhs.value)); 198 | } 199 | 200 | Index rows() const { return value.rows(); } 201 | 202 | Index cols() const { return value.cols(); } 203 | 204 | TensorShape shape() const { return TensorShape{value.rows(), value.cols()}; } 205 | 206 | MatrixXm value; 207 | }; 208 | 209 | template <> 210 | inline void BigTensor::ToTensor(Tensor* t) const { 211 | auto rows = value.rows(); 212 | auto cols = value.cols(); 213 | 214 | auto mat = t->matrix(); 215 | for (int i = 0; i < rows; i++) { 216 | for (int j = 0; j < cols; j++) { 217 | mat(i, j) = value(i, j).get_si(); 218 | } 219 | } 220 | } 221 | 222 | template <> 223 | inline void BigTensor::ToTensor(Tensor* t) const { 224 | auto rows = value.rows(); 225 | auto cols = value.cols(); 226 | 227 | auto mat = t->matrix(); 228 | for (int i = 0; i < rows; i++) { 229 | for (int j = 0; j < cols; j++) { 230 | mat(i, j) = (uint8)value(i, j).get_si(); 231 | } 232 | } 233 | } 234 | 235 | template <> 236 | inline void BigTensor::FromTensor(const Tensor& t) { 237 | auto rows = t.dim_size(0); 238 | auto cols = t.dim_size(1); 239 | 240 | value = MatrixXm(rows, cols); 241 | 242 | auto mat = t.matrix(); 243 | for (int i = 0; i < rows; i++) { 244 | for (int j = 0; j < cols; j++) { 245 | value(i, j) = mpz_class(mat(i, j), 10); 246 | } 247 | } 248 | } 249 | 250 | } // namespace tf_big 251 | 252 | #endif // TF_BIG_CC_BIG_TENSOR_H_ 253 | -------------------------------------------------------------------------------- /tf_big/cc/kernels/big_kernels.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "tensorflow/core/framework/op.h" 4 | #include 
"tensorflow/core/framework/op_kernel.h" 5 | #include "tensorflow/core/framework/shape_inference.h" 6 | #include "tensorflow/core/framework/tensor_util.h" 7 | #include "tensorflow/core/framework/variant.h" 8 | #include "tensorflow/core/framework/variant_encode_decode.h" 9 | #include "tensorflow/core/framework/variant_op_registry.h" 10 | #include "tensorflow/core/framework/variant_tensor_data.h" 11 | #include "tf_big/cc/big_tensor.h" 12 | 13 | using namespace tensorflow; // NOLINT 14 | using tf_big::BigTensor; 15 | 16 | Status GetBigTensor(OpKernelContext* ctx, int index, const BigTensor** res) { 17 | const Tensor& input = ctx->input(index); 18 | 19 | const BigTensor* big = input.flat()(0).get(); 20 | 21 | if (big == nullptr) { 22 | return errors::InvalidArgument("Input handle is not a big tensor. Saw: '", 23 | input.flat()(0).DebugString(), "'"); 24 | } 25 | 26 | *res = big; 27 | return Status::OK(); 28 | } 29 | 30 | template 31 | class BigImportOp : public OpKernel { 32 | public: 33 | explicit BigImportOp(OpKernelConstruction* context) : OpKernel(context) {} 34 | 35 | void Compute(OpKernelContext* ctx) override { 36 | const Tensor& input = ctx->input(0); 37 | OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(input.shape()), 38 | errors::InvalidArgument( 39 | "value expected to be a matrix ", 40 | "but got shape: ", input.shape().DebugString())); 41 | 42 | Tensor* val; 43 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &val)); 44 | 45 | BigTensor big; 46 | big.FromTensor(input); 47 | 48 | val->flat()(0) = std::move(big); 49 | } 50 | }; 51 | 52 | template 53 | class BigImportLimbsOp : public OpKernel { 54 | public: 55 | explicit BigImportLimbsOp(OpKernelConstruction* context) 56 | : OpKernel(context) {} 57 | 58 | void Compute(OpKernelContext* ctx) override { 59 | const Tensor& input = ctx->input(0); 60 | OP_REQUIRES(ctx, TensorShapeUtils::IsMatrixOrHigher(input.shape()), 61 | errors::InvalidArgument( 62 | "value expected to be at least a matrix ", 63 | "but 
got shape: ", input.shape().DebugString())); 64 | 65 | Tensor* val; 66 | OP_REQUIRES_OK(ctx, 67 | ctx->allocate_output(0, 68 | TensorShape{input.shape().dim_size(0), 69 | input.shape().dim_size(1)}, 70 | &val)); 71 | 72 | BigTensor big; 73 | big.LimbsFromTensor(input); 74 | 75 | val->flat()(0) = std::move(big); 76 | } 77 | }; 78 | 79 | template 80 | class BigExportOp : public OpKernel { 81 | public: 82 | explicit BigExportOp(OpKernelConstruction* context) : OpKernel(context) {} 83 | 84 | void Compute(OpKernelContext* ctx) override { 85 | const BigTensor* val = nullptr; 86 | TensorShape input_shape = ctx->input(0).shape(); 87 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &val)); 88 | 89 | Tensor* output; 90 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input_shape, &output)); 91 | 92 | val->ToTensor(output); 93 | } 94 | }; 95 | 96 | template 97 | class BigExportLimbsOp : public OpKernel { 98 | public: 99 | explicit BigExportLimbsOp(OpKernelConstruction* context) 100 | : OpKernel(context) {} 101 | 102 | void Compute(OpKernelContext* ctx) override { 103 | const Tensor& maxval_tensor = ctx->input(1); 104 | int32_t max_bitlen = maxval_tensor.flat()(0); 105 | 106 | const BigTensor* input = nullptr; 107 | TensorShape input_shape = ctx->input(0).shape(); 108 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &input)); 109 | 110 | // Compute maxval if left unspecified by user 111 | if (max_bitlen < 0) { 112 | for (int i = 0; i < input->rows(); i++) { 113 | for (int j = 0; j < input->cols(); j++) { 114 | auto ele = input->value(i, j).get_mpz_t(); 115 | int32_t ele_bitlen = mpz_sizeinbase(ele, 2); 116 | if (max_bitlen < ele_bitlen) { 117 | max_bitlen = ele_bitlen; 118 | } 119 | } 120 | } 121 | } 122 | OP_REQUIRES(ctx, max_bitlen >= 0, 123 | errors::Internal("Malformed max bitlength: ", max_bitlen)); 124 | unsigned int max_bytelen = max_bitlen * 8; 125 | 126 | unsigned int header_bytelen = 4; 127 | unsigned int header_bitlen = header_bytelen * 8; 128 | unsigned int entry_bitlen = 
header_bitlen + max_bitlen; 129 | unsigned int type_bitlen = sizeof(T) * 8; 130 | unsigned int num_limbs = (entry_bitlen + type_bitlen - 1) / type_bitlen; 131 | unsigned int entry_bytelen = num_limbs * sizeof(T); 132 | 133 | TensorShape output_shape; 134 | output_shape.AddDim(input_shape.dim_size(0)); 135 | output_shape.AddDim(input_shape.dim_size(1)); 136 | output_shape.AddDim(num_limbs); 137 | 138 | Tensor* output; 139 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &output)); 140 | auto output_flattened = output->flat(); 141 | uint8_t* output_data = reinterpret_cast(output_flattened.data()); 142 | 143 | size_t pointer = 0; 144 | for (int i = 0; i < input->rows(); i++) { 145 | for (int j = 0; j < input->cols(); j++) { 146 | auto ele = input->value(i, j).get_mpz_t(); 147 | 148 | // Write header to output buffer 149 | unsigned int ele_bytelen = mpz_sizeinbase(ele, 256); 150 | tf_big::encode_length(output_data + pointer, ele_bytelen); 151 | OP_REQUIRES( 152 | ctx, ele_bytelen <= max_bytelen, 153 | errors::Internal("User selected wrong byte length, required: ", 154 | ele_bytelen, " bytes")); 155 | 156 | // Write element to output buffer 157 | size_t exported_bytelen; 158 | mpz_export(output_data + pointer + header_bytelen, &exported_bytelen, 1, 159 | sizeof(uint8_t), 0, 0, ele); 160 | 161 | // Zero-out remaining bytes of entry 162 | for (size_t k = header_bytelen + exported_bytelen; k < entry_bytelen; 163 | k++) { 164 | output_data[pointer + k] = 0; 165 | } 166 | 167 | // Advance pointer to next entry 168 | pointer += entry_bytelen; 169 | } 170 | } 171 | } 172 | }; 173 | 174 | class BigAddOp : public OpKernel { 175 | public: 176 | explicit BigAddOp(OpKernelConstruction* context) : OpKernel(context) {} 177 | 178 | void Compute(OpKernelContext* ctx) override { 179 | const BigTensor* val0 = nullptr; 180 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &val0)); 181 | 182 | const BigTensor* val1 = nullptr; 183 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 1, &val1)); 184 
| 185 | Tensor* output; 186 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, val0->shape(), &output)); 187 | 188 | auto res = *val0 + *val1; 189 | 190 | output->flat()(0) = std::move(res); 191 | } 192 | }; 193 | 194 | class BigSubOp : public OpKernel { 195 | public: 196 | explicit BigSubOp(OpKernelConstruction* context) : OpKernel(context) {} 197 | 198 | void Compute(OpKernelContext* ctx) override { 199 | const BigTensor* val0 = nullptr; 200 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &val0)); 201 | 202 | const BigTensor* val1 = nullptr; 203 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 1, &val1)); 204 | 205 | Tensor* output; 206 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, val0->shape(), &output)); 207 | 208 | auto res = *val0 - *val1; 209 | 210 | output->flat()(0) = std::move(res); 211 | } 212 | }; 213 | 214 | class BigMulOp : public OpKernel { 215 | public: 216 | explicit BigMulOp(OpKernelConstruction* context) : OpKernel(context) {} 217 | 218 | void Compute(OpKernelContext* ctx) override { 219 | const BigTensor* val0 = nullptr; 220 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &val0)); 221 | 222 | const BigTensor* val1 = nullptr; 223 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 1, &val1)); 224 | 225 | Tensor* output; 226 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, val0->shape(), &output)); 227 | 228 | auto res = (*val0).cwiseProduct(*val1); 229 | 230 | output->flat()(0) = std::move(res); 231 | } 232 | }; 233 | 234 | class BigDivOp : public OpKernel { 235 | public: 236 | explicit BigDivOp(OpKernelConstruction* context) : OpKernel(context) {} 237 | 238 | void Compute(OpKernelContext* ctx) override { 239 | const BigTensor* val0 = nullptr; 240 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &val0)); 241 | 242 | const BigTensor* val1 = nullptr; 243 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 1, &val1)); 244 | 245 | Tensor* output; 246 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, val0->shape(), &output)); 247 | 248 | auto res = (*val0).cwiseQuotient(*val1); 249 | 250 | output->flat()(0) = 
std::move(res); 251 | } 252 | }; 253 | 254 | class BigPowOp : public OpKernel { 255 | public: 256 | explicit BigPowOp(OpKernelConstruction* ctx) : OpKernel(ctx) { 257 | OP_REQUIRES_OK(ctx, ctx->GetAttr("secure", &secure)); 258 | } 259 | 260 | void Compute(OpKernelContext* ctx) override { 261 | const BigTensor* base = nullptr; 262 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &base)); 263 | 264 | const BigTensor* exponent_t = nullptr; 265 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 1, &exponent_t)); 266 | 267 | // TODO(Morten) modulus should be optional 268 | const BigTensor* modulus_t = nullptr; 269 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 2, &modulus_t)); 270 | 271 | Tensor* output; 272 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, base->shape(), &output)); 273 | 274 | auto exponent = exponent_t->value.data(); 275 | auto modulus = modulus_t->value(0, 0); 276 | 277 | MatrixXm res(base->rows(), base->cols()); 278 | auto v = base->value.data(); 279 | auto size = base->value.size(); 280 | 281 | mpz_t tmp; 282 | mpz_init(tmp); 283 | for (int i = 0; i < size; i++) { 284 | if (secure) { 285 | mpz_powm_sec(tmp, v[i].get_mpz_t(), exponent[i].get_mpz_t(), 286 | modulus.get_mpz_t()); 287 | } else { 288 | mpz_powm(tmp, v[i].get_mpz_t(), exponent[i].get_mpz_t(), 289 | modulus.get_mpz_t()); 290 | } 291 | 292 | res.data()[i] = mpz_class(tmp); 293 | } 294 | mpz_clear(tmp); 295 | 296 | output->flat()(0) = BigTensor(res); 297 | } 298 | 299 | private: 300 | bool secure = false; 301 | }; 302 | 303 | class BigMatMulOp : public OpKernel { 304 | public: 305 | explicit BigMatMulOp(OpKernelConstruction* context) : OpKernel(context) {} 306 | 307 | void Compute(OpKernelContext* ctx) override { 308 | const BigTensor* val1 = nullptr; 309 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &val1)); 310 | 311 | const BigTensor* val2 = nullptr; 312 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 1, &val2)); 313 | 314 | Tensor* output; 315 | OP_REQUIRES_OK( 316 | ctx, ctx->allocate_output(0, TensorShape{val1->rows(), 
val2->cols()}, 317 | &output)); 318 | 319 | auto res = *val1 * *val2; 320 | 321 | output->flat()(0) = std::move(res); 322 | } 323 | }; 324 | 325 | class BigModOp : public OpKernel { 326 | public: 327 | explicit BigModOp(OpKernelConstruction* context) : OpKernel(context) {} 328 | 329 | void Compute(OpKernelContext* ctx) override { 330 | const BigTensor* val = nullptr; 331 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &val)); 332 | 333 | const BigTensor* mod = nullptr; 334 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 1, &mod)); 335 | auto modulus = mod->value(0, 0); 336 | 337 | MatrixXm res_matrix(val->rows(), val->cols()); 338 | auto res_data = res_matrix.data(); 339 | auto val_data = val->value.data(); 340 | auto size = val->value.size(); 341 | 342 | mpz_t tmp; 343 | mpz_init(tmp); 344 | for (int i = 0; i < size; i++) { 345 | mpz_mod(tmp, val_data[i].get_mpz_t(), modulus.get_mpz_t()); 346 | res_data[i] = mpz_class(tmp); 347 | } 348 | mpz_clear(tmp); 349 | 350 | Tensor* res; 351 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, val->shape(), &res)); 352 | res->flat()(0) = BigTensor(res_matrix); 353 | } 354 | }; 355 | 356 | class BigInvOp : public OpKernel { 357 | public: 358 | explicit BigInvOp(OpKernelConstruction* context) : OpKernel(context) {} 359 | 360 | void Compute(OpKernelContext* ctx) override { 361 | const BigTensor* val = nullptr; 362 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 0, &val)); 363 | 364 | const BigTensor* mod = nullptr; 365 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 1, &mod)); 366 | auto modulus = mod->value(0, 0); 367 | 368 | MatrixXm res_matrix(val->rows(), val->cols()); 369 | auto res_data = res_matrix.data(); 370 | auto val_data = val->value.data(); 371 | auto size = val->value.size(); 372 | 373 | mpz_t tmp; 374 | mpz_init(tmp); 375 | for (int i = 0; i < size; i++) { 376 | mpz_invert(tmp, val_data[i].get_mpz_t(), modulus.get_mpz_t()); 377 | res_data[i] = mpz_class(tmp); 378 | } 379 | mpz_clear(tmp); 380 | 381 | Tensor* res; 382 | OP_REQUIRES_OK(ctx, 
ctx->allocate_output(0, val->shape(), &res)); 383 | res->flat()(0) = BigTensor(res_matrix); 384 | } 385 | }; 386 | 387 | class BigRandomUniformOp : public OpKernel { 388 | public: 389 | explicit BigRandomUniformOp(OpKernelConstruction* context) 390 | : OpKernel(context) {} 391 | 392 | void Compute(OpKernelContext* ctx) override { 393 | const Tensor& shape_tensor = ctx->input(0); 394 | TensorShape shape; 395 | OP_REQUIRES_OK(ctx, tensor::MakeShape(shape_tensor, &shape)); 396 | 397 | const BigTensor* maxval_tensor = nullptr; 398 | OP_REQUIRES_OK(ctx, GetBigTensor(ctx, 1, &maxval_tensor)); 399 | auto maxval = maxval_tensor->value(0, 0).get_mpz_t(); 400 | 401 | MatrixXm res_matrix(shape.dim_size(0), shape.dim_size(1)); 402 | auto res_data = res_matrix.data(); 403 | auto size = res_matrix.size(); 404 | 405 | // TODO(Morten) offer secure randomness 406 | gmp_randstate_t state; 407 | tf_big::gmp_utils::init_randstate(state); 408 | mpz_t tmp; 409 | mpz_init(tmp); 410 | for (int i = 0; i < size; i++) { 411 | mpz_urandomm(tmp, state, maxval); 412 | res_data[i] = mpz_class(tmp); 413 | } 414 | mpz_clear(tmp); 415 | 416 | Tensor* res; 417 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, shape, &res)); 418 | res->flat()(0) = BigTensor(res_matrix); 419 | } 420 | }; 421 | 422 | class BigRandomRsaModulusOp : public OpKernel { 423 | public: 424 | explicit BigRandomRsaModulusOp(OpKernelConstruction* context) 425 | : OpKernel(context) {} 426 | 427 | void Compute(OpKernelContext* ctx) override { 428 | const Tensor& bitlength_t = ctx->input(0); 429 | auto bitlength = bitlength_t.scalar(); 430 | auto bitlength_val = bitlength.data(); 431 | 432 | MatrixXm p_matrix(1, 1); 433 | auto p_data = p_matrix.data(); 434 | MatrixXm q_matrix(1, 1); 435 | auto q_data = q_matrix.data(); 436 | MatrixXm n_matrix(1, 1); 437 | auto n_data = n_matrix.data(); 438 | 439 | gmp_randstate_t state; 440 | tf_big::gmp_utils::init_randstate(state); 441 | mpz_t p; 442 | mpz_t q; 443 | mpz_t n; 444 | mpz_init(p); 445 | 
mpz_init(q); 446 | mpz_init(n); 447 | 448 | do { 449 | do { 450 | mpz_urandomb(p, state, *bitlength_val / 2); 451 | } while (!mpz_probab_prime_p(p, 10)); 452 | 453 | do { 454 | mpz_urandomb(q, state, *bitlength_val / 2); 455 | } while (!mpz_probab_prime_p(q, 10)); 456 | 457 | mpz_mul(n, p, q); 458 | } while (!mpz_tstbit(n, *bitlength_val - 1)); 459 | 460 | p_data[0] = mpz_class(p); 461 | q_data[0] = mpz_class(q); 462 | n_data[0] = mpz_class(n); 463 | 464 | TensorShape shape({1, 1}); 465 | Tensor* p_res; 466 | OP_REQUIRES_OK(ctx, ctx->allocate_output(0, shape, &p_res)); 467 | p_res->flat()(0) = BigTensor(p_matrix); 468 | 469 | Tensor* q_res; 470 | OP_REQUIRES_OK(ctx, ctx->allocate_output(1, shape, &q_res)); 471 | q_res->flat()(0) = BigTensor(q_matrix); 472 | 473 | Tensor* n_res; 474 | OP_REQUIRES_OK(ctx, ctx->allocate_output(2, shape, &n_res)); 475 | n_res->flat()(0) = BigTensor(n_matrix); 476 | 477 | mpz_clear(p); 478 | mpz_clear(q); 479 | mpz_clear(n); 480 | } 481 | }; 482 | 483 | REGISTER_UNARY_VARIANT_DECODE_FUNCTION(BigTensor, BigTensor::kTypeName); 484 | 485 | REGISTER_KERNEL_BUILDER( 486 | Name("BigImport").Device(DEVICE_CPU).TypeConstraint("dtype"), 487 | BigImportOp); 488 | REGISTER_KERNEL_BUILDER( 489 | Name("BigImport").Device(DEVICE_CPU).TypeConstraint("dtype"), 490 | BigImportOp); 491 | REGISTER_KERNEL_BUILDER( 492 | Name("BigImport").Device(DEVICE_CPU).TypeConstraint("dtype"), 493 | BigImportOp); 494 | 495 | REGISTER_KERNEL_BUILDER( 496 | Name("BigExport").Device(DEVICE_CPU).TypeConstraint("dtype"), 497 | BigExportOp); 498 | REGISTER_KERNEL_BUILDER( 499 | Name("BigExport").Device(DEVICE_CPU).TypeConstraint("dtype"), 500 | BigExportOp); 501 | REGISTER_KERNEL_BUILDER( 502 | Name("BigExport").Device(DEVICE_CPU).TypeConstraint("dtype"), 503 | BigExportOp); 504 | 505 | REGISTER_KERNEL_BUILDER( 506 | Name("BigImportLimbs").Device(DEVICE_CPU).TypeConstraint("dtype"), 507 | BigImportLimbsOp); 508 | REGISTER_KERNEL_BUILDER( 509 | 
Name("BigImportLimbs").Device(DEVICE_CPU).TypeConstraint("dtype"), 510 | BigImportLimbsOp); 511 | 512 | REGISTER_KERNEL_BUILDER( 513 | Name("BigExportLimbs").Device(DEVICE_CPU).TypeConstraint("dtype"), 514 | BigExportLimbsOp); 515 | REGISTER_KERNEL_BUILDER( 516 | Name("BigExportLimbs").Device(DEVICE_CPU).TypeConstraint("dtype"), 517 | BigExportLimbsOp); 518 | 519 | // TODO(justin1121) there's no simple mpz to int64 convert functions 520 | // there's a suggestion here (https://stackoverflow.com/a/6248913/1116574) on 521 | // how to do it but it might take a bit of investigation from our side 522 | // perhaps arguable that we should only export/import to string for safety 523 | // can convert to number in python???? 524 | // REGISTER_CPU(int64); 525 | 526 | REGISTER_KERNEL_BUILDER(Name("BigRandomUniform").Device(DEVICE_CPU), 527 | BigRandomUniformOp); 528 | REGISTER_KERNEL_BUILDER(Name("BigRandomRsaModulus").Device(DEVICE_CPU), 529 | BigRandomRsaModulusOp); 530 | 531 | REGISTER_KERNEL_BUILDER(Name("BigAdd").Device(DEVICE_CPU), BigAddOp); 532 | REGISTER_KERNEL_BUILDER(Name("BigSub").Device(DEVICE_CPU), BigSubOp); 533 | REGISTER_KERNEL_BUILDER(Name("BigMul").Device(DEVICE_CPU), BigMulOp); 534 | REGISTER_KERNEL_BUILDER(Name("BigDiv").Device(DEVICE_CPU), BigDivOp); 535 | REGISTER_KERNEL_BUILDER(Name("BigPow").Device(DEVICE_CPU), BigPowOp); 536 | REGISTER_KERNEL_BUILDER(Name("BigMatMul").Device(DEVICE_CPU), BigMatMulOp); 537 | REGISTER_KERNEL_BUILDER(Name("BigMod").Device(DEVICE_CPU), BigModOp); 538 | REGISTER_KERNEL_BUILDER(Name("BigInv").Device(DEVICE_CPU), BigInvOp); 539 | -------------------------------------------------------------------------------- /tf_big/cc/ops/big_ops.cc: -------------------------------------------------------------------------------- 1 | #include "tensorflow/core/framework/common_shape_fns.h" 2 | #include "tensorflow/core/framework/op.h" 3 | #include "tensorflow/core/framework/shape_inference.h" 4 | 5 | REGISTER_OP("BigImport") 6 | 
.Attr("dtype: {int32, string, uint8}") 7 | .Input("in: dtype") 8 | .Output("val: variant") 9 | .SetIsStateful() 10 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 11 | ::tensorflow::shape_inference::ShapeHandle output; 12 | TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(0), 2, &output)); 13 | c->set_output(0, output); 14 | return ::tensorflow::Status::OK(); 15 | }); 16 | 17 | REGISTER_OP("BigImportLimbs") 18 | .Attr("dtype: {uint8, int32}") 19 | .Input("in: dtype") 20 | .Output("val: variant") 21 | .SetIsStateful() 22 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 23 | ::tensorflow::shape_inference::ShapeHandle input_shape = c->input(0); 24 | TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &input_shape)); 25 | 26 | ::tensorflow::shape_inference::ShapeHandle val_shape; 27 | TF_RETURN_IF_ERROR(c->Subshape(input_shape, 0, -1, &val_shape)); 28 | c->set_output(0, val_shape); 29 | 30 | return ::tensorflow::Status::OK(); 31 | }); 32 | 33 | REGISTER_OP("BigExport") 34 | .Attr("dtype: {int32, string, uint8}") 35 | .Input("val: variant") 36 | .Output("out: dtype") 37 | .SetIsStateful() 38 | .SetShapeFn(::tensorflow::shape_inference::UnchangedShape); 39 | 40 | REGISTER_OP("BigExportLimbs") 41 | .Attr("dtype: {int32, uint8}") 42 | .Input("val: variant") 43 | .Input("max_bitlen: int32") 44 | .Output("out: dtype") 45 | .SetIsStateful() 46 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 47 | ::tensorflow::shape_inference::ShapeHandle input_shape = c->input(0); 48 | TF_RETURN_IF_ERROR(c->WithRank(input_shape, 2, &input_shape)); 49 | 50 | ::tensorflow::shape_inference::ShapeHandle max_bitlen_shape = c->input(1); 51 | TF_RETURN_IF_ERROR(c->WithRank(max_bitlen_shape, 0, &max_bitlen_shape)); 52 | 53 | ::tensorflow::shape_inference::ShapeHandle expansion_shape = 54 | c->MakeShape({c->UnknownDim()}); 55 | ::tensorflow::shape_inference::ShapeHandle out_shape; 56 | TF_RETURN_IF_ERROR( 57 | c->Concatenate(input_shape, 
expansion_shape, &out_shape)); 58 | c->set_output(0, out_shape); 59 | 60 | return ::tensorflow::Status::OK(); 61 | }); 62 | 63 | REGISTER_OP("BigRandomUniform") 64 | .Input("shape: int32") 65 | .Input("maxval: variant") 66 | .Output("out: variant") 67 | .SetIsStateful() 68 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 69 | // TODO(Morten) `maxval` should be a scalar 70 | ::tensorflow::shape_inference::ShapeHandle out; 71 | TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out)); 72 | c->set_output(0, out); 73 | return ::tensorflow::Status::OK(); 74 | }); 75 | 76 | REGISTER_OP("BigRandomRsaModulus") 77 | .Input("bitlength: int32") 78 | .Output("p: variant") 79 | .Output("q: variant") 80 | .Output("n: variant") 81 | .SetIsStateful() 82 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 83 | ::tensorflow::shape_inference::ShapeHandle bitlength_shape = c->input(0); 84 | ::tensorflow::shape_inference::ShapeHandle scalar_shape = 85 | c->MakeShape({1, 1}); 86 | TF_RETURN_IF_ERROR(c->WithRank(bitlength_shape, 0, &bitlength_shape)); 87 | c->set_output(0, scalar_shape); 88 | c->set_output(1, scalar_shape); 89 | c->set_output(2, scalar_shape); 90 | return ::tensorflow::Status::OK(); 91 | }); 92 | 93 | REGISTER_OP("BigAdd") 94 | .Input("val0: variant") 95 | .Input("val1: variant") 96 | .Output("res: variant") 97 | .SetIsStateful() 98 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 99 | ::tensorflow::shape_inference::ShapeHandle val0 = c->input(0); 100 | ::tensorflow::shape_inference::ShapeHandle val1 = c->input(1); 101 | ::tensorflow::shape_inference::ShapeHandle res; 102 | TF_RETURN_IF_ERROR(c->Merge(val0, val1, &res)); 103 | c->set_output(0, res); 104 | return ::tensorflow::Status::OK(); 105 | }); 106 | 107 | REGISTER_OP("BigSub") 108 | .Input("val0: variant") 109 | .Input("val1: variant") 110 | .Output("res: variant") 111 | .SetIsStateful() 112 | 
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 113 | ::tensorflow::shape_inference::ShapeHandle val0 = c->input(0); 114 | ::tensorflow::shape_inference::ShapeHandle val1 = c->input(1); 115 | ::tensorflow::shape_inference::ShapeHandle res; 116 | TF_RETURN_IF_ERROR(c->Merge(val0, val1, &res)); 117 | c->set_output(0, res); 118 | return ::tensorflow::Status::OK(); 119 | }); 120 | 121 | REGISTER_OP("BigMul") 122 | .Input("val0: variant") 123 | .Input("val1: variant") 124 | .Output("res: variant") 125 | .SetIsStateful() 126 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 127 | ::tensorflow::shape_inference::ShapeHandle val0 = c->input(0); 128 | ::tensorflow::shape_inference::ShapeHandle val1 = c->input(1); 129 | ::tensorflow::shape_inference::ShapeHandle res; 130 | // NOTE: Bug - without this condition returns shape of [1,1,1,1] 131 | if ((c->Rank(val0) == 0) & (c->Rank(val1) == 0)) { 132 | c->set_output(0, c->MakeShape({1, 1})); 133 | } else { 134 | TF_RETURN_IF_ERROR(c->Merge(val0, val1, &res)); 135 | c->set_output(0, res); 136 | } 137 | return ::tensorflow::Status::OK(); 138 | }); 139 | 140 | REGISTER_OP("BigDiv") 141 | .Input("val0: variant") 142 | .Input("val1: variant") 143 | .Output("res: variant") 144 | .SetIsStateful() 145 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 146 | ::tensorflow::shape_inference::ShapeHandle val0 = c->input(0); 147 | ::tensorflow::shape_inference::ShapeHandle val1 = c->input(1); 148 | ::tensorflow::shape_inference::ShapeHandle res; 149 | 150 | TF_RETURN_IF_ERROR(c->Merge(val0, val1, &res)); 151 | c->set_output(0, res); 152 | return ::tensorflow::Status::OK(); 153 | }); 154 | 155 | REGISTER_OP("BigPow") 156 | .Attr("secure: bool") 157 | .Input("base: variant") 158 | .Input("exponent: variant") 159 | .Input("modulus: variant") 160 | .Output("res: variant") 161 | .SetIsStateful() 162 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 163 | 
::tensorflow::shape_inference::ShapeHandle base = c->input(0); 164 | // ::tensorflow::shape_inference::ShapeHandle exponent = c->input(1); 165 | // ::tensorflow::shape_inference::ShapeHandle modulus = c->input(2); 166 | // ::tensorflow::shape_inference::ShapeHandle res; 167 | // TODO(Morten) make sure shapes match 168 | c->set_output(0, base); 169 | return ::tensorflow::Status::OK(); 170 | }); 171 | 172 | // TODO(Morten) add shape inference function 173 | REGISTER_OP("BigMatMul") 174 | .Input("val0: variant") 175 | .Input("val1: variant") 176 | .Output("res: variant") 177 | .SetIsStateful(); 178 | 179 | REGISTER_OP("BigMod") 180 | .Input("val: variant") 181 | .Input("mod: variant") 182 | .Output("res: variant") 183 | .SetIsStateful() 184 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 185 | ::tensorflow::shape_inference::ShapeHandle val = c->input(0); 186 | ::tensorflow::shape_inference::ShapeHandle mod = c->input(1); 187 | TF_RETURN_IF_ERROR(c->WithRankAtMost(val, 2, &val)); 188 | // TODO(Morten) `mod` below should be a scalar 189 | TF_RETURN_IF_ERROR(c->WithRankAtMost(mod, 2, &mod)); 190 | c->set_output(0, val); 191 | return ::tensorflow::Status::OK(); 192 | }); 193 | 194 | REGISTER_OP("BigInv") 195 | .Input("val: variant") 196 | .Input("mod: variant") 197 | .Output("res: variant") 198 | .SetIsStateful() 199 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 200 | ::tensorflow::shape_inference::ShapeHandle val = c->input(0); 201 | ::tensorflow::shape_inference::ShapeHandle mod = c->input(1); 202 | TF_RETURN_IF_ERROR(c->WithRankAtMost(val, 2, &val)); 203 | // TODO(Morten) `mod` below should be a scalar 204 | TF_RETURN_IF_ERROR(c->WithRankAtMost(mod, 2, &mod)); 205 | c->set_output(0, val); 206 | return ::tensorflow::Status::OK(); 207 | }); 208 | -------------------------------------------------------------------------------- /tf_big/python/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tf-encrypted/tf-big/386184eb5aaa3db82781f78883339340de93b6f1/tf_big/python/__init__.py -------------------------------------------------------------------------------- /tf_big/python/ops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tf-encrypted/tf-big/386184eb5aaa3db82781f78883339340de93b6f1/tf_big/python/ops/__init__.py -------------------------------------------------------------------------------- /tf_big/python/ops/big_ops.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.python.framework.errors import NotFoundError 3 | from tensorflow.python.platform import resource_loader 4 | 5 | big_ops_libfile = resource_loader.get_path_to_datafile("_big_ops.so") 6 | big_ops = tf.load_op_library(big_ops_libfile) 7 | 8 | big_import = big_ops.big_import 9 | big_export = big_ops.big_export 10 | 11 | big_import_limbs = big_ops.big_import_limbs 12 | big_export_limbs = big_ops.big_export_limbs 13 | # 14 | big_random_uniform = big_ops.big_random_uniform 15 | big_random_rsa_modulus = big_ops.big_random_rsa_modulus 16 | 17 | big_add = big_ops.big_add 18 | big_sub = big_ops.big_sub 19 | big_mul = big_ops.big_mul 20 | big_div = big_ops.big_div 21 | big_pow = big_ops.big_pow 22 | big_matmul = big_ops.big_mat_mul 23 | big_mod = big_ops.big_mod 24 | big_inv = big_ops.big_inv 25 | -------------------------------------------------------------------------------- /tf_big/python/ops/big_ops_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | from absl.testing import parameterized 6 | 7 | from tf_big.python.ops.big_ops import big_add 8 | from tf_big.python.ops.big_ops import big_div 9 | from 
tf_big.python.ops.big_ops import big_export 10 | from tf_big.python.ops.big_ops import big_import 11 | from tf_big.python.ops.big_ops import big_matmul 12 | from tf_big.python.ops.big_ops import big_mod 13 | from tf_big.python.ops.big_ops import big_mul 14 | from tf_big.python.ops.big_ops import big_pow 15 | from tf_big.python.test import tf_execution_context 16 | 17 | 18 | class BigTest(parameterized.TestCase): 19 | """BigTest test""" 20 | 21 | @parameterized.parameters( 22 | {"run_eagerly": run_eagerly, "raw": raw, "dtype": dtype} 23 | for run_eagerly in (True, False) 24 | for raw, dtype in (([[43424]], tf.int32), ([[b"43424"]], tf.string),) 25 | ) 26 | def test_import_export(self, run_eagerly, raw, dtype): 27 | context = tf_execution_context(run_eagerly) 28 | with context.scope(): 29 | variant = big_import(raw) 30 | output = big_export(variant, dtype) 31 | assert context.evaluate(output) == raw 32 | 33 | @parameterized.parameters( 34 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 35 | ) 36 | def test_add(self, run_eagerly): 37 | a = "5453452435245245245242534" 38 | b = "1424132412341234123412341234134" 39 | expected = int(a) + int(b) 40 | 41 | context = tf_execution_context(run_eagerly) 42 | with context.scope(): 43 | 44 | a_var = big_import([[a]]) 45 | b_var = big_import([[b]]) 46 | c_var = big_add(a_var, b_var) 47 | c_str = big_export(c_var, tf.string) 48 | 49 | np.testing.assert_equal(int(context.evaluate(c_str)), expected) 50 | 51 | @parameterized.parameters( 52 | {"run_eagerly": run_eagerly, "secure": secure} 53 | for run_eagerly in (True, False) 54 | for secure in (True, False) 55 | ) 56 | def test_pow(self, run_eagerly, secure): 57 | base = "54" 58 | exp = "3434" 59 | modulus = "35" 60 | expected = pow(54, 3434, 35) 61 | 62 | context = tf_execution_context(run_eagerly) 63 | with context.scope(): 64 | 65 | base_var = big_import([[base]]) 66 | exp_var = big_import([[exp]]) 67 | mod_var = big_import([[modulus]]) 68 | out = big_pow(base_var, 
exp_var, mod_var, secure=secure) 69 | out_str = big_export(out, tf.string) 70 | 71 | np.testing.assert_equal(int(context.evaluate(out_str)), expected) 72 | 73 | @parameterized.parameters( 74 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 75 | ) 76 | def test_2d_matrix_add(self, run_eagerly): 77 | a = np.array([[5, 5], [5, 5]]).astype(np.int32) 78 | b = np.array([[6, 6], [6, 6]]).astype(np.int32) 79 | expected = a + b 80 | 81 | context = tf_execution_context(run_eagerly) 82 | with context.scope(): 83 | 84 | a_var = big_import(a) 85 | b_var = big_import(b) 86 | c_var = big_add(a_var, b_var) 87 | c_str = big_export(c_var, tf.int32) 88 | 89 | np.testing.assert_equal(context.evaluate(c_str), expected) 90 | 91 | @parameterized.parameters( 92 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 93 | ) 94 | def test_matmul(self, run_eagerly): 95 | a = np.array([[5, 5], [5, 5]]).astype(np.int32) 96 | b = np.array([[6, 6], [6, 6]]).astype(np.int32) 97 | expected = a.dot(b) 98 | 99 | context = tf_execution_context(run_eagerly) 100 | with context.scope(): 101 | 102 | a_var = big_import(a) 103 | b_var = big_import(b) 104 | c_var = big_matmul(a_var, b_var) 105 | c_str = big_export(c_var, tf.int32) 106 | 107 | np.testing.assert_equal(context.evaluate(c_str), expected) 108 | 109 | @parameterized.parameters( 110 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 111 | ) 112 | def test_mul(self, run_eagerly): 113 | a = np.array([[5, 5], [5, 5]]).astype(np.int32) 114 | b = np.array([[6, 6], [6, 6]]).astype(np.int32) 115 | expected = a * b 116 | 117 | context = tf_execution_context(run_eagerly) 118 | with context.scope(): 119 | 120 | a_var = big_import(a) 121 | b_var = big_import(b) 122 | c_var = big_mul(a_var, b_var) 123 | c_str = big_export(c_var, tf.int32) 124 | 125 | np.testing.assert_equal(context.evaluate(c_str), expected) 126 | 127 | @parameterized.parameters( 128 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 129 | ) 
130 | def test_div(self, run_eagerly): 131 | a = np.array([[10, 11], [15, 5]]).astype(np.int32) 132 | b = np.array([[5, 5], [5, 5]]).astype(np.int32) 133 | expected = a // b 134 | 135 | context = tf_execution_context(run_eagerly) 136 | with context.scope(): 137 | 138 | a_var = big_import(a) 139 | b_var = big_import(b) 140 | c_var = big_div(a_var, b_var) 141 | c_str = big_export(c_var, tf.int32) 142 | 143 | np.testing.assert_equal(context.evaluate(c_str), expected) 144 | 145 | @parameterized.parameters( 146 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 147 | ) 148 | def test_mod(self, run_eagerly): 149 | x = np.array([[123, 234], [345, 456]]).astype(np.int32) 150 | n = np.array([[37]]).astype(np.int32) 151 | expected = x % n 152 | 153 | context = tf_execution_context(run_eagerly) 154 | with context.scope(): 155 | 156 | x_big = big_import(x) 157 | n_big = big_import(n) 158 | y_big = big_mod(x_big, n_big) 159 | y_str = big_export(y_big, tf.int32) 160 | 161 | np.testing.assert_equal(context.evaluate(y_str), expected) 162 | 163 | 164 | if __name__ == "__main__": 165 | unittest.main() 166 | -------------------------------------------------------------------------------- /tf_big/python/tensor.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | from tensorflow.python.client import session as tf_session 6 | from tensorflow.python.framework import ops as tf_ops 7 | from tensorflow.python.keras.utils import tf_utils 8 | 9 | import tf_big.python.ops.big_ops as ops 10 | 11 | 12 | class Tensor(object): 13 | is_tensor_like = True # needed to pass tf.is_tensor, new as of TF 2.2+ 14 | 15 | def __init__(self, value): 16 | assert isinstance(value, tf.Tensor), type(value) 17 | assert value.dtype is tf.variant, value.dtype 18 | self._raw = value 19 | 20 | @property 21 | def shape(self): 22 | return self._raw.shape 23 | 24 | @property 25 | def 
name(self): 26 | return self._raw.name 27 | 28 | @property 29 | def dtype(self): 30 | return tf.int32 31 | # return tf.string 32 | 33 | def eval(self, session=None, dtype=None): 34 | tf_tensor = export_tensor(self, dtype=dtype) 35 | evaluated = tf_tensor.eval(session=session) 36 | if tf_tensor.dtype is tf.string: 37 | return evaluated.astype(str) 38 | return evaluated 39 | 40 | def __add__(self, other): 41 | other = import_tensor(other) 42 | # TODO (Yann) This broadcast should be implemented 43 | # in big_kernels.cc 44 | self, other = broadcast(self, other) 45 | res = ops.big_add(self._raw, other._raw) 46 | return Tensor(res) 47 | 48 | def __radd__(self, other): 49 | other = import_tensor(other) 50 | # TODO (Yann) This broadcast should be implemented 51 | # in big_kernels.cc 52 | self, other = broadcast(self, other) 53 | res = ops.big_add(self._raw, other._raw) 54 | return Tensor(res) 55 | 56 | def __sub__(self, other): 57 | other = import_tensor(other) 58 | # TODO (Yann) This broadcast should be implemented 59 | # in big_kernels.cc 60 | self, other = broadcast(self, other) 61 | res = ops.big_sub(self._raw, other._raw) 62 | return Tensor(res) 63 | 64 | def __mul__(self, other): 65 | other = import_tensor(other) 66 | # TODO (Yann) This broadcast should be implemented 67 | # in big_kernels.cc 68 | self, other = broadcast(self, other) 69 | res = ops.big_mul(self._raw, other._raw) 70 | return Tensor(res) 71 | 72 | def __floordiv__(self, other): 73 | other = import_tensor(other) 74 | # TODO (Yann) This broadcast should be implemented 75 | # in big_kernels.cc 76 | self, other = broadcast(self, other) 77 | res = ops.big_div(self._raw, other._raw) 78 | return Tensor(res) 79 | 80 | def pow(self, exponent, modulus=None, secure=None): 81 | # TODO (Yann) This broadcast should be implemented 82 | # in big_kernels.cc 83 | exponent = import_tensor(exponent) 84 | modulus = import_tensor(modulus) 85 | self, exponent = broadcast(self, exponent) 86 | res = ops.big_pow( 87 | 
base=self._raw, 88 | exponent=exponent._raw, 89 | modulus=modulus._raw if modulus else None, 90 | secure=secure if secure is not None else get_secure_default(), 91 | ) 92 | return Tensor(res) 93 | 94 | def __pow__(self, exponent): 95 | return self.pow(exponent) 96 | 97 | def __mod__(self, modulus): 98 | modulus = import_tensor(modulus) 99 | res = ops.big_mod(val=self._raw, mod=modulus._raw) 100 | return Tensor(res) 101 | 102 | def inv(self, modulus): 103 | modulus = import_tensor(modulus) 104 | res = ops.big_inv(val=self._raw, mod=modulus._raw) 105 | return Tensor(res) 106 | 107 | 108 | def _fetch_function(big_tensor): 109 | unwrapped = [export_tensor(big_tensor, dtype=tf.string)] 110 | rewrapper = lambda components_fetched: components_fetched[0].astype(str) 111 | return unwrapped, rewrapper 112 | 113 | 114 | def _feed_function(big_tensor, feed_value): 115 | return [(big_tensor._raw, feed_value)] 116 | 117 | 118 | def _feed_function_for_partial_run(big_tensor): 119 | return [big_tensor._raw] 120 | 121 | 122 | # this allows tf_big.Tensor to be passed directly to tf.Session.run, 123 | # unwrapping and converting the result as needed 124 | tf_session.register_session_run_conversion_functions( 125 | tensor_type=Tensor, 126 | fetch_function=_fetch_function, 127 | feed_function=_feed_function, 128 | feed_function_for_partial_run=_feed_function_for_partial_run, 129 | ) 130 | 131 | 132 | def _tensor_conversion_function(tensor, dtype=None, name=None, as_ref=False): 133 | assert name is None, "Not implemented, name='{}'".format(name) 134 | assert not as_ref, "Not implemented, as_ref={}".format(as_ref) 135 | assert dtype in [tf.int32, None], dtype 136 | return export_tensor(tensor, dtype=dtype) 137 | 138 | 139 | # TODO(Morten) 140 | # this allows implicit convertion of tf_big.Tensor to tf.Tensor, 141 | # but since the output dtype is determined by the outer context 142 | # we essentially have to export with the implied risk of data loss 143 | 
tf_ops.register_tensor_conversion_function(Tensor, _tensor_conversion_function)


# this allows tf_big.Tensor to be plumbed through Keras layers
# but seems only truly useful when used in conjunction with
# `register_tensor_conversion_function`
tf_utils.register_symbolic_tensor_type(Tensor)


def constant(tensor):
    """Create a tf_big.Tensor from a concrete (non-symbolic) value."""
    assert isinstance(tensor, (np.ndarray, list, tuple)), type(tensor)
    return import_tensor(tensor)


def _convert_to_numpy_tensor(tensor):
    """Coerce supported Python values to a NumPy array.

    Scalars (int or str) become a 1x1 array so the rank-2 checks in the
    import paths below pass.
    """
    if isinstance(tensor, np.ndarray):
        return tensor

    if isinstance(tensor, (int, str)):
        return np.array([[tensor]])

    if isinstance(tensor, (list, tuple)):
        return np.array(tensor)

    raise ValueError("Cannot convert to NumPy tensor: '{}'".format(type(tensor)))


def _import_tensor_numpy(tensor):
    """Import a NumPy (or NumPy-convertible) value as a tf_big.Tensor."""
    tensor = _convert_to_numpy_tensor(tensor)

    # int64 and object arrays (e.g. arbitrary-precision Python ints) are
    # routed through a byte-string representation to avoid precision loss.
    if np.issubdtype(tensor.dtype, np.int64) or np.issubdtype(tensor.dtype, np.object_):
        tensor = tensor.astype(np.string_)
    elif not (
        np.issubdtype(tensor.dtype, np.int32)
        or np.issubdtype(tensor.dtype, np.string_)
        or np.issubdtype(tensor.dtype, np.unicode_)
    ):
        raise ValueError("Unsupported dtype '{}'.".format(tensor.dtype))

    if len(tensor.shape) != 2:
        raise ValueError("Tensors must have rank 2.")

    return Tensor(ops.big_import(tensor))


def _import_tensor_tensorflow(tensor):
    """Import a tf.Tensor as a tf_big.Tensor."""
    # int64 is stringified for the same precision reasons as in the
    # NumPy path above.
    if tensor.dtype in [tf.int64]:
        tensor = tf.as_string(tensor)
    elif tensor.dtype not in [tf.uint8, tf.int32, tf.string]:
        raise ValueError("Unsupported dtype '{}'".format(tensor.dtype))

    if len(tensor.shape) != 2:
        raise ValueError("Tensor must have rank 2.")

    return Tensor(ops.big_import(tensor))


def import_tensor(tensor):
    """Convert `tensor` to a tf_big.Tensor; passes through existing ones."""
    if isinstance(tensor, Tensor):
        return tensor
    if isinstance(tensor,
tf.Tensor): 204 | return _import_tensor_tensorflow(tensor) 205 | return _import_tensor_numpy(tensor) 206 | 207 | 208 | def export_tensor(tensor, dtype=None): 209 | assert isinstance(tensor, Tensor), type(value) 210 | 211 | dtype = dtype or tf.string 212 | if dtype not in [tf.int32, tf.string]: 213 | raise ValueError("Unsupported dtype '{}'".format(dtype)) 214 | 215 | return ops.big_export(tensor._raw, dtype=dtype) 216 | 217 | 218 | def _import_limbs_tensor_tensorflow(limbs_tensor): 219 | if limbs_tensor.dtype not in [tf.uint8, tf.int32]: 220 | raise ValueError( 221 | "Not implemented limb conversion for dtype {}".format(limbs_tensor.dtype) 222 | ) 223 | 224 | if len(limbs_tensor.shape) != 3: 225 | raise ValueError("Limbs tensors must be rank 3.") 226 | 227 | return Tensor(ops.big_import_limbs(limbs_tensor)) 228 | 229 | 230 | def _import_limbs_tensor_numpy(limbs_tensor): 231 | limbs_tensor = _convert_to_numpy_tensor(limbs_tensor) 232 | 233 | if len(tensor.shape) != 3: 234 | raise ValueError("Limbs tensors must have rank 3.") 235 | 236 | if not ( 237 | np.issubdtype(limbs_tensor.dtype, np.int32) 238 | or np.issubdtype(limbs_tensor.dtype, np.uint8) 239 | ): 240 | raise ValueError( 241 | "Not implemented limb conversion for dtype {}".format(tensor.dtype) 242 | ) 243 | 244 | return Tensor(ops.big_import_limbs(limbs_tensor)) 245 | 246 | 247 | def import_limbs_tensor(limbs_tensor): 248 | if isinstance(limbs_tensor, tf.Tensor): 249 | return _import_limbs_tensor_tensorflow(limbs_tensor) 250 | return _import_limbs_tensor_numpy(limbs_tensor) 251 | 252 | 253 | def export_limbs_tensor(tensor, dtype=None, max_bitlen=None): 254 | assert isinstance(tensor, Tensor), type(value) 255 | 256 | # Indicate missing value as negative 257 | max_bitlen = max_bitlen or -1 258 | 259 | dtype = dtype or tf.uint8 260 | if dtype not in [tf.uint8, tf.int32]: 261 | raise ValueError("Unsupported dtype '{}'".format(dtype)) 262 | 263 | return ops.big_export_limbs(tensor._raw, dtype=dtype, 
max_bitlen=max_bitlen) 264 | 265 | 266 | _SECURE = True 267 | 268 | 269 | def set_secure_default(value): 270 | global _SECURE 271 | _SECURE = value 272 | 273 | 274 | def get_secure_default(): 275 | return _SECURE 276 | 277 | 278 | def random_uniform(shape, maxval): 279 | if not isinstance(maxval, Tensor): 280 | maxval = import_tensor(maxval) 281 | r_raw = ops.big_random_uniform(shape, maxval._raw) 282 | return Tensor(r_raw) 283 | 284 | 285 | def random_rsa_modulus(bitlength): 286 | p_raw, q_raw, n_raw = ops.big_random_rsa_modulus(bitlength) 287 | return Tensor(p_raw), Tensor(q_raw), Tensor(n_raw) 288 | 289 | 290 | def add(x, y): 291 | # TODO(Morten) lifting etc 292 | return x + y 293 | 294 | 295 | def sub(x, y): 296 | # TODO(Morten) lifting etc 297 | return x - y 298 | 299 | 300 | def mul(x, y): 301 | # TODO(Morten) lifting etc 302 | return x * y 303 | 304 | 305 | def pow(base, exponent, modulus=None, secure=None): 306 | # TODO(Morten) lifting etc 307 | assert isinstance(base, Tensor) 308 | return base.pow(exponent=exponent, modulus=modulus, secure=secure) 309 | 310 | 311 | def matmul(x, y): 312 | # TODO(Morten) lifting etc 313 | return x.matmul(y) 314 | 315 | 316 | def mod(x, n): 317 | return x.mod(n) 318 | 319 | 320 | def inv(x, n): 321 | return x.inv(n) 322 | 323 | 324 | def broadcast(x, y): 325 | 326 | x_rank = x.shape.rank 327 | y_rank = y.shape.rank 328 | x_nb_el = x.shape.num_elements() 329 | y_nb_el = y.shape.num_elements() 330 | 331 | # e.g broadcast [1] with [1, 1] 332 | if x_rank != y_rank: 333 | 334 | if x_rank < y_rank: 335 | x = export_tensor(x) 336 | x = tf.broadcast_to(x, y.shape) 337 | x = import_tensor(x) 338 | 339 | elif y_rank < x_rank: 340 | y = export_tensor(y) 341 | y = tf.broadcast_to(y, x.shape) 342 | y = import_tensor(y) 343 | 344 | return x, y 345 | 346 | # e.g broadcast [1, 1] with [1, 2] 347 | elif x_nb_el != y_nb_el: 348 | 349 | if x_nb_el < y_nb_el: 350 | x = export_tensor(x) 351 | x = tf.broadcast_to(x, y.shape) 352 | x = 
import_tensor(x) 353 | 354 | elif x_nb_el > y_nb_el: 355 | y = export_tensor(y) 356 | y = tf.broadcast_to(y, x.shape) 357 | y = import_tensor(y) 358 | 359 | return x, y 360 | 361 | return x, y 362 | -------------------------------------------------------------------------------- /tf_big/python/tensor_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | from absl.testing import parameterized 6 | 7 | from tf_big.python.tensor import export_limbs_tensor 8 | from tf_big.python.tensor import export_tensor 9 | from tf_big.python.tensor import import_limbs_tensor 10 | from tf_big.python.tensor import import_tensor 11 | from tf_big.python.tensor import pow 12 | from tf_big.python.tensor import random_rsa_modulus 13 | from tf_big.python.tensor import random_uniform 14 | from tf_big.python.test import tf_execution_context 15 | 16 | 17 | class EvaluationTest(parameterized.TestCase): 18 | @parameterized.parameters( 19 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 20 | ) 21 | def test_eval(self, run_eagerly): 22 | x_raw = np.array([[123456789123456789123456789, 123456789123456789123456789]]) 23 | 24 | context = tf_execution_context(run_eagerly) 25 | with context.scope(): 26 | x = import_tensor(x_raw) 27 | assert x.shape == x_raw.shape 28 | x = export_tensor(x) 29 | assert x.shape == x_raw.shape 30 | 31 | np.testing.assert_array_equal( 32 | context.evaluate(x).astype(str), x_raw.astype(str) 33 | ) 34 | 35 | 36 | class RandomTest(parameterized.TestCase): 37 | @parameterized.parameters( 38 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 39 | ) 40 | def test_uniform_random(self, run_eagerly): 41 | shape = (2, 2) 42 | maxval = 2 ** 100 43 | 44 | context = tf_execution_context(run_eagerly) 45 | with context.scope(): 46 | x = random_uniform(shape=shape, maxval=maxval) 47 | x = export_tensor(x) 48 | 49 | assert x.shape == shape 50 | 
assert context.evaluate(x).shape == shape 51 | 52 | @parameterized.parameters( 53 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 54 | ) 55 | def test_random_rsa_modulus(self, run_eagerly): 56 | bitlength = 128 57 | expected_shape = (1, 1) 58 | 59 | context = tf_execution_context(run_eagerly) 60 | with context.scope(): 61 | p, q, n = random_rsa_modulus(bitlength=bitlength) 62 | 63 | p = export_tensor(p) 64 | q = export_tensor(q) 65 | n = export_tensor(n) 66 | 67 | assert p.shape == expected_shape 68 | assert q.shape == expected_shape 69 | assert n.shape == expected_shape 70 | 71 | assert isinstance(context.evaluate(p)[0][0], bytes) 72 | assert isinstance(context.evaluate(q)[0][0], bytes) 73 | assert isinstance(context.evaluate(n)[0][0], bytes) 74 | 75 | 76 | class ArithmeticTest(parameterized.TestCase): 77 | @parameterized.parameters( 78 | { 79 | "run_eagerly": run_eagerly, 80 | "op_name": op_name, 81 | "op": op, 82 | "x_raw": x_raw, 83 | "y_raw": y_raw, 84 | } 85 | for run_eagerly in (True, False) 86 | for op_name, op in ( 87 | ("add", lambda x, y: x + y), 88 | ("sub", lambda x, y: x - y), 89 | ("mul", lambda x, y: x * y), 90 | ) 91 | for x_raw in ( 92 | np.array([[123456789123456789687293389]]), 93 | np.array([[123456789123456789687293389, 123456789123456789687293432]]), 94 | ) 95 | for y_raw in ( 96 | np.array([[123456789123456789687293389, 123456789123456789687293432]]), 97 | np.array([[123456789123456789687293389]]), 98 | ) 99 | ) 100 | def test_op(self, run_eagerly, op_name, op, x_raw, y_raw): 101 | z_raw = op(x_raw, y_raw) 102 | 103 | context = tf_execution_context(run_eagerly) 104 | with context.scope(): 105 | 106 | x = import_tensor(x_raw) 107 | y = import_tensor(y_raw) 108 | z = op(x, y) 109 | 110 | z = export_tensor(z) 111 | 112 | np.testing.assert_array_equal( 113 | context.evaluate(z).astype(str), z_raw.astype(str) 114 | ) 115 | 116 | @parameterized.parameters( 117 | {"run_eagerly": run_eagerly, "x_raw": x_raw, "y_raw": y_raw} 118 | for 
run_eagerly in (True, False) 119 | for x_raw in (np.array([[3]]), np.array([[3, 4]])) 120 | for y_raw in (np.array([[4, 2]]), np.array([[2]])) 121 | ) 122 | def test_pow(self, run_eagerly, x_raw, y_raw): 123 | m_raw = np.array([[5]]) 124 | 125 | z_raw = np.mod(np.power(x_raw, y_raw), m_raw) 126 | 127 | context = tf_execution_context(run_eagerly) 128 | with context.scope(): 129 | 130 | x = import_tensor(x_raw) 131 | y = import_tensor(y_raw) 132 | m = import_tensor(m_raw) 133 | z = pow(x, y, m) 134 | 135 | z = export_tensor(z) 136 | 137 | np.testing.assert_array_equal( 138 | context.evaluate(z).astype(str), z_raw.astype(str) 139 | ) 140 | 141 | 142 | class NumberTheoryTest(parameterized.TestCase): 143 | @parameterized.parameters( 144 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 145 | ) 146 | def test_mod(self, run_eagerly): 147 | x_raw = np.array([[123456789123456789123456789, 123456789123456789123456789]]) 148 | n_raw = np.array([[10000]]) 149 | y_raw = x_raw % n_raw 150 | 151 | context = tf_execution_context(run_eagerly) 152 | with context.scope(): 153 | 154 | x = import_tensor(x_raw) 155 | n = import_tensor(n_raw) 156 | y = x % n 157 | y = export_tensor(y) 158 | 159 | np.testing.assert_array_equal( 160 | context.evaluate(y).astype(str), y_raw.astype(str) 161 | ) 162 | 163 | @parameterized.parameters( 164 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 165 | ) 166 | def test_inv(self, run_eagerly): 167 | def egcd(a, b): 168 | if a == 0: 169 | return (b, 0, 1) 170 | g, y, x = egcd(b % a, a) 171 | return (g, x - (b // a) * y, y) 172 | 173 | def inv(a, m): 174 | g, b, _ = egcd(a, m) 175 | return b % m 176 | 177 | x_raw = np.array([[123456789123456789123456789]]) 178 | n_raw = np.array([[10000000]]) 179 | y_raw = np.array([[inv(123456789123456789123456789, 10000000)]]) 180 | 181 | context = tf_execution_context(run_eagerly) 182 | with context.scope(): 183 | 184 | x = import_tensor(x_raw) 185 | n = import_tensor(n_raw) 186 | y = 
x.inv(n) 187 | y = export_tensor(y) 188 | 189 | np.testing.assert_array_equal( 190 | context.evaluate(y).astype(str), y_raw.astype(str) 191 | ) 192 | 193 | 194 | class ConvertTest(parameterized.TestCase): 195 | @parameterized.parameters( 196 | { 197 | "x": x, 198 | "tf_cast": tf_cast, 199 | "np_cast": np_cast, 200 | "expected": expected, 201 | "run_eagerly": run_eagerly, 202 | "convert_to_tf_tensor": convert_to_tf_tensor, 203 | } 204 | for x, tf_cast, np_cast, expected in ( 205 | ( 206 | np.array([[1, 2, 3, 4]]).astype(np.int32), 207 | tf.int32, 208 | None, 209 | np.array([[1, 2, 3, 4]]).astype(np.int32), 210 | ), 211 | ( 212 | np.array([[1, 2, 3, 4]]).astype(np.int64), 213 | tf.int32, 214 | None, 215 | np.array([[1, 2, 3, 4]]).astype(np.int32), 216 | ), 217 | ( 218 | np.array( 219 | [["123456789123456789123456789", "123456789123456789123456789"]] 220 | ), 221 | tf.string, 222 | str, 223 | np.array( 224 | [["123456789123456789123456789", "123456789123456789123456789"]] 225 | ).astype(str), 226 | ), 227 | ( 228 | np.array( 229 | [[b"123456789123456789123456789", b"123456789123456789123456789"]] 230 | ), 231 | tf.string, 232 | str, 233 | np.array( 234 | [[b"123456789123456789123456789", b"123456789123456789123456789"]] 235 | ).astype(str), 236 | ), 237 | ) 238 | for run_eagerly in (True, False) 239 | for convert_to_tf_tensor in (True, False) 240 | ) 241 | def test_foo( 242 | self, x, tf_cast, np_cast, expected, convert_to_tf_tensor, run_eagerly, 243 | ): 244 | 245 | context = tf_execution_context(run_eagerly) 246 | with context.scope(): 247 | 248 | y = tf.convert_to_tensor(x) if convert_to_tf_tensor else x 249 | y = import_tensor(y) 250 | z = export_tensor(y, dtype=tf_cast) 251 | 252 | actual = context.evaluate(z) 253 | actual = actual.astype(np_cast) if np_cast else actual 254 | assert ( 255 | actual.dtype == expected.dtype 256 | ), "'{}' did not match expected '{}'".format(actual.dtype, expected.dtype) 257 | np.testing.assert_array_equal(actual, expected) 258 | 259 
| @parameterized.parameters( 260 | {"run_eagerly": run_eagerly} for run_eagerly in (True, False) 261 | ) 262 | def test_is_tensor(self, run_eagerly): 263 | context = tf_execution_context(run_eagerly) 264 | 265 | with context.scope(): 266 | x = import_tensor(np.array([[10, 20]])) 267 | 268 | assert tf.is_tensor(x) 269 | 270 | def test_register_tensor_conversion_function(self): 271 | context = tf_execution_context(False) 272 | 273 | with context.scope(): 274 | x = import_tensor(np.array([[10, 20]])) 275 | y = tf.convert_to_tensor(np.array([[30, 40]])) 276 | z = x + y 277 | 278 | np.testing.assert_array_equal(context.evaluate(z), np.array([["40", "60"]])) 279 | 280 | def test_convert_to_tensor(self): 281 | context = tf_execution_context(False) 282 | 283 | with context.scope(): 284 | x = import_tensor(np.array([[10, 20]])) 285 | y = tf.convert_to_tensor(x) 286 | 287 | assert y.dtype is tf.string 288 | 289 | @parameterized.parameters( 290 | { 291 | "run_eagerly": run_eagerly, 292 | "x_np": x_np, 293 | "tf_type": tf_type, 294 | "max_bitlen": max_bitlen, 295 | "tf_shape": tf_shape, 296 | } 297 | for run_eagerly in (True, False) 298 | for x_np, tf_type, max_bitlen, tf_shape in [ 299 | (np.array([[10, 20]]), tf.int32, None, 2), 300 | (np.array([[10, 20]]), tf.int32, 16, 2), 301 | (np.array([[10, 20]]), tf.uint8, None, 5), 302 | (np.array([[10, 20]]), tf.uint8, 16, 6), 303 | ] 304 | ) 305 | def test_limb_conversion(self, run_eagerly, x_np, tf_type, max_bitlen, tf_shape): 306 | context = tf_execution_context(run_eagerly) 307 | 308 | with context.scope(): 309 | x = import_tensor(x_np) 310 | assert x.shape.as_list() == [1, 2], x.shape 311 | x_limbs = export_limbs_tensor(x, dtype=tf_type, max_bitlen=max_bitlen) 312 | assert x_limbs.shape.as_list() == x.shape.as_list() + ( 313 | [tf_shape] if run_eagerly else [None] 314 | ), x_limbs.shape 315 | x_norm = import_limbs_tensor(x_limbs) 316 | assert x_norm.shape.as_list() == x.shape.as_list(), x_norm.shape 317 | 318 | y = 
import_tensor(np.array([[30, 40]])) 319 | assert y.shape.as_list() == [1, 2], y.shape 320 | y_limbs = export_limbs_tensor(y, dtype=tf_type, max_bitlen=max_bitlen) 321 | assert y_limbs.shape.as_list() == y.shape.as_list() + ( 322 | [tf_shape] if run_eagerly else [None] 323 | ), y_limbs.shape 324 | y_norm = import_limbs_tensor(y_limbs) 325 | assert y_norm.shape.as_list() == y.shape.as_list(), y_norm.shape 326 | 327 | z = x_norm + y_norm 328 | res = export_tensor(z) 329 | 330 | np.testing.assert_array_equal( 331 | context.evaluate(res).astype(str), np.array([["40", "60"]]) 332 | ) 333 | 334 | 335 | if __name__ == "__main__": 336 | unittest.main() 337 | -------------------------------------------------------------------------------- /tf_big/python/test/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | 4 | py_library( 5 | name = "test_py", 6 | srcs = ([ 7 | "__init__.py", 8 | "execution_context.py", 9 | ]), 10 | ) 11 | 12 | py_test( 13 | name = "execution_context_py_test", 14 | srcs = [ 15 | "execution_context_test.py", 16 | ], 17 | main = "execution_context_test.py", 18 | deps = [ 19 | ":test_py", 20 | ], 21 | ) 22 | -------------------------------------------------------------------------------- /tf_big/python/test/__init__.py: -------------------------------------------------------------------------------- 1 | from .execution_context import tf_execution_context 2 | 3 | __all__ = [ 4 | "tf_execution_context", 5 | ] 6 | -------------------------------------------------------------------------------- /tf_big/python/test/execution_context.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | 3 | import tensorflow as tf 4 | 5 | 6 | class EagerExecutionContext: 7 | def scope(self): 8 | return contextlib.suppress() 9 | 10 | def evaluate(self, value): 11 | return value.numpy() 12 | 13 | 14 | class 
GraphExecutionContext: 15 | def __init__(self): 16 | self._graph = None 17 | self._session = None 18 | 19 | @property 20 | def graph(self): 21 | if self._graph is None: 22 | self._graph = tf.Graph() 23 | return self._graph 24 | 25 | @property 26 | def session(self): 27 | if self._session is None: 28 | with self._graph.as_default(): 29 | self._session = tf.compat.v1.Session() 30 | return self._session 31 | 32 | def scope(self): 33 | return self.graph.as_default() 34 | 35 | def evaluate(self, value): 36 | return self.session.run(value) 37 | 38 | 39 | def tf_execution_context(run_eagerly): 40 | if run_eagerly: 41 | return EagerExecutionContext() 42 | return GraphExecutionContext() 43 | -------------------------------------------------------------------------------- /tf_big/python/test/execution_context_test.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-docstring 2 | import unittest 3 | 4 | import numpy as np 5 | import tensorflow as tf 6 | from absl.testing import parameterized 7 | 8 | from tf_big.python.test import tf_execution_context 9 | 10 | 11 | class TestExecutionContext(parameterized.TestCase): 12 | @parameterized.parameters({"run_eagerly": True}, {"run_eagerly": False}) 13 | def test_tf_execution_mode(self, run_eagerly): 14 | context = tf_execution_context(run_eagerly) 15 | with context.scope(): 16 | x = tf.fill(dims=(2, 2), value=5.0) 17 | assert tf.executing_eagerly() == run_eagerly 18 | 19 | assert isinstance(x, tf.Tensor) 20 | actual_result = context.evaluate(x) 21 | assert isinstance(actual_result, np.ndarray) 22 | 23 | expected_result = np.array([[5.0, 5.0], [5.0, 5.0]]) 24 | np.testing.assert_equal(actual_result, expected_result) 25 | 26 | 27 | if __name__ == "__main__": 28 | unittest.main() 29 | -------------------------------------------------------------------------------- /third_party/gmp/libgmp.BUILD: -------------------------------------------------------------------------------- 
1 | cc_library( 2 | name = "lib", 3 | srcs = select({ 4 | "@bazel_tools//src/conditions:darwin": [ 5 | "lib/libgmp.a", 6 | "lib/libgmpxx.a", 7 | ], 8 | "//conditions:default": [ 9 | "lib/libgmpxx.a", 10 | "lib/libgmp.a", 11 | ] 12 | }), 13 | hdrs = ["include/gmp.h", "include/gmpxx.h"], 14 | visibility = ["//visibility:public"], 15 | strip_include_prefix = "include" 16 | ) 17 | -------------------------------------------------------------------------------- /third_party/tf/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tf-encrypted/tf-big/386184eb5aaa3db82781f78883339340de93b6f1/third_party/tf/BUILD -------------------------------------------------------------------------------- /third_party/tf/BUILD.tpl: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | cc_library( 4 | name = "tf_header_lib", 5 | hdrs = [":tf_header_include"], 6 | includes = ["include"], 7 | visibility = ["//visibility:public"], 8 | ) 9 | 10 | cc_library( 11 | name = "libtensorflow_framework", 12 | srcs = [":libtensorflow_framework.so"], 13 | #data = ["lib/libtensorflow_framework.so"], 14 | visibility = ["//visibility:public"], 15 | ) 16 | 17 | %{TF_HEADER_GENRULE} 18 | %{TF_SHARED_LIBRARY_GENRULE} -------------------------------------------------------------------------------- /third_party/tf/tf_configure.bzl: -------------------------------------------------------------------------------- 1 | """Setup TensorFlow as external dependency""" 2 | 3 | _TF_HEADER_DIR = "TF_HEADER_DIR" 4 | _TF_SHARED_LIBRARY_DIR = "TF_SHARED_LIBRARY_DIR" 5 | _TF_SHARED_LIBRARY_NAME = "TF_SHARED_LIBRARY_NAME" 6 | 7 | def _tpl(repository_ctx, tpl, substitutions = {}, out = None): 8 | if not out: 9 | out = tpl 10 | repository_ctx.template( 11 | out, 12 | Label("//third_party/tf:%s.tpl" % tpl), 13 | substitutions, 14 | ) 15 | 16 | def _fail(msg): 17 | 
"""Output failure message when auto configuration fails.""" 18 | red = "\033[0;31m" 19 | no_color = "\033[0m" 20 | fail("%sPython Configuration Error:%s %s\n" % (red, no_color, msg)) 21 | 22 | def _is_windows(repository_ctx): 23 | """Returns true if the host operating system is windows.""" 24 | os_name = repository_ctx.os.name.lower() 25 | if os_name.find("windows") != -1: 26 | return True 27 | return False 28 | 29 | def _execute( 30 | repository_ctx, 31 | cmdline, 32 | error_msg = None, 33 | error_details = None, 34 | empty_stdout_fine = False): 35 | """Executes an arbitrary shell command. 36 | Args: 37 | repository_ctx: the repository_ctx object 38 | cmdline: list of strings, the command to execute 39 | error_msg: string, a summary of the error if the command fails 40 | error_details: string, details about the error or steps to fix it 41 | empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise 42 | it's an error 43 | Return: 44 | the result of repository_ctx.execute(cmdline) 45 | """ 46 | result = repository_ctx.execute(cmdline) 47 | if result.stderr or not (empty_stdout_fine or result.stdout): 48 | _fail("\n".join([ 49 | error_msg.strip() if error_msg else "Repository command failed", 50 | result.stderr.strip(), 51 | error_details if error_details else "", 52 | ])) 53 | return result 54 | 55 | def _read_dir(repository_ctx, src_dir): 56 | """Returns a string with all files in a directory. 57 | Finds all files inside a directory, traversing subfolders and following 58 | symlinks. The returned string contains the full path of all files 59 | separated by line breaks. 60 | """ 61 | if _is_windows(repository_ctx): 62 | src_dir = src_dir.replace("/", "\\") 63 | find_result = _execute( 64 | repository_ctx, 65 | ["cmd.exe", "/c", "dir", src_dir, "/b", "/s", "/a-d"], 66 | empty_stdout_fine = True, 67 | ) 68 | 69 | # src_files will be used in genrule.outs where the paths must 70 | # use forward slashes. 
71 | result = find_result.stdout.replace("\\", "/") 72 | else: 73 | find_result = _execute( 74 | repository_ctx, 75 | ["find", src_dir, "-follow", "-type", "f"], 76 | empty_stdout_fine = True, 77 | ) 78 | result = find_result.stdout 79 | return result 80 | 81 | def _genrule(genrule_name, command, outs): 82 | """Returns a string with a genrule. 83 | 84 | Genrule executes the given command and produces the given outputs. 85 | 86 | Args: 87 | genrule_name: A unique name for genrule target. 88 | command: The command to run. 89 | outs: A list of files generated by this rule. 90 | 91 | Returns: 92 | A genrule target. 93 | """ 94 | return ( 95 | "genrule(\n" + 96 | ' name = "' + 97 | genrule_name + '",\n' + 98 | " outs = [\n" + 99 | outs + 100 | "\n ],\n" + 101 | ' cmd = """\n' + 102 | command + 103 | '\n """,\n' + 104 | ")\n" 105 | ) 106 | 107 | def _norm_path(path): 108 | """Returns a path with '/' and remove the trailing slash.""" 109 | path = path.replace("\\", "/") 110 | if path[-1] == "/": 111 | path = path[:-1] 112 | return path 113 | 114 | def _symlink_genrule_for_dir( 115 | repository_ctx, 116 | src_dir, 117 | dest_dir, 118 | genrule_name, 119 | src_files = [], 120 | dest_files = [], 121 | tf_pip_dir_rename_pair = []): 122 | """Returns a genrule to symlink(or copy if on Windows) a set of files. 123 | 124 | If src_dir is passed, files will be read from the given directory; otherwise 125 | we assume files are in src_files and dest_files. 126 | 127 | Args: 128 | repository_ctx: the repository_ctx object. 129 | src_dir: source directory. 130 | dest_dir: directory to create symlink in. 131 | genrule_name: genrule name. 132 | src_files: list of source files instead of src_dir. 133 | dest_files: list of corresonding destination files. 134 | tf_pip_dir_rename_pair: list of the pair of tf pip parent directory to 135 | replace. 
For example, in TF pip package, the source code is under 136 | "tensorflow_core", and we might want to replace it with 137 | "tensorflow" to match the header includes. 138 | 139 | Returns: 140 | genrule target that creates the symlinks. 141 | """ 142 | # Check that tf_pip_dir_rename_pair has the right length 143 | tf_pip_dir_rename_pair_len = len(tf_pip_dir_rename_pair) 144 | if tf_pip_dir_rename_pair_len != 0 and tf_pip_dir_rename_pair_len !=2: 145 | _fail("The size of argument tf_pip_dir_rename_pair should be either 0 or 2, but %d is given." % tf_pip_dir_rename_pair_len) 146 | 147 | if src_dir != None: 148 | src_dir = _norm_path(src_dir) 149 | dest_dir = _norm_path(dest_dir) 150 | files = "\n".join(sorted(_read_dir(repository_ctx, src_dir).splitlines())) 151 | 152 | # Create a list with the src_dir stripped to use for outputs. 153 | if tf_pip_dir_rename_pair_len: 154 | dest_files = files.replace(src_dir, "").replace(tf_pip_dir_rename_pair[0], tf_pip_dir_rename_pair[1]).splitlines() 155 | else: 156 | dest_files = files.replace(src_dir, "").splitlines() 157 | src_files = files.splitlines() 158 | command = [] 159 | outs = [] 160 | 161 | for i in range(len(dest_files)): 162 | if dest_files[i] != "": 163 | # If we have only one file to link we do not want to use the dest_dir, as 164 | # $(@D) will include the full path to the file. 165 | dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i] 166 | 167 | # Copy the headers to create a sandboxable setup. 
cmd = "cp -f"
            command.append(cmd + ' "%s" "%s"' % (src_files[i], dest))
            outs.append(' "' + dest_dir + dest_files[i] + '",')
    # Fix: removed stray `dest_dir = "abc"` reassignment here; it clobbered
    # the destination directory and corrupted the generated output paths.
    genrule = _genrule(
        genrule_name,
        " && ".join(command),
        "\n".join(outs),
    )
    return genrule

def _tf_pip_impl(repository_ctx):
    # Symlink the TF headers under include/, renaming the pip package's
    # "tensorflow_core" directory so header includes resolve.
    tf_header_dir = repository_ctx.os.environ[_TF_HEADER_DIR]
    tf_header_rule = _symlink_genrule_for_dir(
        repository_ctx,
        tf_header_dir,
        "include",
        "tf_header_include",
        tf_pip_dir_rename_pair = ["tensorflow_core", "tensorflow"]
    )

    tf_shared_library_dir = repository_ctx.os.environ[_TF_SHARED_LIBRARY_DIR]
    tf_shared_library_name = repository_ctx.os.environ[_TF_SHARED_LIBRARY_NAME]
    tf_shared_library_path = "%s/%s" % (tf_shared_library_dir, tf_shared_library_name)

    tf_shared_library_rule = _symlink_genrule_for_dir(
        repository_ctx,
        None,
        "",
        "libtensorflow_framework.so",
        [tf_shared_library_path],
        ["_pywrap_tensorflow_internal.lib" if _is_windows(repository_ctx) else "libtensorflow_framework.so"],
    )

    _tpl(repository_ctx, "BUILD", {
        "%{TF_HEADER_GENRULE}": tf_header_rule,
        "%{TF_SHARED_LIBRARY_GENRULE}": tf_shared_library_rule,
    })

tf_configure = repository_rule(
    implementation = _tf_pip_impl,
    environ = [
        _TF_HEADER_DIR,
        _TF_SHARED_LIBRARY_DIR,
        _TF_SHARED_LIBRARY_NAME,
    ],
)
--------------------------------------------------------------------------------