├── runtime ├── test │ ├── ttmetal │ │ └── CMakeLists.txt │ ├── ttnn │ │ └── CMakeLists.txt │ └── common │ │ ├── CMakeLists.txt │ │ └── test_generate_sys_desc.cpp ├── tools │ ├── CMakeLists.txt │ └── python │ │ ├── requirements.txt │ │ ├── ttrt │ │ ├── common │ │ │ └── __init__.py │ │ ├── binary │ │ │ └── __init__.py │ │ └── runtime │ │ │ └── __init__.py │ │ └── test │ │ └── conftest.py ├── lib │ ├── ttnn │ │ ├── operations │ │ │ ├── conv │ │ │ │ └── conv2d.h │ │ │ ├── ccl │ │ │ │ └── all_gather.h │ │ │ ├── pool │ │ │ │ └── maxpool2d.h │ │ │ ├── layout │ │ │ │ ├── typecast.h │ │ │ │ ├── to_device.h │ │ │ │ ├── to_layout.h │ │ │ │ ├── from_device.h │ │ │ │ └── to_memory_config.h │ │ │ ├── creation │ │ │ │ ├── full.h │ │ │ │ ├── empty.h │ │ │ │ └── arange.h │ │ │ ├── context │ │ │ │ └── get_device.h │ │ │ ├── deletion │ │ │ │ └── deallocate.h │ │ │ ├── embedding │ │ │ │ └── embedding.h │ │ │ ├── reduction │ │ │ │ └── reduction.h │ │ │ ├── data_movement │ │ │ │ ├── slice.h │ │ │ │ ├── concat.h │ │ │ │ ├── reshape.h │ │ │ │ ├── transpose.h │ │ │ │ └── reshape.cpp │ │ │ ├── normalization │ │ │ │ └── softmax.h │ │ │ ├── matmul │ │ │ │ └── matmul.h │ │ │ ├── eltwise │ │ │ │ ├── unary │ │ │ │ │ └── unary.h │ │ │ │ ├── binary │ │ │ │ │ └── binary.h │ │ │ │ └── ternary │ │ │ │ │ └── ternary.h │ │ │ └── include │ │ │ │ └── tt │ │ │ │ └── runtime │ │ │ │ └── ttnn │ │ │ │ └── operations │ │ │ │ └── eltwise │ │ │ │ ├── unary │ │ │ │ ├── utils.cpp │ │ │ │ └── utils.h │ │ │ │ └── binary │ │ │ │ └── utils.h │ │ └── CMakeLists.txt │ ├── ttmetal │ │ └── CMakeLists.txt │ └── common │ │ ├── workarounds.cpp │ │ └── debug.cpp └── CMakeLists.txt ├── tools ├── explorer │ ├── .gitignore │ ├── tt_adapter │ │ ├── pyproject.toml │ │ ├── src │ │ │ └── tt_adapter │ │ │ │ └── utils.py │ │ └── README.md │ ├── run.py │ └── README.md ├── CMakeLists.txt ├── ttnn-standalone │ ├── ttnn-dylib.hpp │ └── run ├── scripts │ ├── filter-clang-tidy-fixes.py │ └── filter-compile-commands.py ├── ttmlir-opt │ ├── 
CMakeLists.txt │ └── ttmlir-opt.cpp └── ttmlir-translate │ └── CMakeLists.txt ├── lib ├── OpModel │ └── CMakeLists.txt ├── Dialect │ ├── TT │ │ ├── CMakeLists.txt │ │ └── IR │ │ │ ├── CMakeLists.txt │ │ │ └── TTOps.cpp │ ├── TTKernel │ │ ├── CMakeLists.txt │ │ └── IR │ │ │ └── CMakeLists.txt │ ├── TTMetal │ │ ├── CMakeLists.txt │ │ ├── IR │ │ │ ├── CMakeLists.txt │ │ │ └── TTMetalOpsTypes.cpp │ │ └── Pipelines │ │ │ └── CMakeLists.txt │ ├── TTIR │ │ ├── CMakeLists.txt │ │ ├── Pipelines │ │ │ └── CMakeLists.txt │ │ ├── IR │ │ │ └── CMakeLists.txt │ │ └── Transforms │ │ │ ├── CMakeLists.txt │ │ │ └── Transforms.cpp │ ├── CMakeLists.txt │ └── TTNN │ │ ├── CMakeLists.txt │ │ ├── Utils │ │ └── CMakeLists.txt │ │ ├── Pipelines │ │ └── CMakeLists.txt │ │ ├── Transforms │ │ └── CMakeLists.txt │ │ ├── Analysis │ │ ├── CMakeLists.txt │ │ └── OpConfigAnalysis.cpp │ │ └── IR │ │ ├── CMakeLists.txt │ │ └── TTNNOpsTypes.cpp ├── Target │ ├── CMakeLists.txt │ ├── TTMetal │ │ └── CMakeLists.txt │ └── TTNN │ │ └── CMakeLists.txt ├── SharedLib │ └── empty.cpp ├── Scheduler │ └── CMakeLists.txt ├── Conversion │ ├── TTIRToTTNN │ │ └── CMakeLists.txt │ ├── TTIRToTTMetal │ │ └── CMakeLists.txt │ ├── TosaToTTIR │ │ └── CMakeLists.txt │ ├── TTIRToTTIRDecomposition │ │ └── CMakeLists.txt │ ├── TTKernelToEmitC │ │ └── CMakeLists.txt │ ├── TTNNToEmitC │ │ └── CMakeLists.txt │ ├── StableHLOToTTIR │ │ └── CMakeLists.txt │ └── CMakeLists.txt └── CAPI │ ├── CMakeLists.txt │ ├── TTKernelTypes.cpp │ └── Dialects.cpp ├── include ├── ttmlir │ ├── Dialect │ │ ├── TT │ │ │ ├── CMakeLists.txt │ │ │ └── IR │ │ │ │ ├── TTOps.td │ │ │ │ ├── TT.h │ │ │ │ ├── TTOps.h │ │ │ │ └── CMakeLists.txt │ │ ├── TTKernel │ │ │ ├── CMakeLists.txt │ │ │ └── IR │ │ │ │ ├── TTKernel.h │ │ │ │ ├── TTKernelOpsTypes.h │ │ │ │ └── TTKernelOps.h │ │ ├── TTIR │ │ │ ├── CMakeLists.txt │ │ │ ├── Transforms │ │ │ │ ├── CMakeLists.txt │ │ │ │ └── Passes.h │ │ │ └── IR │ │ │ │ ├── TTIR.h │ │ │ │ ├── TTIROpsInterfaces.h │ │ │ │ └── 
TTIROpsEnums.td │ │ ├── TTMetal │ │ │ ├── CMakeLists.txt │ │ │ ├── Transforms │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── Passes.td │ │ │ │ └── Passes.h │ │ │ └── IR │ │ │ │ ├── TTMetalOpsEnums.td │ │ │ │ ├── TTMetal.h │ │ │ │ └── TTMetalOpsTypes.h │ │ ├── TTNN │ │ │ ├── CMakeLists.txt │ │ │ ├── Transforms │ │ │ │ ├── CMakeLists.txt │ │ │ │ └── TTNNToCpp.h │ │ │ ├── IR │ │ │ │ ├── TTNN.h │ │ │ │ └── TTNNOpsTypes.h │ │ │ └── Types │ │ │ │ └── Types.h │ │ └── CMakeLists.txt │ ├── Target │ │ ├── Common │ │ │ ├── version.fbs │ │ │ ├── CMakeLists.txt │ │ │ ├── system_desc.fbs │ │ │ ├── debug_info.fbs │ │ │ └── Target.h │ │ ├── CMakeLists.txt │ │ ├── TTNN │ │ │ ├── CMakeLists.txt │ │ │ ├── binary.fbs │ │ │ └── Target.h │ │ └── TTMetal │ │ │ ├── CMakeLists.txt │ │ │ ├── Target.h │ │ │ ├── binary.fbs │ │ │ └── TTMetalToFlatbuffer.h │ ├── CMakeLists.txt │ ├── RegisterAll.h │ ├── Conversion │ │ ├── CMakeLists.txt │ │ ├── ArithToStableHLO │ │ │ └── ArithToStableHLO.h │ │ ├── TTIRToTTNN │ │ │ └── TTIRToTTNN.h │ │ ├── TTNNToEmitC │ │ │ └── TTNNToEmitC.h │ │ ├── TosaToTTIR │ │ │ └── TosaToTTIR.h │ │ ├── TTIRToTTMetal │ │ │ └── TTIRToTTMetal.h │ │ ├── StableHLOToTTIR │ │ │ └── StableHLOToTTIR.h │ │ └── TTIRToTTIRDecomposition │ │ │ └── TTIRToTTIRDecomposition.h │ └── OpModel │ │ └── TTNN │ │ └── TTNNOpModel.h ├── CMakeLists.txt └── ttmlir-c │ ├── Dialects.h │ ├── TTKernelTypes.h │ └── TTTypes.h ├── .clang-format ├── test ├── python │ ├── requirements.txt │ └── lit.local.cfg ├── unittests │ ├── Optimizer │ │ └── CMakeLists.txt │ ├── TestScheduler │ │ └── CMakeLists.txt │ └── CMakeLists.txt ├── ttmlir │ ├── Conversion │ │ ├── StableHLOToTTIR │ │ │ ├── test_stablehlo_dialects.mlir │ │ │ ├── scalar_add_op.mlir │ │ │ ├── binary │ │ │ │ ├── add_op.mlir │ │ │ │ ├── maximum_op.mlir │ │ │ │ ├── divide_op.mlir │ │ │ │ ├── multiply_op.mlir │ │ │ │ ├── subtract_op.mlir │ │ │ │ ├── minimum_op.mlir │ │ │ │ └── remainder_op.mlir │ │ │ ├── iota_op.mlir │ │ │ ├── unary │ │ │ │ ├── 
permute_transpose_op.mlir │ │ │ │ ├── transpose_op.mlir │ │ │ │ ├── cbrt_op.mlir │ │ │ │ ├── absolute_op.mlir │ │ │ │ ├── sqrt_op.mlir │ │ │ │ ├── negate_op.mlir │ │ │ │ ├── exponential_op.mlir │ │ │ │ ├── sine_op.mlir │ │ │ │ ├── ceil_op.mlir │ │ │ │ └── cosine_op.mlir │ │ │ ├── floor_op.mlir │ │ │ ├── rsqrt_op.mlir │ │ │ ├── reduce_add_op.mlir │ │ │ ├── dot_general │ │ │ │ ├── dot_general_2d.mlir │ │ │ │ └── dot_general_3d.mlir │ │ │ ├── reduce_maximum_op.mlir │ │ │ ├── dynamic_iota_op.mlir │ │ │ ├── slice_op.mlir │ │ │ ├── test_remove_dead_values.mlir │ │ │ ├── isfinite_op.mlir │ │ │ ├── composite_op.mlir │ │ │ ├── sign_op.mlir │ │ │ ├── exponential_minus_one_op.mlir │ │ │ └── log_plus_one_op.mlir │ │ └── ArithToStableHLO │ │ │ └── constant_op.mlir │ ├── ttmlir-opt.mlir │ ├── Dialect │ │ ├── TTIR │ │ │ ├── tosa_to_ttir_multiply.mlir │ │ │ ├── Decomposition │ │ │ │ └── arange_decomposition.mlir │ │ │ ├── test_generic.mlir │ │ │ ├── constant_as_fill.mlir │ │ │ └── clamp │ │ │ │ └── clamp_tests_negative.mlir │ │ └── TTNN │ │ │ ├── simple_max.mlir │ │ │ ├── simple_mean.mlir │ │ │ ├── simple_sum.mlir │ │ │ ├── simple_squeeze.mlir │ │ │ ├── transpose │ │ │ ├── simple_transpose_8x8.mlir │ │ │ ├── simple_transpose.mlir │ │ │ ├── simple_transpose_8x16_reverse_dims.mlir │ │ │ └── simple_transpose_negative_dims.mlir │ │ │ ├── simple_get_dimension_size.mlir │ │ │ ├── simple_unsqueeze.mlir │ │ │ ├── simple_reshape.mlir │ │ │ ├── ccl │ │ │ ├── all_gather_negative.mlir │ │ │ └── all_gather.mlir │ │ │ ├── eltwise │ │ │ ├── unary │ │ │ │ ├── abs │ │ │ │ │ └── simple_abs.mlir │ │ │ │ ├── cos │ │ │ │ │ └── simple_cos.mlir │ │ │ │ ├── negate │ │ │ │ │ └── simple_neg.mlir │ │ │ │ ├── sin │ │ │ │ │ └── simple_sin.mlir │ │ │ │ ├── cbrt │ │ │ │ │ └── simple_cbrt.mlir │ │ │ │ ├── ceil │ │ │ │ │ └── simple_ceil.mlir │ │ │ │ ├── rsqrt │ │ │ │ │ └── simple_rsqrt.mlir │ │ │ │ ├── sqrt │ │ │ │ │ └── simple_sqrt.mlir │ │ │ │ ├── sigmoid │ │ │ │ │ └── simple_sigmoid.mlir │ │ │ │ ├── reciprocal 
│ │ │ │ │ └── simple_reciprocal.mlir │ │ │ │ ├── cast │ │ │ │ │ └── simple_cast.mlir │ │ │ │ ├── logical_not │ │ │ │ │ └── simple_not.mlir │ │ │ │ ├── gelu │ │ │ │ │ └── simple_gelu.mlir │ │ │ │ ├── floor │ │ │ │ │ └── simple_floor.mlir │ │ │ │ └── isfinite │ │ │ │ │ └── simple_isfinite.mlir │ │ │ ├── binary │ │ │ │ └── minimum │ │ │ │ │ └── simple_minimum.mlir │ │ │ └── operand_broadcasts_negative.mlir │ │ │ ├── remove_empty_op.mlir │ │ │ ├── simple_slice.mlir │ │ │ ├── concat │ │ │ ├── concat_dim_oob.mlir │ │ │ ├── concat_negative_dim_oob.mlir │ │ │ ├── simple_concat.mlir │ │ │ └── concat_negative_dim.mlir │ │ │ ├── embedding │ │ │ ├── embedding_1d_tensor.mlir │ │ │ ├── simple_embedding.mlir │ │ │ └── embedding_non_tile.mlir │ │ │ ├── softmax │ │ │ ├── softmax_negative_1.mlir │ │ │ └── softmax_negative_2.mlir │ │ │ ├── simple_div.mlir │ │ │ ├── simple_maximum.mlir │ │ │ ├── simple_multiply.mlir │ │ │ ├── simple_subtract.mlir │ │ │ ├── matmul │ │ │ └── simple_matmul.mlir │ │ │ ├── simple_subtract_to_add.mlir │ │ │ └── pooling │ │ │ └── simple_maxpool2d.mlir │ ├── Translate │ │ └── TTNN │ │ │ └── 1d_tensor.mlir │ └── Silicon │ │ ├── TTNN │ │ ├── simple_nop.mlir │ │ ├── emitc │ │ │ └── simple_add.mlir │ │ ├── perf_unit │ │ │ ├── test_perf_max.mlir │ │ │ ├── test_perf_sum.mlir │ │ │ ├── test_perf_neg.mlir │ │ │ ├── test_perf_typecast.mlir │ │ │ ├── test_perf_transpose.mlir │ │ │ ├── test_perf_slice.mlir │ │ │ ├── test_perf_ceil.mlir │ │ │ ├── test_perf_log.mlir │ │ │ ├── test_perf_relu.mlir │ │ │ ├── test_perf_sine.mlir │ │ │ ├── test_perf_sqrt.mlir │ │ │ ├── test_perf_cosine.mlir │ │ │ ├── test_perf_rsqrt.mlir │ │ │ ├── test_perf_sigmoid.mlir │ │ │ └── test_perf_reciprocal.mlir │ │ ├── simple_mean.mlir │ │ ├── simple_index.mlir │ │ ├── simple_typecast.mlir │ │ └── simple_slice.mlir │ │ └── StableHLO │ │ ├── Iota │ │ ├── simple_device_iota_dim2.mlir │ │ ├── simple_device_iota_dim3.mlir │ │ └── simple_device_dynamic_iota_dim2.mlir │ │ └── Unary │ │ ├── 
absolute_op.mlir │ │ ├── cbrt_op.mlir │ │ ├── ceil_op.mlir │ │ ├── negate_op.mlir │ │ ├── sign_op.mlir │ │ ├── sine_op.mlir │ │ ├── sqrt_op.mlir │ │ ├── floor_op.mlir │ │ ├── rsqrt_op.mlir │ │ ├── cosine_op.mlir │ │ ├── exponential_op.mlir │ │ └── logical_op.mlir └── lit.site.cfg.py.in ├── env ├── build-requirements.txt └── build_venv.sh ├── CODE_OF_CONDUCT.md ├── docs ├── book.toml └── src │ ├── doxygen.md │ ├── ttmlir-opt.md │ ├── flatbuffers.md │ └── tools.md ├── python ├── ttmlir │ ├── overrides.py │ ├── passes.py │ └── dialects │ │ ├── ttir.py │ │ ├── ttkernel.py │ │ ├── tt.py │ │ ├── ttnn.py │ │ ├── TTBinding.td │ │ ├── TTNNBinding.td │ │ ├── TTEnumBinding.td │ │ ├── TTNNEnumBinding.td │ │ ├── TTIRBinding.td │ │ └── TTKernelBinding.td ├── Overrides.cpp └── TTIRModule.cpp ├── cmake └── modules │ ├── ConfigureDoxygen.cmake │ ├── TTMLIRPythonSitePackages.cmake │ ├── LintTools.cmake │ ├── TTMLIRConfig.cmake.in │ └── FindMLIR.cmake ├── .gitignore ├── .github ├── workflows │ ├── spdx.yml │ ├── pre-commit.yml │ ├── on-push.yml │ └── on-pr.yml ├── actions │ └── install-deps │ │ └── dependencies.json ├── get-docker-tag.sh └── check-spdx.yaml └── .pre-commit-config.yaml /runtime/test/ttmetal/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tools/explorer/.gitignore: -------------------------------------------------------------------------------- 1 | model-explorer 2 | -------------------------------------------------------------------------------- /lib/OpModel/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(TTNN) 2 | -------------------------------------------------------------------------------- /lib/Dialect/TT/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | 
-------------------------------------------------------------------------------- /runtime/tools/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(python) 2 | -------------------------------------------------------------------------------- /lib/Dialect/TTKernel/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TT/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTKernel/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: LLVM 2 | AlwaysBreakTemplateDeclarations: Yes 3 | -------------------------------------------------------------------------------- /lib/Target/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(TTMetal) 2 | add_subdirectory(TTNN) 3 | -------------------------------------------------------------------------------- /runtime/test/ttnn/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_runtime_gtest(subtract_test test_subtract.cpp) 2 | -------------------------------------------------------------------------------- /lib/Dialect/TTMetal/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | add_subdirectory(Pipelines) 3 | 
-------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTIR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | add_subdirectory(Transforms) 3 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTMetal/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | add_subdirectory(Transforms) 3 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTNN/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | add_subdirectory(Transforms) 3 | -------------------------------------------------------------------------------- /runtime/test/common/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_runtime_gtest(sys_desc_sanity test_generate_sys_desc.cpp) 2 | -------------------------------------------------------------------------------- /runtime/tools/python/requirements.txt: -------------------------------------------------------------------------------- 1 | torch==2.3.0 --index-url https://download.pytorch.org/whl/cpu 2 | -------------------------------------------------------------------------------- /test/python/requirements.txt: -------------------------------------------------------------------------------- 1 | lit 2 | pytest 3 | torch==2.3.0 --index-url https://download.pytorch.org/whl/cpu 4 | -------------------------------------------------------------------------------- /lib/Dialect/TTIR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | add_subdirectory(Pipelines) 3 | add_subdirectory(Transforms) 4 | 
-------------------------------------------------------------------------------- /tools/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(ttmlir-opt) 2 | add_subdirectory(ttmlir-translate) 3 | add_subdirectory(explorer) 4 | -------------------------------------------------------------------------------- /env/build-requirements.txt: -------------------------------------------------------------------------------- 1 | cmake 2 | ninja 3 | clang-format 4 | clang-tidy 5 | wheel 6 | setuptools 7 | black 8 | pre-commit 9 | -------------------------------------------------------------------------------- /lib/SharedLib/empty.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | -------------------------------------------------------------------------------- /test/python/lit.local.cfg: -------------------------------------------------------------------------------- 1 | config.suffixes.add(".py") 2 | 3 | if not config.enable_bindings_python: 4 | config.unsupported = True 5 | -------------------------------------------------------------------------------- /runtime/tools/python/ttrt/common/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | The tt-mlir Code of Conduct can be found at: 4 | -------------------------------------------------------------------------------- /docs/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["Nicholas Smith"] 3 | 
language = "en" 4 | multilingual = false 5 | src = "src" 6 | title = "tt-mlir documentation" 7 | -------------------------------------------------------------------------------- /include/ttmlir/Target/Common/version.fbs: -------------------------------------------------------------------------------- 1 | namespace tt.target; 2 | 3 | struct Version { 4 | major: uint16; 5 | minor: uint16; 6 | patch: uint32; 7 | } 8 | -------------------------------------------------------------------------------- /lib/Dialect/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(TT) 2 | add_subdirectory(TTIR) 3 | add_subdirectory(TTNN) 4 | add_subdirectory(TTMetal) 5 | add_subdirectory(TTKernel) 6 | -------------------------------------------------------------------------------- /include/ttmlir/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(Conversion) 2 | add_subdirectory(Dialect) 3 | add_subdirectory(Target) 4 | add_dependencies(mlir-headers FBS_GENERATION) 5 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(TT) 2 | add_subdirectory(TTIR) 3 | add_subdirectory(TTNN) 4 | add_subdirectory(TTMetal) 5 | add_subdirectory(TTKernel) 6 | -------------------------------------------------------------------------------- /lib/Dialect/TTNN/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(IR) 2 | add_subdirectory(Pipelines) 3 | add_subdirectory(Transforms) 4 | add_subdirectory(Utils) 5 | add_subdirectory(Analysis) 6 | -------------------------------------------------------------------------------- /python/ttmlir/overrides.py: -------------------------------------------------------------------------------- 1 | # 
SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | from ._mlir_libs._ttmlir import overrides 6 | -------------------------------------------------------------------------------- /python/ttmlir/passes.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | from ._mlir_libs._ttmlir.passes import * 6 | -------------------------------------------------------------------------------- /include/ttmlir/Target/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(Common) 2 | add_subdirectory(TTMetal) 3 | add_subdirectory(TTNN) 4 | 5 | add_custom_target(FBS_GENERATION DEPENDS TTMETAL_FBS TTNN_FBS COMMON_FBS) 6 | -------------------------------------------------------------------------------- /include/ttmlir/Target/TTNN/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include(BuildFlatbuffers) 2 | 3 | set(TTNN_FBS_GEN_SOURCES 4 | binary.fbs 5 | program.fbs 6 | ) 7 | 8 | build_flatbuffers("${TTNN_FBS_GEN_SOURCES}" TTNN_FBS) 9 | -------------------------------------------------------------------------------- /lib/Dialect/TTNN/Utils/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(TTMLIRTTNNUtils 2 | Utils.cpp 3 | OptimizerOverrides.cpp 4 | PassOverrides.cpp 5 | 6 | ADDITIONAL_HEADER_DIRS 7 | ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/TTNN 8 | ) 9 | -------------------------------------------------------------------------------- /python/ttmlir/dialects/ttir.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | from ._ttir_ops_gen import * 6 | from 
.._mlir_libs._ttmlir import register_dialect, ttir_ir as ir 7 | -------------------------------------------------------------------------------- /include/ttmlir/Target/TTMetal/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include(BuildFlatbuffers) 2 | 3 | set(TTMETAL_FBS_GEN_SOURCES 4 | command.fbs 5 | program.fbs 6 | binary.fbs 7 | ) 8 | 9 | build_flatbuffers("${TTMETAL_FBS_GEN_SOURCES}" TTMETAL_FBS) 10 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTIR/Transforms/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(LLVM_TARGET_DEFINITIONS Passes.td) 2 | mlir_tablegen(Passes.h.inc --gen-pass-decls) 3 | add_public_tablegen_target(MLIRTTIRPassesIncGen) 4 | add_dependencies(mlir-headers MLIRTTIRPassesIncGen) 5 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTNN/Transforms/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(LLVM_TARGET_DEFINITIONS Passes.td) 2 | mlir_tablegen(Passes.h.inc --gen-pass-decls) 3 | add_public_tablegen_target(MLIRTTNNPassesIncGen) 4 | add_dependencies(mlir-headers MLIRTTNNPassesIncGen) 5 | -------------------------------------------------------------------------------- /python/ttmlir/dialects/ttkernel.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | from ._ttkernel_ops_gen import * 6 | from .._mlir_libs._ttmlir import register_dialect, ttkernel_ir as ir 7 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTMetal/Transforms/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(LLVM_TARGET_DEFINITIONS Passes.td) 2 
| mlir_tablegen(Passes.h.inc --gen-pass-decls) 3 | add_public_tablegen_target(MLIRTTMetalPassesIncGen) 4 | add_dependencies(mlir-headers MLIRTTMetalPassesIncGen) 5 | -------------------------------------------------------------------------------- /lib/Scheduler/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_library(MLIRScheduler 2 | Scheduler.cpp 3 | 4 | ADDITIONAL_HEADER_DIRS 5 | ${PROJECT_SOURCE_DIR}/include/ttmlir/Scheduler 6 | 7 | LINK_LIBS PUBLIC 8 | MLIRIR 9 | MLIRPass 10 | ) 11 | -------------------------------------------------------------------------------- /tools/ttnn-standalone/ttnn-dylib.hpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttnn-precompiled.hpp" 6 | 7 | std::vector forward(std::vector inputs); 8 | -------------------------------------------------------------------------------- /include/ttmlir/Target/Common/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include(BuildFlatbuffers) 2 | 3 | set(COMMON_FBS_GEN_SOURCES 4 | system_desc.fbs 5 | types.fbs 6 | version.fbs 7 | debug_info.fbs 8 | ) 9 | 10 | build_flatbuffers("${COMMON_FBS_GEN_SOURCES}" COMMON_FBS) 11 | -------------------------------------------------------------------------------- /python/ttmlir/dialects/tt.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | from ._tt_ops_gen import * 6 | from ._tt_enum_gen import * 7 | from .._mlir_libs._ttmlir import tt_ir as ir, register_dialect 8 | -------------------------------------------------------------------------------- /python/ttmlir/dialects/ttnn.py: 
-------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | from ._ttnn_ops_gen import * 6 | from ._ttnn_enum_gen import * 7 | from .._mlir_libs._ttmlir import register_dialect, ttnn_ir as ir 8 | -------------------------------------------------------------------------------- /tools/explorer/tt_adapter/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "tt-adapter" 3 | version = "0.0.1" 4 | description = "Model Explorer Adapter built for TT-MLIR Compiled outputs." 5 | readme = "README.md" 6 | 7 | [tool.poetry] 8 | packages = [{ include = "src/tt_adapter" }] 9 | -------------------------------------------------------------------------------- /cmake/modules/ConfigureDoxygen.cmake: -------------------------------------------------------------------------------- 1 | set(SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/..) 
2 | set(DOXYGEN_DIR ${CMAKE_CURRENT_BINARY_DIR}/book/doxygen) 3 | 4 | configure_file(${CMAKE_CURRENT_SOURCE_DIR}/doxygen.cfg.in 5 | ${CMAKE_CURRENT_BINARY_DIR}/book/doxygen/doxygen.cfg @ONLY) 6 | -------------------------------------------------------------------------------- /test/unittests/Optimizer/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_unittest(OptimizerTests 2 | TestShardSolver.cpp 3 | TestOptimizerOverrides.cpp 4 | ) 5 | 6 | target_link_libraries(OptimizerTests 7 | PRIVATE 8 | MLIR 9 | MLIRTTDialect 10 | MLIRTTNNPipelines 11 | ) 12 | -------------------------------------------------------------------------------- /lib/Dialect/TTIR/Pipelines/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTIRPipelines 2 | TTIRPipelines.cpp 3 | 4 | ADDITIONAL_HEADER_DIRS 5 | ${PROJECT_SOURCE_DIR}/include/ttmlir 6 | 7 | LINK_LIBS PUBLIC 8 | MLIRTTIRDialect 9 | MLIRPass 10 | MLIRTransforms 11 | ) 12 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/test_stablehlo_dialects.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --show-dialects | FileCheck %s 3 | // CHECK: Available Dialects: 4 | // CHECK: chlo 5 | // CHECK: quant 6 | // CHECK: sparse_tensor 7 | // CHECK: stablehlo 8 | // CHECK: vhlo 9 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TT/IR/TTOps.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_TTMLIR_TTOPS_TD 6 | #define TTMLIR_TTMLIR_TTOPS_TD 7 | 8 | include "ttmlir/Dialect/TT/IR/TTOpsTypes.td" 9 | 10 | #endif 11 | 
-------------------------------------------------------------------------------- /python/ttmlir/dialects/TTBinding.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef PYTHON_BINDINGS_TTMLIR_TTOPS 6 | #define PYTHON_BINDINGS_TTMLIR_TTOPS 7 | 8 | include "ttmlir/Dialect/TT/IR/TTOps.td" 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /test/unittests/TestScheduler/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_unittest(SchedulerTests 2 | TestScheduler.cpp 3 | ) 4 | 5 | target_link_libraries(SchedulerTests 6 | PRIVATE 7 | MLIR 8 | MLIRTTDialect 9 | MLIRTTIRDialect 10 | MLIRTTNNPipelines 11 | MLIRScheduler 12 | ) 13 | -------------------------------------------------------------------------------- /include/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(ttmlir) 2 | 3 | install(DIRECTORY ttmlir ttmlir-c ${TTMLIR_BINARY_DIR}/include/ttmlir 4 | DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" 5 | COMPONENT SharedLib 6 | FILES_MATCHING 7 | PATTERN "*.h" 8 | PATTERN "*.inc" 9 | PATTERN "*.td" 10 | ) 11 | -------------------------------------------------------------------------------- /python/ttmlir/dialects/TTNNBinding.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef PYTHON_BINDINGS_TTMLIR_TTNNOPS 6 | #define PYTHON_BINDINGS_TTMLIR_TTNNOPS 7 | 8 | include "ttmlir/Dialect/TTNN/IR/TTNNOps.td" 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .local 2 | build 3 | third_party/tt-metal 4 | .DS_STORE 5 | .vscode/* 6 | .cache 7 | *pycache* 8 | *.egg-info 9 | ttrt-artifacts/* 10 | query_results.json 11 | run_results.json 12 | ttrt_report.xml 13 | cluster_descriptor.yaml 14 | 15 | # TTNN and TTMetal flatbuffers 16 | *.ttnn 17 | *.ttm 18 | -------------------------------------------------------------------------------- /lib/Dialect/TT/IR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTDialect 2 | TTOpsTypes.cpp 3 | TTDialect.cpp 4 | TTOps.cpp 5 | 6 | ADDITIONAL_HEADER_DIRS 7 | ${PROJECT_SOURCE_DIR}/include/ttmlir 8 | 9 | DEPENDS 10 | MLIRTTOpsIncGen 11 | ) 12 | -------------------------------------------------------------------------------- /lib/Dialect/TT/IR/TTOps.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttmlir/Dialect/TT/IR/TTOps.h" 6 | #include "ttmlir/Dialect/TT/IR/TT.h" 7 | 8 | #define GET_OP_CLASSES 9 | #include "ttmlir/Dialect/TT/IR/TTOps.cpp.inc" 10 | -------------------------------------------------------------------------------- /python/ttmlir/dialects/TTEnumBinding.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef PYTHON_BINDINGS_TTMLIR_TTENUMS 6 | #define PYTHON_BINDINGS_TTMLIR_TTENUMS 7 | 8 | include "ttmlir/Dialect/TT/IR/TTOpsEnums.td" 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /docs/src/doxygen.md: -------------------------------------------------------------------------------- 1 | # Doxygen 2 | 3 | This is a link to a doxygen autogenerated code reference. 4 | [Doxygen](./doxygen/html/files.html) 5 | 6 | ## Build Instructions 7 | 8 | To build Doxygen use the doxygen target in CMake 9 | 10 | ```sh 11 | cmake -B build 12 | cmake --build build -- doxygen 13 | ``` 14 | -------------------------------------------------------------------------------- /python/ttmlir/dialects/TTNNEnumBinding.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef PYTHON_BINDINGS_TTMLIR_TTNNENUMS 6 | #define PYTHON_BINDINGS_TTMLIR_TTNNENUMS 7 | 8 | include "ttmlir/Dialect/TTNN/IR/TTNNOpsEnums.td" 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /runtime/test/common/test_generate_sys_desc.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | #include "tt/runtime/runtime.h" 5 | #include 6 | 7 | TEST(GenerateSysDesc, Sanity) { 8 | auto sysDesc = ::tt::runtime::getCurrentSystemDesc(); 9 | } 10 | -------------------------------------------------------------------------------- /test/unittests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_custom_target(MLIRUnitTests) 2 | set_target_properties(MLIRUnitTests PROPERTIES FOLDER "MLIR Tests") 3 | 4 | function(add_mlir_unittest 
test_dirname) 5 | add_unittest(MLIRUnitTests ${test_dirname} ${ARGN}) 6 | endfunction() 7 | 8 | add_subdirectory(TestScheduler) 9 | add_subdirectory(Optimizer) 10 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTMetal/Transforms/Passes.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_TTMLIR_DIALECT_TTMETAL_TTMETALPASSES_TD 6 | #define TTMLIR_TTMLIR_DIALECT_TTMETAL_TTMETALPASSES_TD 7 | 8 | include "mlir/Pass/PassBase.td" 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /lib/Conversion/TTIRToTTNN/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_library(TTMLIRTTIRToTTNN 2 | TTIRToTTNN.cpp 3 | TTIRToTTNNPass.cpp 4 | 5 | ADDITIONAL_HEADER_DIRS 6 | ${PROJECT_SOURCE_DIR}/include/ttmlir/Conversion/TTIRToTTNN 7 | 8 | DEPENDS 9 | TTMLIRConversionPassIncGen 10 | 11 | LINK_LIBS PUBLIC 12 | MLIRIR 13 | MLIRPass 14 | ) 15 | -------------------------------------------------------------------------------- /python/ttmlir/dialects/TTIRBinding.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef PYTHON_BINDINGS_TTMLIR_TTIROPS 6 | #define PYTHON_BINDINGS_TTMLIR_TTIROPS 7 | 8 | include "ttmlir/Dialect/TTIR/IR/TTIROps.td" 9 | 10 | #endif // PYTHON_BINDINGS_TTMLIR_TTIROPS 11 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTMetal/IR/TTMetalOpsEnums.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 
| 5 | #ifndef TTMLIR_TTMLIR_DIALECT_TTMETAL_TTMETALOPSENUMS_TD 6 | #define TTMLIR_TTMLIR_DIALECT_TTMETAL_TTMETALOPSENUMS_TD 7 | 8 | include "mlir/IR/EnumAttr.td" 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /lib/Conversion/TTIRToTTMetal/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_library(TTMLIRTTIRToTTMetal 2 | TTIRToTTMetal.cpp 3 | TTIRToTTMetalPass.cpp 4 | 5 | ADDITIONAL_HEADER_DIRS 6 | ${PROJECT_SOURCE_DIR}/include/ttmlir/Conversion/TTIRToTTMetal 7 | 8 | DEPENDS 9 | TTMLIRConversionPassIncGen 10 | 11 | LINK_LIBS PUBLIC 12 | MLIRIR 13 | MLIRPass 14 | ) 15 | -------------------------------------------------------------------------------- /lib/Dialect/TTMetal/IR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTMetalDialect 2 | TTMetalDialect.cpp 3 | TTMetalOps.cpp 4 | TTMetalOpsTypes.cpp 5 | 6 | ADDITIONAL_HEADER_DIRS 7 | ${PROJECT_SOURCE_DIR}/include/ttmlir 8 | 9 | DEPENDS 10 | MLIRTTMetalOpsIncGen 11 | MLIRTTOpsIncGen 12 | ) 13 | -------------------------------------------------------------------------------- /lib/Dialect/TTMetal/Pipelines/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTMetalPipelines 2 | TTMetalPipelines.cpp 3 | 4 | ADDITIONAL_HEADER_DIRS 5 | ${PROJECT_SOURCE_DIR}/include/ttmlir 6 | 7 | LINK_LIBS PUBLIC 8 | MLIRTTIRDialect 9 | MLIRTTMetalDialect 10 | MLIRTTIRTransforms 11 | MLIRTTNNAnalysis 12 | MLIRPass 13 | MLIRTransforms 14 | ) 15 | -------------------------------------------------------------------------------- /python/ttmlir/dialects/TTKernelBinding.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef 
PYTHON_BINDINGS_TTMLIR_TTKERNELOPS 6 | #define PYTHON_BINDINGS_TTMLIR_TTKERNELOPS 7 | 8 | include "ttmlir/Dialect/TTKernel/IR/TTKernelOps.td" 9 | 10 | #endif // PYTHON_BINDINGS_TTMLIR_TTKERNELOPS 11 | -------------------------------------------------------------------------------- /.github/workflows/spdx.yml: -------------------------------------------------------------------------------- 1 | name: spdx 2 | 3 | on: 4 | workflow_dispatch: 5 | workflow_call: 6 | 7 | jobs: 8 | check-spdx-headers: 9 | 10 | timeout-minutes: 10 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: checkout 14 | uses: actions/checkout@v4 15 | - uses: enarx/spdx@master 16 | with: 17 | licenses: Apache-2.0 18 | -------------------------------------------------------------------------------- /lib/Conversion/TosaToTTIR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_library(TTMLIRTosaToTTIR 2 | TosaToTTIRPass.cpp 3 | TosaToTTIRPatterns.cpp 4 | 5 | ADDITIONAL_HEADER_DIRS 6 | ${PROJECT_SOURCE_DIR}/include/ttmlir/Conversion/TosaToTTIR 7 | 8 | DEPENDS 9 | TTMLIRConversionPassIncGen 10 | 11 | LINK_LIBS PUBLIC 12 | MLIRIR 13 | MLIRPass 14 | MLIRTosaDialect 15 | ) 16 | -------------------------------------------------------------------------------- /lib/Dialect/TTNN/Pipelines/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTNNPipelines 2 | TTNNPipelines.cpp 3 | 4 | ADDITIONAL_HEADER_DIRS 5 | ${PROJECT_SOURCE_DIR}/include/ttmlir 6 | 7 | LINK_LIBS PUBLIC 8 | MLIRTTIRDialect 9 | MLIRTTNNDialect 10 | MLIRTTIRTransforms 11 | MLIRTTNNTransforms 12 | MLIRTTNNAnalysis 13 | MLIRPass 14 | MLIRTransforms 15 | ) 16 | -------------------------------------------------------------------------------- /lib/Dialect/TTIR/IR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTIRDialect 2 | 
TTIRDialect.cpp 3 | TTIROps.cpp 4 | TTIROpsInterfaces.cpp 5 | 6 | ADDITIONAL_HEADER_DIRS 7 | ${PROJECT_SOURCE_DIR}/include/ttmlir 8 | 9 | DEPENDS 10 | MLIRTTIROpsIncGen 11 | MLIRTTIRPassesIncGen 12 | MLIRTTOpsIncGen 13 | ) 14 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TT/IR/TT.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TT_IR_TT_H 6 | #define TTMLIR_DIALECT_TT_IR_TT_H 7 | 8 | #include "mlir/Bytecode/BytecodeOpInterface.h" 9 | #include "mlir/IR/Dialect.h" 10 | 11 | #include "ttmlir/Dialect/TT/IR/TTOpsDialect.h.inc" 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /lib/Conversion/TTIRToTTIRDecomposition/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_library(TTMLIRTTIRToTTIRDecomposition 2 | TTIRToTTIRDecomposition.cpp 3 | TTIRToTTIRDecompositionPass.cpp 4 | 5 | ADDITIONAL_HEADER_DIRS 6 | ${PROJECT_SOURCE_DIR}/include/ttmlir/Conversion/TTIRToTTIR 7 | 8 | DEPENDS 9 | TTMLIRConversionPassIncGen 10 | 11 | LINK_LIBS PUBLIC 12 | MLIRIR 13 | MLIRPass 14 | ) 15 | -------------------------------------------------------------------------------- /include/ttmlir/Target/Common/system_desc.fbs: -------------------------------------------------------------------------------- 1 | include "types.fbs"; 2 | include "version.fbs"; 3 | 4 | namespace tt.target; 5 | 6 | table SystemDescRoot { 7 | version: tt.target.Version; 8 | ttmlir_git_hash: string; 9 | product_identifier: string; 10 | system_desc: tt.target.SystemDesc; 11 | } 12 | 13 | root_type SystemDescRoot; 14 | file_identifier "TTSY"; 15 | file_extension "ttsys"; 16 | -------------------------------------------------------------------------------- 
/include/ttmlir/Dialect/TTIR/IR/TTIR.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTIR_IR_TTIR_H 6 | #define TTMLIR_DIALECT_TTIR_IR_TTIR_H 7 | 8 | #include "mlir/Bytecode/BytecodeOpInterface.h" 9 | #include "mlir/IR/Dialect.h" 10 | 11 | #include "ttmlir/Dialect/TTIR/IR/TTIROpsDialect.h.inc" 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTNN/IR/TTNN.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTNN_IR_TTNN_H 6 | #define TTMLIR_DIALECT_TTNN_IR_TTNN_H 7 | 8 | #include "mlir/Bytecode/BytecodeOpInterface.h" 9 | #include "mlir/IR/Dialect.h" 10 | 11 | #include "ttmlir/Dialect/TTNN/IR/TTNNOpsDialect.h.inc" 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /cmake/modules/TTMLIRPythonSitePackages.cmake: -------------------------------------------------------------------------------- 1 | execute_process( 2 | COMMAND python -c "from distutils import sysconfig; print(sysconfig.get_python_lib(prefix='', plat_specific=True))" 3 | OUTPUT_VARIABLE PYTHON_SITE_PACKAGES 4 | OUTPUT_STRIP_TRAILING_WHITESPACE) 5 | set(TTMLIR_PYTHON_SITE_PACKAGES "${TTMLIR_TOOLCHAIN_DIR}/venv/${PYTHON_SITE_PACKAGES}" CACHE STRING "Path to the Python site-packages directory") 6 | -------------------------------------------------------------------------------- /lib/CAPI/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_public_c_api_library(TTMLIRCAPI 2 | Dialects.cpp 3 | TTKernelTypes.cpp 4 | TTAttrs.cpp 5 | TTTypes.cpp 6 | TTNNAttrs.cpp 7 | 8 | 
ADDITIONAL_HEADER_DIRS 9 | ${PROJECT_SOURCE_DIR}/include/ttmlir-c/ 10 | 11 | ENABLE_AGGREGATION 12 | 13 | LINK_LIBS PUBLIC 14 | MLIRIR 15 | MLIRCAPITransforms 16 | MLIRSupport 17 | MLIRTargetCpp 18 | TTMLIRStatic 19 | ) 20 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | name: pre-commit 2 | 3 | on: 4 | workflow_dispatch: 5 | workflow_call: 6 | 7 | jobs: 8 | pre-commit: 9 | 10 | timeout-minutes: 10 11 | runs-on: macos-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Set up Python 15 | uses: actions/setup-python@v5 16 | - name: Run pre-commit 17 | uses: pre-commit/action@v3.0.1 18 | -------------------------------------------------------------------------------- /include/ttmlir/Target/TTNN/binary.fbs: -------------------------------------------------------------------------------- 1 | include "Common/types.fbs"; 2 | include "Common/version.fbs"; 3 | include "program.fbs"; 4 | 5 | namespace tt.target.ttnn; 6 | 7 | table TTNNBinary { 8 | version: tt.target.Version; 9 | ttmlir_git_hash: string; 10 | system_desc: tt.target.SystemDesc; 11 | programs: [Program]; 12 | } 13 | 14 | root_type TTNNBinary; 15 | file_identifier "TTNN"; 16 | file_extension "ttnn"; 17 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTMetal/IR/TTMetal.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTMETAL_IR_TTMETAL_H 6 | #define TTMLIR_DIALECT_TTMETAL_IR_TTMETAL_H 7 | 8 | #include "mlir/Bytecode/BytecodeOpInterface.h" 9 | #include "mlir/IR/Dialect.h" 10 | 11 | #include "ttmlir/Dialect/TTMetal/IR/TTMetalOpsDialect.h.inc" 12 | 13 | #endif 14 | 
-------------------------------------------------------------------------------- /lib/Conversion/TTKernelToEmitC/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_library(TTMLIRTTKernelToEmitC 2 | TTKernelToEmitC.cpp 3 | 4 | ADDITIONAL_HEADER_DIRS 5 | ${PROJECT_SOURCE_DIR}/include/ttmlir/Conversion/TTKernelToEmitC 6 | 7 | DEPENDS 8 | TTMLIRConversionPassIncGen 9 | 10 | LINK_LIBS PUBLIC 11 | MLIRIR 12 | MLIRPass 13 | MLIRArithToEmitC 14 | MLIREmitCDialect 15 | MLIRTargetCpp 16 | MLIRTransformUtils 17 | ) 18 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTKernel/IR/TTKernel.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTKERNEL_IR_TTKERNEL_H 6 | #define TTMLIR_DIALECT_TTKERNEL_IR_TTKERNEL_H 7 | 8 | #include "mlir/Bytecode/BytecodeOpInterface.h" 9 | #include "mlir/IR/Dialect.h" 10 | 11 | #include "ttmlir/Dialect/TTKernel/IR/TTKernelOpsDialect.h.inc" 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /lib/Conversion/TTNNToEmitC/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_library(TTMLIRTTNNToEmitC 2 | TTNNToEmitC.cpp 3 | TTNNToEmitCPass.cpp 4 | 5 | ADDITIONAL_HEADER_DIRS 6 | ${PROJECT_SOURCE_DIR}/include/ttmlir/Conversion/TTNNToEmitC 7 | 8 | DEPENDS 9 | TTMLIRConversionPassIncGen 10 | 11 | LINK_LIBS PUBLIC 12 | MLIRIR 13 | MLIRPass 14 | MLIRSCFToEmitC 15 | MLIREmitCDialect 16 | MLIRFuncTransforms 17 | MLIRTargetCpp 18 | ) 19 | -------------------------------------------------------------------------------- /lib/Dialect/TTKernel/IR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 
add_mlir_dialect_library(MLIRTTKernelDialect 2 | TTKernelDialect.cpp 3 | TTKernelOps.cpp 4 | TTKernelOpsTypes.cpp 5 | 6 | ADDITIONAL_HEADER_DIRS 7 | ${PROJECT_SOURCE_DIR}/include/ttmlir 8 | 9 | DEPENDS 10 | MLIRTTKernelOpsIncGen 11 | MLIRTTOpsIncGen 12 | 13 | LINK_LIBS PUBLIC 14 | MLIRTTMetalDialect 15 | ) 16 | -------------------------------------------------------------------------------- /test/ttmlir/ttmlir-opt.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --show-dialects | FileCheck %s 2 | // CHECK: Available Dialects: 3 | // CHECK: arith 4 | // CHECK: builtin 5 | // CHECK: cf 6 | // CHECK: emitc 7 | // CHECK: func 8 | // CHECK: linalg 9 | // CHECK: ml_program 10 | // CHECK: scf 11 | // CHECK: tensor 12 | // CHECK: tosa 13 | // CHECK: tt 14 | // CHECK: ttir 15 | // CHECK: ttkernel 16 | // CHECK: ttmetal 17 | // CHECK: ttnn 18 | // CHECK: vector 19 | -------------------------------------------------------------------------------- /runtime/tools/python/ttrt/binary/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | from ._C import ( 6 | load_from_path, 7 | load_binary_from_path, 8 | load_binary_from_capsule, 9 | load_system_desc_from_path, 10 | Flatbuffer, 11 | ) 12 | from . 
import stats 13 | 14 | import json 15 | 16 | 17 | def as_dict(bin): 18 | return json.loads(bin.as_json()) 19 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTNN/Types/Types.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTNN_TYPES_TYPES_H 6 | #define TTMLIR_DIALECT_TTNN_TYPES_TYPES_H 7 | #include 8 | 9 | namespace mlir::tt::ttnn { 10 | static constexpr const uint32_t TILE_HEIGHT = 32; 11 | static constexpr const uint32_t TILE_WIDTH = 32; 12 | } // namespace mlir::tt::ttnn 13 | 14 | #endif 15 | -------------------------------------------------------------------------------- /python/Overrides.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttmlir/Bindings/Python/TTMLIRModule.h" 6 | 7 | namespace mlir::ttmlir::python { 8 | 9 | void populateOverridesModule(py::module &m) { 10 | 11 | m.def( 12 | "get_ptr", [](void *op) { return reinterpret_cast(op); }, 13 | py::arg("op").noconvert()); 14 | } 15 | 16 | } // namespace mlir::ttmlir::python 17 | -------------------------------------------------------------------------------- /lib/Dialect/TTIR/Transforms/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTIRTransforms 2 | Allocate.cpp 3 | Broadcast.cpp 4 | Constant.cpp 5 | Generic.cpp 6 | Layout.cpp 7 | Transforms.cpp 8 | Utility.cpp 9 | 10 | ADDITIONAL_HEADER_DIRS 11 | ${PROJECT_SOURCE_DIR}/include/ttmlir 12 | 13 | DEPENDS 14 | MLIRTTIROpsIncGen 15 | MLIRTTIRPassesIncGen 16 | MLIRTTOpsIncGen 17 | ) 18 | -------------------------------------------------------------------------------- 
/python/TTIRModule.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttmlir/Bindings/Python/TTMLIRModule.h" 6 | 7 | #include "mlir/CAPI/IR.h" 8 | 9 | namespace mlir::ttmlir::python { 10 | void populateTTIRModule(py::module &m) { 11 | m.def("is_dps", [](MlirOperation op) { 12 | return mlir::isa(unwrap(op)); 13 | }); 14 | } 15 | } // namespace mlir::ttmlir::python 16 | -------------------------------------------------------------------------------- /docs/src/ttmlir-opt.md: -------------------------------------------------------------------------------- 1 | # `ttmlir-opt` 2 | 3 | The `ttmlir` optimizer driver. This tool is used to run the `ttmlir` compiler passes on a `.mlir` source files and is central to developing and testing the compiler. 4 | 5 | ## Simple Test 6 | 7 | ```bash 8 | ./build/bin/ttmlir-opt --ttir-to-ttnn-backend-pipeline test/ttmlir/Dialect/TTNN/simple_multiply.mlir 9 | # Or 10 | ./build/bin/ttmlir-opt --ttir-to-ttmetal-backend-pipeline test/ttmlir/Dialect/TTNN/simple_multiply.mlir 11 | ``` 12 | -------------------------------------------------------------------------------- /tools/explorer/run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 4 | # 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | import model_explorer 8 | 9 | # TODO(odjuricic): Hack to make our extension default for .mlir files. 10 | # This can be handled better when we switch to our model-explorer fork. 
11 | model_explorer.extension_manager.ExtensionManager.BUILTIN_ADAPTER_MODULES = [] 12 | model_explorer.visualize(extensions=["tt_adapter"]) 13 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/scalar_add_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_eltwise_scalar_add attributes {} { 4 | func.func public @test_scalar_add(%arg0: tensor, %arg1: tensor) -> tensor { 5 | %0 = stablehlo.add %arg0, %arg1 : tensor 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.add"[[C:.*]] 8 | return %0 : tensor 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /.github/workflows/on-push.yml: -------------------------------------------------------------------------------- 1 | name: On push 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: [ "main" ] 7 | 8 | jobs: 9 | pre-commit: 10 | uses: ./.github/workflows/pre-commit.yml 11 | secrets: inherit 12 | spdx: 13 | uses: ./.github/workflows/spdx.yml 14 | secrets: inherit 15 | macos-build: 16 | uses: ./.github/workflows/macos-build.yml 17 | secrets: inherit 18 | build-and-test: 19 | uses: ./.github/workflows/build-and-test.yml 20 | secrets: inherit 21 | -------------------------------------------------------------------------------- /.github/workflows/on-pr.yml: -------------------------------------------------------------------------------- 1 | name: On PR 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | branches: [ "main" ] 7 | 8 | jobs: 9 | pre-commit: 10 | uses: ./.github/workflows/pre-commit.yml 11 | secrets: inherit 12 | spdx: 13 | uses: ./.github/workflows/spdx.yml 14 | secrets: inherit 15 | macos-build: 16 | uses: ./.github/workflows/macos-build.yml 17 | secrets: inherit 18 | build-and-test: 19 | uses: 
./.github/workflows/build-and-test.yml 20 | secrets: inherit 21 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/binary/add_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_eltwise_add attributes {} { 4 | func.func public @test_add(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 5 | %0 = stablehlo.add %arg0, %arg1 : tensor<13x21x3xf32> 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.add"[[C:.*]] 8 | return %0 : tensor<13x21x3xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /.github/actions/install-deps/dependencies.json: -------------------------------------------------------------------------------- 1 | { 2 | "ubuntu-20.04": [ 3 | "libnuma-dev", 4 | "libyaml-cpp-dev", 5 | "libhwloc-dev", 6 | "libgtest-dev", 7 | "libboost-dev" 8 | ], 9 | "ubuntu-22.04": [ 10 | "libnuma-dev", 11 | "libyaml-cpp-dev", 12 | "libhwloc-dev", 13 | "libgtest-dev", 14 | "libboost-dev" 15 | ], 16 | "ubuntu-latest": [ 17 | "libnuma-dev", 18 | "libyaml-cpp-dev", 19 | "libhwloc-dev", 20 | "libgtest-dev", 21 | "libboost-dev" 22 | ] 23 | } 24 | -------------------------------------------------------------------------------- /env/build_venv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 3 | # 4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | OS=$(uname) 7 | TTMLIR_VENV=$TTMLIR_TOOLCHAIN_DIR/venv 8 | TTMLIR_PYTHON_VERSION="${TTMLIR_PYTHON_VERSION:-python3.10}" 9 | 10 | $TTMLIR_PYTHON_VERSION -m venv $TTMLIR_VENV 11 | source $CURRENT_SOURCE_DIR/activate 12 | python -m pip install --upgrade pip 13 | pip install -r 
$CURRENT_SOURCE_DIR/build-requirements.txt 14 | pip install -r $CURRENT_SOURCE_DIR/../test/python/requirements.txt 15 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/iota_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_iota attributes {} { 5 | func.func public @test_iota() -> tensor<1x32x128x128xf32> { 6 | // CHECK: %[[C:.*]] = "ttir.arange"[[C:.*]] 7 | %0 = "stablehlo.iota"() {iota_dimension = 1: i64} : () -> tensor<1x32x128x128xf32> 8 | return %0 : tensor<1x32x128x128xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /.github/get-docker-tag.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 3 | # 4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | # Calculate hash from the following files. This hash is used to tag the docker images. 
7 | # Any change in these files will result in a new docker image build 8 | DOCKERFILE_HASH_FILES=".github/Dockerfile.base .github/Dockerfile.ci .github/Dockerfile.ird env/CMakeLists.txt" 9 | DOCKERFILE_HASH=$(sha256sum $DOCKERFILE_HASH_FILES | sha256sum | cut -d ' ' -f 1) 10 | echo dt-$DOCKERFILE_HASH 11 | -------------------------------------------------------------------------------- /lib/Target/TTMetal/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_translation_library(TTMetalTargetFlatbuffer 2 | TTMetalToFlatbuffer.cpp 3 | TTMetalToFlatbufferRegistration.cpp 4 | 5 | ADDITIONAL_HEADER_DIRS 6 | ${PROJECT_SOURCE_DIR}/include/Target/TTMetal 7 | 8 | LINK_LIBS PUBLIC 9 | MLIRTTMetalDialect 10 | MLIRTTKernelDialect 11 | MLIRTTIRDialect 12 | MLIRTTDialect 13 | TTMLIRTTKernelToEmitC 14 | ) 15 | 16 | target_include_directories(TTMetalTargetFlatbuffer PUBLIC ${PROJECT_BINARY_DIR}/include/ttmlir/Target/Common) 17 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/unary/permute_transpose_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module { 4 | func.func @main(%arg0: tensor<1x32x64x128xf32>) -> tensor<1x128x32x64xf32> { 5 | // CHECK: %[[C:.*]] = "ttir.transpose"[[C:.*]] 6 | // CHECK: %[[C:.*]] = "ttir.transpose"[[C:.*]] 7 | %0 = stablehlo.transpose %arg0, dims = [0, 3, 1, 2] : (tensor<1x32x64x128xf32>) -> tensor<1x128x32x64xf32> 8 | return %0 : tensor<1x128x32x64xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /include/ttmlir/RegisterAll.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef 
TTMLIR_REGISTERALL_H 6 | #define TTMLIR_REGISTERALL_H 7 | 8 | namespace mlir { 9 | 10 | class DialectRegistry; 11 | 12 | } // namespace mlir 13 | 14 | namespace mlir::tt { 15 | 16 | void registerAllDialects(mlir::DialectRegistry ®istry); 17 | void registerAllExtensions(mlir::DialectRegistry ®istry); 18 | void registerAllPasses(); 19 | 20 | } // namespace mlir::tt 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /lib/Target/TTNN/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_translation_library(TTNNTargetFlatbuffer 2 | TTNNToFlatbuffer.cpp 3 | TTNNToFlatbufferRegistration.cpp 4 | 5 | ADDITIONAL_HEADER_DIRS 6 | ${PROJECT_SOURCE_DIR}/include/Target/TTNN 7 | 8 | LINK_LIBS PUBLIC 9 | MLIRTTNNDialect 10 | MLIRTTIRDialect 11 | MLIRTTDialect 12 | MLIRTTKernelDialect 13 | MLIRTTNNTransforms 14 | TTMLIRTTNNToEmitC 15 | ) 16 | 17 | target_include_directories(TTNNTargetFlatbuffer PUBLIC ${PROJECT_BINARY_DIR}/include/ttmlir/Target/Common) 18 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/binary/maximum_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_eltwise_maximum attributes {} { 4 | func.func public @test_maximum(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 5 | %0 = stablehlo.maximum %arg0, %arg1 : tensor<13x21x3xf32> 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.maximum"[[C:.*]] 8 | return %0 : tensor<13x21x3xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/unary/transpose_op.mlir: -------------------------------------------------------------------------------- 1 | // 
REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_transpose attributes {} { 4 | func.func public @test_transpose(%arg0: tensor<64x128xf32>) -> tensor<128x64xf32> { 5 | %0 = stablehlo.transpose %arg0, dims = [1,0] : (tensor<64x128xf32>) -> tensor<128x64xf32> 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.transpose"[[C:.*]] 8 | return %0 : tensor<128x64xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /include/ttmlir/Conversion/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include_directories(${TTMLIR_TOOLCHAIN_DIR}/src/stablehlo) 2 | include_directories(${TTMLIR_TOOLCHAIN_DIR}/src/stablehlo-build) 3 | include_directories(${TTMLIR_SOURCE_DIR}/include) 4 | 5 | set(LLVM_TARGET_DEFINITIONS Passes.td) 6 | if (TTMLIR_ENABLE_STABLEHLO) 7 | mlir_tablegen(Passes.h.inc -gen-pass-decls -name TTMLIRConversion -DTTMLIR_ENABLE_STABLEHLO) 8 | else() 9 | mlir_tablegen(Passes.h.inc -gen-pass-decls -name TTMLIRConversion) 10 | endif() 11 | add_public_tablegen_target(TTMLIRConversionPassIncGen) 12 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTMetal/Transforms/Passes.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTMETAL_TRANSFORMS_PASSES_H 6 | #define TTMLIR_DIALECT_TTMETAL_TRANSFORMS_PASSES_H 7 | 8 | namespace mlir::tt::ttmetal { 9 | #define GEN_PASS_DECL 10 | #include "ttmlir/Dialect/TTMetal/Transforms/Passes.h.inc" 11 | 12 | #define GEN_PASS_REGISTRATION 13 | #include "ttmlir/Dialect/TTMetal/Transforms/Passes.h.inc" 14 | } // namespace mlir::tt::ttmetal 15 | 16 | #endif 17 | 
-------------------------------------------------------------------------------- /tools/explorer/tt_adapter/src/tt_adapter/utils.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | import ttmlir 5 | 6 | 7 | def parse_mlir_file(model_path): 8 | with ttmlir.ir.Context() as ctx, open(model_path, "r") as model_file: 9 | ttmlir.dialects.ttir.register_dialect(ctx) 10 | ttmlir.dialects.tt.register_dialect(ctx) 11 | ttmlir.dialects.ttnn.register_dialect(ctx) 12 | module = ttmlir.ir.Module.parse(model_file.read(), ctx) 13 | return module 14 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/unary/cbrt_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_rsqrt attributes {} { 5 | func.func public @test_cbrt(%arg0: tensor<4xf64>) -> tensor<4xf64> { 6 | %0 = stablehlo.cbrt %arg0 : tensor<4xf64> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.cbrt"[[C:.*]] 9 | return %0 : tensor<4xf64> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTNN/Transforms/TTNNToCpp.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTNN_TRANSFORMS_TTNNTOCPP_H 6 | #define TTMLIR_DIALECT_TTNN_TRANSFORMS_TTNNTOCPP_H 7 | 8 | #include "mlir/IR/BuiltinOps.h" 9 | #include "mlir/Support/LogicalResult.h" 10 | 11 | #include "ttmlir/Dialect/TTNN/IR/TTNNOps.h" 12 | 13 | namespace mlir::tt::ttnn { 14 | LogicalResult 
emitTTNNAsCpp(ModuleOp origOp, llvm::raw_ostream &os); 15 | } // namespace mlir::tt::ttnn 16 | #endif 17 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/conv/conv2d.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_CONV_CONV2D_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_CONV_CONV2D_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::conv { 12 | void run(const ::tt::target::ttnn::Conv2dOp *op, ProgramContext &context); 13 | 14 | } // namespace tt::runtime::ttnn::operations::conv 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/ccl/all_gather.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_CCL_ALL_GATHER_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_CCL_ALL_GATHER_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::ccl { 12 | void run(const ::tt::target::ttnn::AllGatherOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::ccl 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/pool/maxpool2d.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_POOL_MAXPOOL2D_H 6 | #define 
RUNTIME_LIB_TTNN_OPERATIONS_POOL_MAXPOOL2D_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::pool { 12 | void run(const ::tt::target::ttnn::MaxPool2dOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::pool 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/floor_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_floor attributes {} { 5 | func.func public @test_floor(%arg0: tensor<32x32x3xf32>) -> tensor<32x32x3xf32> { 6 | %0 = stablehlo.floor %arg0 : tensor<32x32x3xf32> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.floor"[[C:.*]] 9 | return %0 : tensor<32x32x3xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/rsqrt_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_rsqrt attributes {} { 5 | func.func public @test_rsqrt(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.rsqrt %arg0 : tensor<13x21x3xf32> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.rsqrt"[[C:.*]] 9 | return %0 : tensor<13x21x3xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/unary/absolute_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // 
RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_abs attributes {} { 5 | func.func public @test_abs(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.abs %arg0 : tensor<13x21x3xf32> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.abs"[[C:.*]] 9 | return %0 : tensor<13x21x3xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/unary/sqrt_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_sqrt attributes {} { 5 | func.func public @test_sqrt(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.sqrt %arg0 : tensor<13x21x3xf32> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.sqrt"[[C:.*]] 9 | return %0 : tensor<13x21x3xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/layout/typecast.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_TYPECAST_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_TYPECAST_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::layout { 12 | void run(const ::tt::target::ttnn::TypecastOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::layout 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- 
/test/ttmlir/Conversion/StableHLOToTTIR/unary/negate_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_neg attributes {} { 5 | func.func public @test_neg(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.negate %arg0 : tensor<13x21x3xf32> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.neg"[[C:.*]] 9 | return %0 : tensor<13x21x3xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /cmake/modules/LintTools.cmake: -------------------------------------------------------------------------------- 1 | # clang-tidy setup 2 | add_custom_target(clang-tidy-filter-out-external-srcs COMMAND python3 ${TTMLIR_SOURCE_DIR}/tools/scripts/filter-compile-commands.py ${TTMLIR_BINARY_DIR}/compile_commands.json "${TTMLIR_SOURCE_DIR}") 3 | add_custom_target(clang-tidy COMMAND run-clang-tidy.py -p ${PROJECT_BINARY_DIR} -export-fixes clang-tidy-fixes.yaml -warnings-as-errors '*' -extra-arg-before=-DDISABLE_STATIC_ASSERT_TESTS -extra-arg-before=-D__cpp_structured_bindings=202400 DEPENDS clang-tidy-filter-out-external-srcs) 4 | add_custom_target(clang-format COMMAND git-clang-format) 5 | -------------------------------------------------------------------------------- /include/ttmlir/Target/Common/debug_info.fbs: -------------------------------------------------------------------------------- 1 | include "Common/types.fbs"; 2 | 3 | namespace tt.target; 4 | 5 | table GoldenTensor { 6 | name: string; 7 | shape: [int64]; 8 | stride: [int64]; 9 | dtype: tt.target.DataType; 10 | data: [uint8]; 11 | } 12 | 13 | table GoldenKV { 14 | key: string; 15 | value: GoldenTensor; 16 | } 17 | 18 | table GoldenInfo { 19 | golden_map: [GoldenKV]; 20 | } 21 | 22 | table MLIR { 23 | name: string; 24 | source: 
string; 25 | } 26 | 27 | table DebugInfo { 28 | mlir: MLIR; 29 | cpp: string; 30 | golden_info: GoldenInfo; 31 | } 32 | -------------------------------------------------------------------------------- /lib/Dialect/TTNN/Transforms/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTNNTransforms 2 | TTNNLayout.cpp 3 | Passes.cpp 4 | Optimizer.cpp 5 | TTNNToCpp.cpp 6 | 7 | ADDITIONAL_HEADER_DIRS 8 | ${PROJECT_SOURCE_DIR}/include/ttmlir 9 | 10 | DEPENDS 11 | MLIRTTNNOpsIncGen 12 | MLIRTTNNPassesIncGen 13 | MLIRTTOpsIncGen 14 | 15 | LINK_LIBS PUBLIC 16 | MLIRTTNNDialect 17 | ) 18 | 19 | target_include_directories(MLIRTTNNTransforms PUBLIC ${PROJECT_BINARY_DIR}/include/ttmlir/Target/Common) 20 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/creation/full.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_CREATION_FULL_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_CREATION_FULL_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::creation { 12 | 13 | void run(const ::tt::target::ttnn::FullOp *op, ProgramContext &context); 14 | 15 | } // namespace tt::runtime::ttnn::operations::creation 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/layout/to_device.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_TO_DEVICE_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_TO_DEVICE_H 7 | 8 | #include 
"tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::layout { 12 | void run(const ::tt::target::ttnn::ToDeviceOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::layout 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/layout/to_layout.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_TO_LAYOUT_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_TO_LAYOUT_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::layout { 12 | void run(const ::tt::target::ttnn::ToLayoutOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::layout 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /lib/Dialect/TTNN/Analysis/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTNNAnalysis 2 | LegalGridAnalysis.cpp 3 | OpConfigAnalysis.cpp 4 | MemoryLayoutAnalysis.cpp 5 | L1ChainConfig.cpp 6 | DFShardingPolicy.cpp 7 | L1InterleavedPolicy.cpp 8 | ShardSolver.cpp 9 | 10 | ADDITIONAL_HEADER_DIRS 11 | ${PROJECT_SOURCE_DIR}/include/ttmlir 12 | 13 | DEPENDS 14 | MLIRTTNNOpsIncGen 15 | MLIRTTNNPassesIncGen 16 | MLIRTTOpsIncGen 17 | 18 | LINK_LIBS 19 | MLIRScheduler 20 | ) 21 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/creation/empty.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // 
SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_CREATION_EMPTY_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_CREATION_EMPTY_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::creation { 12 | 13 | void run(const ::tt::target::ttnn::EmptyOp *op, ProgramContext &context); 14 | 15 | } // namespace tt::runtime::ttnn::operations::creation 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/layout/from_device.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_FROM_DEVICE_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_FROM_DEVICE_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::layout { 12 | void run(const ::tt::target::ttnn::FromDeviceOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::layout 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/unary/exponential_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_exp attributes {} { 5 | func.func public @test_exp(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.exponential %arg0 : tensor<13x21x3xf32> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.exp"[[C:.*]] 9 | return %0 : tensor<13x21x3xf32> 10 | } 11 | } 12 | 
-------------------------------------------------------------------------------- /tools/scripts/filter-clang-tidy-fixes.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | import yaml 5 | import sys 6 | 7 | with open(sys.argv[1], "r") as fd: 8 | d = yaml.load(fd, yaml.SafeLoader) 9 | 10 | fixes = d["Diagnostics"] 11 | uniques = set([str(f) for f in fixes]) 12 | unique_fixes = [] 13 | for f in fixes: 14 | if str(f) in uniques: 15 | unique_fixes.append(f) 16 | uniques.remove(str(f)) 17 | d["Diagnostics"] = unique_fixes 18 | 19 | with open(sys.argv[1], "w") as fd: 20 | yaml.dump(d, fd) 21 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/creation/arange.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_CREATION_ARANGE_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_CREATION_ARANGE_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::creation { 12 | 13 | void run(const ::tt::target::ttnn::ArangeOp *op, ProgramContext &context); 14 | 15 | } // namespace tt::runtime::ttnn::operations::creation 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/reduce_add_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_reduce_add attributes {} { 4 | func.func public @test_reduce_add(%arg0: tensor<128x10xf32>, %cst_0: tensor) -> tensor<128xf32> { 5 | %0 = 
stablehlo.reduce(%arg0 init: %cst_0) applies stablehlo.add across dimensions = [1] : (tensor<128x10xf32>, tensor) -> tensor<128xf32> 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.sum"[[C:.*]] 8 | return %0 : tensor<128xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /tools/explorer/tt_adapter/README.md: -------------------------------------------------------------------------------- 1 | # tt-adapter 2 | Model Explorer Adapter built for TT-MLIR outputs. Contains the logic for converting IRs into model explorer graphs. 3 | 4 | ## Integration into model-explorer 5 | Model-Explorer currently primarily supports loading extensions through the CLI. An example of a run call: 6 | 7 | ```sh 8 | model-explorer --extensions=tt_adapter 9 | ``` 10 | 11 | You should be able to see 12 | 13 | ```sh 14 | Loading extensions... 15 | - ... 16 | - Tenstorrent Adapter 17 | - JSON adapter 18 | ``` 19 | 20 | in the command output to verify that it has been run. 
21 | -------------------------------------------------------------------------------- /tools/ttnn-standalone/run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit on error 4 | echo "==================== BUILDING TTNN STANDALONE ===================" 5 | cmake -G Ninja -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ 6 | cmake --build build -- ttnn-standalone 7 | echo "================ FINISH BUILDING TTNN STANDALONE ================" 8 | set +e # unset exit on error 9 | 10 | echo "==================== RUNNING TTNN STANDALONE ====================" 11 | ./build/ttnn-standalone 12 | echo "================= FINISH RUNNING TTNN STANDALONE ================" 13 | -------------------------------------------------------------------------------- /cmake/modules/TTMLIRConfig.cmake.in: -------------------------------------------------------------------------------- 1 | find_package(MLIR REQUIRED CONFIG 2 | HINTS "@TTMLIR_CONFIG_MLIR_CMAKE_DIR@") 3 | 4 | set(TTMLIR_EXPORTED_TARGETS "@TTMLIR_EXPORTS@") 5 | set(TTMLIR_CMAKE_DIR "@TTMLIR_CONFIG_CMAKE_DIR@") 6 | set(TTMLIR_BINARY_DIR "@TTMLIR_CONFIG_BINARY_DIR@") 7 | set(TTMLIR_INCLUDE_DIRS "@TTMLIR_CONFIG_INCLUDE_DIRS@") 8 | set(TTMLIR_LIBRARY_DIRS "@TTMLIR_CONFIG_LIBRARY_DIRS@") 9 | set(TTMLIR_TOOLS_DIR "@TTMLIR_CONFIG_TOOLS_DIR@") 10 | 11 | # Provide all our library targets to users. 
12 | if(NOT TARGET TTMLIRSupport) 13 | @TTMLIR_CONFIG_INCLUDE_EXPORTS@ 14 | endif() 15 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTNN/IR/TTNNOpsTypes.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTNN_IR_TTNNOPSTYPES_H 6 | #define TTMLIR_DIALECT_TTNN_IR_TTNNOPSTYPES_H 7 | 8 | #include "ttmlir/Dialect/TT/IR/TTOpsTypes.h" 9 | #include "ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.h" 10 | 11 | #include "mlir/IR/BuiltinAttributes.h" 12 | #include "mlir/IR/BuiltinTypes.h" 13 | 14 | #define GET_TYPEDEF_CLASSES 15 | #include "ttmlir/Dialect/TTNN/IR/TTNNOpsTypes.h.inc" 16 | 17 | #endif // TTMLIR_DIALECT_TTNN_IR_TTNNOPSTYPES_H 18 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/context/get_device.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_CONTEXT_GET_DEVICE_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_CONTEXT_GET_DEVICE_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::context { 12 | void run(const ::tt::target::ttnn::GetDeviceOp *op, ProgramContext &context); 13 | 14 | } // namespace tt::runtime::ttnn::operations::context 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/deletion/deallocate.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef 
RUNTIME_LIB_TTNN_OPERATIONS_DELETION_DEALLOCATE_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_DELETION_DEALLOCATE_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::deletion { 12 | void run(const ::tt::target::ttnn::DeallocateOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::deletion 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/embedding/embedding.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_EMBEDDING_EMBEDDING_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_EMBEDDING_EMBEDDING_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::embedding { 12 | void run(const ::tt::target::ttnn::EmbeddingOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::embedding 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/reduction/reduction.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_REDUCTION_REDUCTION_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_REDUCTION_REDUCTION_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::reduction { 12 | void run(const ::tt::target::ttnn::ReductionOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::reduction 14 | 15 | #endif 16 | 
-------------------------------------------------------------------------------- /.github/check-spdx.yaml: -------------------------------------------------------------------------------- 1 | DEFAULT: 2 | perform_check: yes # Perform check for all files 3 | allowed_licenses: 4 | - Apache-2.0 5 | license_for_new_files: Apache-2.0 # license to be used when inserting a new copyright notice 6 | new_notice_c: |+ # notice for new C, CPP, H, HPP and LD files 7 | // SPDX-FileCopyrightText: (c) {years} Tenstorrent AI ULC 8 | // 9 | // SPDX-License-Identifier: {license} 10 | 11 | new_notice_python: |+ # notice for new python files 12 | # SPDX-FileCopyrightText: (c) {years} Tenstorrent AI ULC 13 | # 14 | # SPDX-License-Identifier: {license} 15 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/data_movement/slice.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_DATA_MOVEMENT_SLICE_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_DATA_MOVEMENT_SLICE_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::data_movement { 12 | void run(const ::tt::target::ttnn::SliceOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::data_movement 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTIR/tosa_to_ttir_multiply.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --convert-tosa-to-ttir %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @test_mul(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 5 | %0 = tosa.mul 
%arg0, %arg1 {shift = 0 : i8} : (tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.multiply"[[C:.*]] 8 | return %0 : tensor<13x21x3xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/data_movement/concat.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_DATA_MOVEMENT_CONCAT_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_DATA_MOVEMENT_CONCAT_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::data_movement { 12 | void run(const ::tt::target::ttnn::ConcatOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::data_movement 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/data_movement/reshape.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_DATA_MOVEMENT_RESHAPE_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_DATA_MOVEMENT_RESHAPE_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::data_movement { 12 | void run(const ::tt::target::ttnn::ReshapeOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::data_movement 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/normalization/softmax.h: 
-------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_NORMALIZATION_SOFTMAX_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_NORMALIZATION_SOFTMAX_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::normalization { 12 | void run(const ::tt::target::ttnn::SoftmaxOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::normalization 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/dot_general/dot_general_2d.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_dot_general attributes {} { 4 | func.func public @test_dot_general(%arg0 : tensor<16x32xf32>, %arg1 : tensor<32x8xf32>) -> tensor<16x8xf32> { 5 | %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<16x32xf32>, tensor<32x8xf32>) -> tensor<16x8xf32> 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]] 8 | return %0 : tensor<16x8xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /include/ttmlir-c/Dialects.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_C_DIALECTS_H 6 | #define TTMLIR_C_DIALECTS_H 7 | 8 | #include "mlir-c/IR.h" 9 | 10 | #ifdef __cplusplus 11 | extern "C" { 12 | #endif 13 | 14 | MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(TT, tt); 15 | MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(TTIR, ttir); 16 
| MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(TTKernel, ttkernel); 17 | MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(TTNN, ttnn); 18 | 19 | #ifdef __cplusplus 20 | } 21 | #endif 22 | 23 | #endif // TTMLIR_C_DIALECTS_H 24 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/reduce_maximum_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_reduce_maximum attributes {} { 4 | func.func public @test_reduce_maximum(%arg0: tensor<128x10xf32>, %cst_0: tensor) -> tensor<128xf32> { 5 | %0 = stablehlo.reduce(%arg0 init: %cst_0) applies stablehlo.maximum across dimensions = [1] : (tensor<128x10xf32>, tensor) -> tensor<128xf32> 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.max"[[C:.*]] 8 | return %0 : tensor<128xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /lib/Dialect/TTNN/IR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect_library(MLIRTTNNDialect 2 | TTNNOpsAttrs.cpp 3 | TTNNDialect.cpp 4 | TTNNOps.cpp 5 | TTNNOpModelInterface.cpp 6 | TTNNOpsTypes.cpp 7 | 8 | ADDITIONAL_HEADER_DIRS 9 | ${PROJECT_SOURCE_DIR}/include/ttmlir 10 | 11 | DEPENDS 12 | MLIRTTNNOpsIncGen 13 | MLIRTTOpsIncGen 14 | TTNNOpModelLib 15 | 16 | LINK_LIBS PUBLIC 17 | TTMLIRTTNNUtils 18 | MLIRSCFToEmitC 19 | MLIRLinalgDialect 20 | MLIRMLProgramDialect 21 | TTNNOpModelLib 22 | ) 23 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/data_movement/transpose.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef 
RUNTIME_LIB_TTNN_OPERATIONS_DATA_MOVEMENT_TRANSPOSE_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_DATA_MOVEMENT_TRANSPOSE_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::data_movement { 12 | void run(const ::tt::target::ttnn::TransposeOp *op, ProgramContext &context); 13 | } // namespace tt::runtime::ttnn::operations::data_movement 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/binary/divide_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_divice attributes {} { 5 | func.func public @test_divide(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.divide %arg0, %arg1 : tensor<13x21x3xf32> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.div"[[C:.*]] 9 | return %0 : tensor<13x21x3xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_max.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<512x32xbf16>) -> tensor<512xbf16> { 5 | %0 = tensor.empty() : tensor<512xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.max"[[C:.*]] 7 | %1 = "ttir.max"(%arg0, %0) <{dim_arg = [1: i32], keep_dim = false, operand_constraints = [#any_device, #any_device]}> : (tensor<512x32xbf16>, tensor<512xbf16>) -> tensor<512xbf16> 8 | return %1 : tensor<512xbf16> 9 | } 10 | } 11 | 
-------------------------------------------------------------------------------- /include/ttmlir/Target/Common/Target.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_TARGET_COMMON_TARGET_H 6 | #define TTMLIR_TARGET_COMMON_TARGET_H 7 | 8 | #pragma clang diagnostic push 9 | #pragma clang diagnostic ignored "-Wcovered-switch-default" 10 | 11 | #include "ttmlir/Target/Common/debug_info_generated.h" 12 | #include "ttmlir/Target/Common/system_desc_generated.h" 13 | #include "ttmlir/Target/Common/types_generated.h" 14 | #include "ttmlir/Target/Common/version_generated.h" 15 | 16 | #pragma clang diagnostic pop 17 | 18 | #endif 19 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/layout/to_memory_config.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_TO_MEMORY_CONFIG_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_LAYOUT_TO_MEMORY_CONFIG_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::layout { 12 | void run(const ::tt::target::ttnn::ToMemoryConfigOp *op, 13 | ProgramContext &context); 14 | } // namespace tt::runtime::ttnn::operations::layout 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_mean.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module { 4 | func.func @forward(%arg0: tensor<512x1024xbf16>) -> 
tensor<512x32xbf16> { 5 | %0 = tensor.empty() : tensor<512x32xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.mean"[[C:.*]] 7 | %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16> 8 | return %1 : tensor<512x32xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/binary/multiply_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_multiply attributes {} { 5 | func.func public @test_multiply(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.multiply %arg0, %arg1 : tensor<13x21x3xf32> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.multiply"[[C:.*]] 9 | return %0 : tensor<13x21x3xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/binary/subtract_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_subtract attributes {} { 5 | func.func public @test_subtract(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.subtract %arg0, %arg1 : tensor<13x21x3xf32> 7 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttir.subtract"[[C:.*]] 9 | return %0 : tensor<13x21x3xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_sum.mlir: 
-------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x32xbf16> { 5 | %0 = tensor.empty() : tensor<512x32xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]] 7 | %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16> 8 | return %1 : tensor<512x32xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/ArithToStableHLO/constant_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_constant attributes {} { 4 | func.func public @test_splat() -> tensor<64xf32> { 5 | %0 = arith.constant dense<0.3> : tensor<64xf32> 6 | // CHECK: %[[C:.*]] = "ttir.constant"[[C:.*]] 7 | return %0 : tensor<64xf32> 8 | } 9 | 10 | func.func public @test_multiple() -> tensor<2x2xf32> { 11 | %0 = arith.constant dense<[[0.0, 1.0], [2.0, 3.0]]> : tensor<2x2xf32> 12 | // CHECK: %[[C:.*]] = "ttir.constant"[[C:.*]] 13 | return %0 : tensor<2x2xf32> 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /test/ttmlir/Translate/TTNN/1d_tensor.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | ttmlir-translate --ttnn-to-flatbuffer 2 | #any_device = #tt.operand_constraint 3 | 4 | func.func @embedding_1d_tensor(%arg0: tensor<32xf32>, %arg1: tensor<512x128xf32>) -> tensor<32x128xf32> { 5 | %0 = tensor.empty() : tensor<32x128xf32> 6 | %1 = "ttir.embedding"(%arg0, %arg1, %0) 
<{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32xf32>, tensor<512x128xf32>, tensor<32x128xf32>) -> tensor<32x128xf32> 7 | return %1 : tensor<32x128xf32> 8 | } 9 | -------------------------------------------------------------------------------- /lib/Dialect/TTIR/Transforms/Transforms.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttmlir/Dialect/TT/IR/TT.h" 6 | #include "ttmlir/Dialect/TTIR/Transforms/Passes.h" 7 | 8 | #include "mlir/Dialect/Func/IR/FuncOps.h" 9 | #include "mlir/Dialect/Tensor/IR/Tensor.h" 10 | #include "mlir/Rewrite/FrozenRewritePatternSet.h" 11 | #include "mlir/Transforms/GreedyPatternRewriteDriver.h" 12 | 13 | namespace mlir::tt::ttir { 14 | #define GEN_PASS_DEF_TTIRSLIDINGWINDOW2DFIXSHAPES 15 | #include "ttmlir/Dialect/TTIR/Transforms/Passes.h.inc" 16 | 17 | } // namespace mlir::tt::ttir 18 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/simple_nop.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-load-system-desc="path=%system_desc_path%" --ttir-to-ttnn-backend-pipeline %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | module @jit_convert_element_type attributes {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} { 5 | func.func public @main(%arg0: tensor<2x2xf32> {mhlo.layout_mode = "default"}) -> (tensor<2x2xf32> {jax.result_info = "", mhlo.layout_mode = "default"}) { 6 | // CHECK: return %arg0 : tensor<2x2xf32, #ttnn_layout> 7 | return %arg0 : tensor<2x2xf32> 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/matmul/matmul.h: 
-------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_MATMUL_MATMUL_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_MATMUL_MATMUL_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::matmul { 12 | void run(const ::tt::target::ttnn::MatmulOp *op, ProgramContext &context); 13 | void run(const ::tt::target::ttnn::LinearOp *op, ProgramContext &context); 14 | } // namespace tt::runtime::ttnn::operations::matmul 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_squeeze.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s| FileCheck %s 2 | #any_device_tile = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<1x2x1x32x32xbf16>) -> tensor<1x2x32x32xbf16> { 5 | %0 = tensor.empty() : tensor<1x2x32x32xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] 7 | %1 = "ttir.squeeze"(%arg0, %0) <{dim = -3 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<1x2x1x32x32xbf16>, tensor<1x2x32x32xbf16>) -> tensor<1x2x32x32xbf16> 8 | return %1 : tensor<1x2x32x32xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /docs/src/flatbuffers.md: -------------------------------------------------------------------------------- 1 | # Flatbuffers 2 | 3 | Flatbuffers are the binary serialization format used by TTMLIR and they 4 | currently come in a few flavors (designated by the file extension): 5 | 6 | - `.ttsys`: A system description file that is the mechanism for supplying target 7 | information to the compiler. 
These can be collected on a target machine and 8 | downloaded to a development machine to enable cross-compilation. 9 | - `.ttnn`: A compiled binary file intended to be loaded and executed by the 10 | TTNN backend runtime. 11 | - `.ttb`: A compiled binary file intended to be loaded and executed by the 12 | TTMetal backend runtime (Unsupported). 13 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x8.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<32x32xbf16>) -> tensor<32x32xbf16> { 5 | %0 = tensor.empty() : tensor<32x32xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]] 7 | %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16> 8 | return %1 : tensor<32x32xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /include/ttmlir/Conversion/ArithToStableHLO/ArithToStableHLO.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_CONVERSION_ARITHTOSTABLEHLO_ARITHTOSTABLEHLO_H 6 | #define TTMLIR_CONVERSION_ARITHTOSTABLEHLO_ARITHTOSTABLEHLO_H 7 | 8 | #include "mlir/Pass/Pass.h" 9 | #include "mlir/Transforms/DialectConversion.h" 10 | 11 | namespace mlir::tt { 12 | 13 | #ifdef TTMLIR_ENABLE_STABLEHLO 14 | std::unique_ptr> createConvertArithToStableHLOPass(); 15 | #endif 16 | 17 | } // namespace mlir::tt 18 | 19 | #endif // TTMLIR_CONVERSION_ARITHTOSTABLEHLO_ARITHTOSTABLEHLO_H 20 | 
-------------------------------------------------------------------------------- /runtime/lib/ttnn/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(operations) 2 | add_library(TTRuntimeTTNN 3 | STATIC 4 | runtime.cpp 5 | program.cpp 6 | ) 7 | # We have to set the C++ standard to 20 because tt-metal requires it 8 | set_property(TARGET TTRuntimeTTNN PROPERTY CXX_STANDARD 20) 9 | target_include_directories(TTRuntimeTTNN PUBLIC 10 | ${PROJECT_SOURCE_DIR}/runtime/include 11 | ${PROJECT_BINARY_DIR}/include/ttmlir/Target/Common 12 | ) 13 | target_include_directories(TTRuntimeTTNN SYSTEM PUBLIC "$") 14 | target_link_libraries(TTRuntimeTTNN PUBLIC TTRuntimeTTNNOps) 15 | add_dependencies(TTRuntimeTTNN TTRuntimeTTNNOps) 16 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_get_dimension_size.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<13x21x3xf32>) -> tensor<1xi32> { 5 | %0 = "ttir.get_dimension_size"(%arg0) <{dimension = 1 : i32}> : (tensor<13x21x3xf32>) -> tensor<1xi32> 6 | // CHECK: [[VAL:%[0-9]+]] = "ttnn.full"(%{{[0-9]+}}) <{fillValue = 2.100000e+01 : f32}> : (!tt.device<#device>) -> tensor<1xi32, {{.*}}> 7 | return %0 : tensor<1xi32> 8 | // CHECK: return [[VAL]] : tensor<1xi32, {{.*}}> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_unsqueeze.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s| FileCheck %s 2 | #any_device_tile = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> 
tensor<4x1x2x32x32xbf16> { 5 | %0 = tensor.empty() : tensor<4x1x2x32x32xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] 7 | %1 = "ttir.unsqueeze"(%arg0, %0) <{dim = -4 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<4x1x2x32x32xbf16>) -> tensor<4x1x2x32x32xbf16> 8 | return %1 : tensor<4x1x2x32x32xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/transpose/simple_transpose.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device_tile = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xbf16>) -> tensor<128x64xbf16> { 5 | %0 = tensor.empty() : tensor<128x64xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]] 7 | %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16> 8 | return %1 : tensor<128x64xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /include/ttmlir/Target/TTNN/Target.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_TARGET_TTNN_TARGET_H 6 | #define TTMLIR_TARGET_TTNN_TARGET_H 7 | 8 | #pragma clang diagnostic push 9 | #pragma clang diagnostic ignored "-Wcovered-switch-default" 10 | 11 | #include "ttmlir/Target/Common/debug_info_generated.h" 12 | #include "ttmlir/Target/Common/system_desc_generated.h" 13 | #include "ttmlir/Target/Common/types_generated.h" 14 | #include "ttmlir/Target/Common/version_generated.h" 15 | #include "ttmlir/Target/TTNN/binary_generated.h" 16 | 17 | #pragma clang 
diagnostic pop 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /lib/Conversion/StableHLOToTTIR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include_directories(${TTMLIR_TOOLCHAIN_DIR}/src/stablehlo) 2 | include_directories(${TTMLIR_TOOLCHAIN_DIR}/src/stablehlo-build) 3 | include_directories(${TTMLIR_SOURCE_DIR}/include) 4 | include_directories(${PROJECT_SOURCE_DIR}/include) 5 | 6 | add_mlir_library(TTMLIRStableHLOToTTIR 7 | StableHLOToTTIRPatterns.cpp 8 | StableHLOToTTIRPass.cpp 9 | ArithToStableHLOPass.cpp 10 | 11 | ADDITIONAL_HEADER_DIRS 12 | ${PROJECT_SOURCE_DIR}/include/ttmlir/Conversion/StableHLOToTTIR 13 | 14 | DEPENDS 15 | TTMLIRConversionPassIncGen 16 | 17 | LINK_LIBS PUBLIC 18 | MLIRIR 19 | MLIRPass 20 | StablehloPasses 21 | ) 22 | -------------------------------------------------------------------------------- /lib/CAPI/TTKernelTypes.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttmlir-c/TTKernelTypes.h" 6 | #include "mlir/CAPI/IR.h" 7 | #include "mlir/CAPI/Support.h" 8 | 9 | #include "ttmlir/Dialect/TTKernel/IR/TTKernelOpsTypes.h" 10 | 11 | using namespace mlir::tt::ttkernel; 12 | 13 | MlirType ttmlirTTKernelCBTypeGet(MlirContext ctx, uint64_t address, 14 | uint64_t port, MlirType memrefType) { 15 | return wrap(CBType::get(unwrap(ctx), symbolizeCBPort(port).value(), address, 16 | mlir::cast(unwrap(memrefType)))); 17 | } 18 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/dynamic_iota_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 
| module @jit_dynamic_iota attributes {} { 5 | func.func public @test_dynamic_iota() -> tensor<1x32x128x128xf32> { 6 | // CHECK: %[[C:.*]] = "ttir.arange"[[C:.*]] 7 | %output_shape = stablehlo.constant dense<[1, 32, 128, 128]> : tensor<4xi64> 8 | %0 = "stablehlo.dynamic_iota"(%output_shape) {iota_dimension = 1: i64} : (tensor<4xi64>) -> tensor<1x32x128x128xf32> 9 | return %0 : tensor<1x32x128x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x16_reverse_dims.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device_tile = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x16xbf16>) -> tensor<16x64xbf16> { 5 | %0 = tensor.empty() : tensor<16x64xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]] 7 | %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 1 : si32, dim1 = 0 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x16xbf16>, tensor<16x64xbf16>) -> tensor<16x64xbf16> 8 | return %1 : tensor<16x64xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/transpose/simple_transpose_negative_dims.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device_tile = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<32x32xbf16>) -> tensor<32x32xbf16> { 5 | %0 = tensor.empty() : tensor<32x32xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]] 7 | %1 = "ttir.transpose"(%arg0, %0) <{dim0 = -1 : si32, dim1 = -2 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16> 8 | return %1 : 
tensor<32x32xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_reshape.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s| FileCheck %s 2 | #any_device_tile = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> tensor<2x4x32x32xbf16> { 5 | %0 = tensor.empty() : tensor<2x4x32x32xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] 7 | %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16> 8 | return %1 : tensor<2x4x32x32xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /tools/ttmlir-opt/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) 2 | get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) 3 | get_property(extension_libs GLOBAL PROPERTY MLIR_EXTENSION_LIBS) 4 | 5 | set(LIBS ${dialect_libs} ${conversion_libs} ${extension_libs} MLIROptLib MLIRTargetCpp TTMLIRStatic) 6 | 7 | add_llvm_executable(ttmlir-opt ttmlir-opt.cpp DISABLE_LLVM_LINK_LLVM_DYLIB) 8 | 9 | llvm_update_compile_flags(ttmlir-opt) 10 | target_link_libraries(ttmlir-opt PRIVATE ${LIBS}) 11 | 12 | mlir_check_all_link_libraries(ttmlir-opt) 13 | 14 | install(TARGETS ttmlir-opt DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT Test EXCLUDE_FROM_ALL) 15 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTIR/Decomposition/arange_decomposition.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttir-decomposition %s | 
FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttir.arange"[[C:.*]] 6 | // CHECK: %[[C:.*]] = "ttir.transpose"[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.broadcast"[[C:.*]] 8 | %1 = "ttir.arange"() <{start = 0: si64, end = 32: si64, step = 1: si64, arange_dimension = 1: i64}> : () -> tensor<1x32x128x128xf32> 9 | return %1 : tensor<1x32x128x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTIR/test_generic.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-generic %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | %0 = tensor.empty() : tensor<64x128xf32> 6 | // CHECK: %[[C:.*]] = "ttir.generic"[[C:.*]] 7 | %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 8 | return %1 : tensor<64x128xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTIR/Transforms/Passes.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTIR_TRANSFORMS_PASSES_H 6 | #define TTMLIR_DIALECT_TTIR_TRANSFORMS_PASSES_H 7 | 8 | #include "ttmlir/Dialect/TTIR/IR/TTIR.h" 9 | #include "ttmlir/Dialect/TTIR/IR/TTIROps.h" 10 | 11 | #include "mlir/IR/BuiltinOps.h" 12 | #include "mlir/Pass/Pass.h" 13 | 14 | namespace mlir::tt::ttir { 15 | #define GEN_PASS_DECL 16 | #include 
"ttmlir/Dialect/TTIR/Transforms/Passes.h.inc" 17 | 18 | #define GEN_PASS_REGISTRATION 19 | #include "ttmlir/Dialect/TTIR/Transforms/Passes.h.inc" 20 | } // namespace mlir::tt::ttir 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/ccl/all_gather_negative.mlir: -------------------------------------------------------------------------------- 1 | // RUN: not ttmlir-opt --ttir-to-ttnn-backend-pipeline %s 2>&1 | FileCheck %s 2 | // CHECK: error: 'ttir.all_gather' op Invalid dimension for all gather op 3 | #any_device = #tt.operand_constraint 4 | module attributes {} { 5 | func.func @forward(%arg0: tensor<1x1x32x32xbf16>) -> tensor<1x1x32x128xbf16> { 6 | %0 = tensor.empty() : tensor<1x1x32x128xbf16> 7 | %1 = "ttir.all_gather"(%arg0, %0) <{dim = 4 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x32x32xbf16>, tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> 8 | return %1 : tensor<1x1x32x128xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /include/ttmlir-c/TTKernelTypes.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_C_TTKERNELTYPES_H 6 | #define TTMLIR_C_TTKERNELTYPES_H 7 | 8 | #include "ttmlir-c/Dialects.h" 9 | 10 | #ifdef __cplusplus 11 | extern "C" { 12 | #endif 13 | 14 | MLIR_CAPI_EXPORTED MlirType ttmlirTTKernelCBTypeGet(MlirContext ctx, 15 | uint64_t address, 16 | uint64_t port, 17 | MlirType memrefType); 18 | 19 | #ifdef __cplusplus 20 | } 21 | #endif 22 | 23 | #endif // TTMLIR_C_TTKERNELTYPES_H 24 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTIR/IR/TTIROpsInterfaces.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 
(c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTIR_IR_TTIROPSINTERFACES_H 6 | #define TTMLIR_DIALECT_TTIR_IR_TTIROPSINTERFACES_H 7 | 8 | #include "ttmlir/Dialect/TT/IR/TTOpsTypes.h" 9 | #include "ttmlir/Dialect/TTIR/IR/TTIR.h" 10 | 11 | namespace mlir { 12 | namespace tt { 13 | namespace ttir { 14 | namespace detail { 15 | mlir::LogicalResult verifyBroadcastable(mlir::Operation *op); 16 | } // namespace detail 17 | } // namespace ttir 18 | } // namespace tt 19 | } // namespace mlir 20 | 21 | #include "ttmlir/Dialect/TTIR/IR/TTIROpsInterfaces.h.inc" 22 | 23 | #endif 24 | -------------------------------------------------------------------------------- /test/lit.site.cfg.py.in: -------------------------------------------------------------------------------- 1 | @LIT_SITE_CFG_IN_HEADER@ 2 | 3 | config.llvm_tools_dir = lit_config.substitute("@LLVM_TOOLS_DIR@") 4 | config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@" 5 | config.mlir_obj_dir = "@MLIR_BINARY_DIR@" 6 | config.enable_bindings_python = @MLIR_ENABLE_BINDINGS_PYTHON@ and "@TTMLIR_ENABLE_BINDINGS_PYTHON@" == "ON" 7 | config.ttmlir_obj_root = "@TTMLIR_BINARY_DIR@" 8 | config.llvm_shlib_ext = "@SHLIBEXT@" 9 | config.enable_stablehlo = "@TTMLIR_ENABLE_STABLEHLO@" and "@TTMLIR_ENABLE_STABLEHLO@" == "ON" 10 | 11 | import lit.llvm 12 | lit.llvm.initialize(lit_config, config) 13 | 14 | # Let the main config do the real work. 
15 | lit_config.load_config(config, "@TTMLIR_SOURCE_DIR@/test/lit.cfg.py") 16 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/dot_general/dot_general_3d.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module { 4 | func.func @main(%arg0: tensor<8x1x920xbf16>, %arg1: tensor<8x100x32xbf16>, %arg2: tensor<8x32x920xbf16>) -> tensor<8x100x920xbf16> { 5 | %0 = stablehlo.broadcast_in_dim %arg2, dims = [0, 1, 2] : (tensor<8x32x920xbf16>) -> tensor<8x32x920xbf16> 6 | // CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]] 7 | %1 = stablehlo.dot_general %arg1, %0, batching_dims = [0] x [0], contracting_dims = [2] x [1] : (tensor<8x100x32xbf16>, tensor<8x32x920xbf16>) -> tensor<8x100x920xbf16> 8 | return %1 : tensor<8x100x920xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/abs/simple_abs.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.abs"[[C:.*]] 8 | %1 = "ttir.abs"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir: -------------------------------------------------------------------------------- 1 | // RUN: 
ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.cos"[[C:.*]] 8 | %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/negate/simple_neg.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]] 8 | %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.sin"[[C:.*]] 8 | %1 = "ttir.sin"(%arg0, %0) 
<{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /tools/scripts/filter-compile-commands.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | import sys 6 | import json 7 | import re 8 | 9 | assert len(sys.argv) == 3, "Usage: {} ".format( 10 | sys.argv[0] 11 | ) 12 | 13 | with open(sys.argv[1], "r") as f: 14 | compile_commands = json.load(f) 15 | 16 | filtered_commands = [] 17 | m = re.compile(r"^{}/((?!third_party).)*$".format(sys.argv[2])) 18 | for command in compile_commands: 19 | if m.match(command["file"]): 20 | filtered_commands.append(command) 21 | 22 | with open(sys.argv[1], "w") as f: 23 | json.dump(filtered_commands, f, indent=2) 24 | -------------------------------------------------------------------------------- /cmake/modules/FindMLIR.cmake: -------------------------------------------------------------------------------- 1 | # Find MLIR and LLVM 2 | find_package(MLIR REQUIRED CONFIG) 3 | message(STATUS "Using MLIRConfig.cmake in: ${MLIR_DIR}") 4 | message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}") 5 | set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/bin) 6 | set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/lib) 7 | set(MLIR_BINARY_DIR ${CMAKE_BINARY_DIR}) 8 | list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}") 9 | list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}") 10 | include(TableGen) 11 | include(AddLLVM) 12 | include(AddMLIR) 13 | include(HandleLLVMOptions) 14 | 15 | if(MLIR_ENABLE_BINDINGS_PYTHON) 16 | include(MLIRDetectPythonEnv) 17 | mlir_configure_python_dev_packages() 18 | endif() 19 | 
-------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/unary/sine_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_eltwise_sine attributes {} { 4 | func.func public @test_sine(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 5 | %0 = stablehlo.sine %arg0 : tensor<13x21x3xf32> 6 | // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] 7 | // CHECK: [[VAL1:%[0-9]+]] = "ttir.sin"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] 8 | return %0 : tensor<13x21x3xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/cbrt/simple_cbrt.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.cbrt"[[C:.*]] 8 | %1 = "ttir.cbrt"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = 
#tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.ceil"[[C:.*]] 8 | %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/rsqrt/simple_rsqrt.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.rsqrt"[[C:.*]] 8 | %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/sqrt/simple_sqrt.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]] 8 | %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, 
#any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /include/ttmlir/Conversion/TTIRToTTNN/TTIRToTTNN.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_CONVERSION_TTIRTOTTNN_TTIRTOTTNN_H 6 | #define TTMLIR_CONVERSION_TTIRTOTTNN_TTIRTOTTNN_H 7 | 8 | #include "mlir/Pass/Pass.h" 9 | #include "mlir/Transforms/DialectConversion.h" 10 | 11 | namespace mlir::tt { 12 | 13 | void populateTTIRToTTNNPatterns(MLIRContext *ctx, RewritePatternSet &patterns, 14 | TypeConverter &typeConverter); 15 | 16 | std::unique_ptr> createConvertTTIRToTTNNPass(); 17 | 18 | } // namespace mlir::tt 19 | 20 | #endif // TTMLIR_CONVERSION_TTIRTOTTNN_TTIRTOTTNN_H 21 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/binary/minimum_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_eltwise_minimum attributes {} { 4 | func.func public @test_minimum(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 5 | // CHECK: %[[C:.*]] = tensor.empty() 6 | // CHECK-SAME: [[TENSOR:tensor<13x21x3xf32>]] 7 | // CHECK: %[[C:.*]] = "ttir.minimum" 8 | // CHECK-SAME: [[TENSOR]] 9 | // CHECK-SAME: [[TENSOR]] 10 | // CHECK-SAME: [[TENSOR]] 11 | // CHECK-SAME: -> [[TENSOR]] 12 | %0 = stablehlo.minimum %arg0, %arg1 : tensor<13x21x3xf32> 13 | return %0 : tensor<13x21x3xf32> 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/slice_op.mlir: 
-------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_subtract attributes {} { 5 | func.func @slice_op(%arg0: tensor<32x64xf32>) -> tensor<8x8xf32> { 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.slice"[[C:.*]] 8 | %result = "stablehlo.slice"(%arg0) { 9 | start_indices = array, 10 | limit_indices = array, 11 | strides = array 12 | } : (tensor<32x64xf32>) -> tensor<8x8xf32> 13 | return %result : tensor<8x8xf32> 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/unary/ceil_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_eltwise_ceil attributes {} { 4 | func.func public @test_ceil(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 5 | %0 = stablehlo.ceil %arg0 : tensor<13x21x3xf32> 6 | // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] 7 | // CHECK: [[VAL1:%[0-9]+]] = "ttir.ceil"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] 8 | return %0 : tensor<13x21x3xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/ccl/all_gather.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<1x1x32x32xbf16>) -> tensor<1x1x32x128xbf16> { 5 | // CHECK: %[[C:.*]] = 
"ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<1x1x32x128xbf16> 7 | // CHECK: %[[C:.*]] = "ttnn.all_gather"[[C:.*]] 8 | %1 = "ttir.all_gather"(%arg0, %0) <{dim = 3 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x32x32xbf16>, tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> 9 | return %1 : tensor<1x1x32x128xbf16> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/remove_empty_op.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device_tile = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> tensor<2x4x32x32xbf16> { 5 | // CHECK-NOT: "ttnn.empty" 6 | %0 = tensor.empty() : tensor<2x4x32x32xbf16> 7 | // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] 8 | %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16> 9 | return %1 : tensor<2x4x32x32xbf16> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_slice.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s| FileCheck %s 2 | #any_device_tile = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<4x32x32xbf16>) -> tensor<2x16x16xbf16> { 5 | %0 = tensor.empty() : tensor<2x16x16xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.slice"[[C:.*]] 7 | %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [2: i32, 16: i32, 16: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x32x32xbf16>, tensor<2x16x16xbf16>) -> 
tensor<2x16x16xbf16> 8 | return %1 : tensor<2x16x16xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/eltwise/unary/unary.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_ELTWISE_UNARY_UNARY_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_ELTWISE_UNARY_UNARY_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::unary { 12 | 13 | inline bool isUnaryOp(const ::tt::target::ttnn::EltwiseOp *op) { 14 | return op->ins()->size() == 1; 15 | } 16 | 17 | void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context); 18 | 19 | } // namespace tt::runtime::ttnn::operations::unary 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/unary/cosine_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_eltwise_cosine attributes {} { 4 | func.func public @test_cosine(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 5 | %0 = stablehlo.cosine %arg0 : tensor<13x21x3xf32> 6 | // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] 7 | // CHECK: [[VAL1:%[0-9]+]] = "ttir.cos"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] 8 | return %0 : tensor<13x21x3xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/concat/concat_dim_oob.mlir: 
-------------------------------------------------------------------------------- 1 | // RUN: not ttmlir-opt --ttir-to-ttnn-backend-pipeline %s 2>&1 | FileCheck %s 2 | // CHECK: error: 'ttir.concat' op Invalid dimension 2 for concatenation. 3 | #any_device = #tt.operand_constraint 4 | module attributes {} { 5 | func.func @forward(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> { 6 | %0 = tensor.empty() : tensor<32x96xf32> 7 | %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 2 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32> 8 | return %1 : tensor<32x96xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/sigmoid/simple_sigmoid.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]] 8 | %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/emitc/simple_add.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 3 | 4 | #any_device = #tt.operand_constraint 5 | 6 | func.func @add(%arg0: 
tensor<32x32xbf16>, %arg1: tensor<32x32xbf16>) -> tensor<32x32xbf16> { 7 | %0 = tensor.empty() : tensor<32x32xbf16> 8 | %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16> 9 | return %1 : tensor<32x32xbf16> 10 | } 11 | -------------------------------------------------------------------------------- /include/ttmlir/Conversion/TTNNToEmitC/TTNNToEmitC.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_CONVERSION_TTNNTOEMITC_TTNNTOEMITC_H 6 | #define TTMLIR_CONVERSION_TTNNTOEMITC_TTNNTOEMITC_H 7 | 8 | #include "mlir/Pass/Pass.h" 9 | #include "mlir/Transforms/DialectConversion.h" 10 | 11 | namespace mlir::tt { 12 | 13 | void populateTTNNToEmitCPatterns(MLIRContext *ctx, RewritePatternSet &patterns, 14 | TypeConverter &typeConverter); 15 | 16 | std::unique_ptr> createConvertTTNNToEmitCPass(); 17 | 18 | } // namespace mlir::tt 19 | 20 | #endif // TTMLIR_CONVERSION_TTNNTOEMITC_TTNNTOEMITC_H 21 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/eltwise/binary/binary.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_ELTWISE_BINARY_BINARY_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_ELTWISE_BINARY_BINARY_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::binary { 12 | 13 | inline bool isBinaryOp(const ::tt::target::ttnn::EltwiseOp *op) { 14 | return op->ins()->size() == 2; 15 | } 16 | 17 | void run(const 
::tt::target::ttnn::EltwiseOp *op, ProgramContext &context); 18 | 19 | } // namespace tt::runtime::ttnn::operations::binary 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/test_remove_dead_values.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline="enable-remove-dead-values=true" %s | FileCheck %s 3 | module attributes {} { 4 | func.func public @test_reduce_add_opt(%arg0: tensor<128x10xf32>, %cst_0: tensor) -> tensor<128xf32> { 5 | %0 = stablehlo.reduce(%arg0 init: %cst_0) applies stablehlo.add across dimensions = [1] : (tensor<128x10xf32>, tensor) -> tensor<128xf32> 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.sum"[[C:.*]] 8 | %1 = tensor.empty() : tensor<64x96xbf16> 9 | // CHECK-NOT: %[[C:.*]] = tensor.empty[[C:.*]] 10 | return %0 : tensor<128xf32> 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/concat/concat_negative_dim_oob.mlir: -------------------------------------------------------------------------------- 1 | // RUN: not ttmlir-opt --ttir-to-ttnn-backend-pipeline %s 2>&1 | FileCheck %s 2 | // CHECK: error: 'ttir.concat' op Invalid dimension -3 for concatenation. 
3 | #any_device = #tt.operand_constraint 4 | module attributes {} { 5 | func.func @forward(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> { 6 | %0 = tensor.empty() : tensor<32x96xf32> 7 | %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = -3 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32> 8 | return %1 : tensor<32x96xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/reciprocal/simple_reciprocal.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]] 8 | %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /include/ttmlir/Target/TTMetal/Target.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_TARGET_TTMETAL_TARGET_H 6 | #define TTMLIR_TARGET_TTMETAL_TARGET_H 7 | 8 | #pragma clang diagnostic push 9 | #pragma clang diagnostic ignored "-Wcovered-switch-default" 10 | 11 | #include "ttmlir/Target/Common/system_desc_generated.h" 12 | #include "ttmlir/Target/Common/types_generated.h" 13 | #include "ttmlir/Target/Common/version_generated.h" 14 | #include 
"ttmlir/Target/TTMetal/binary_generated.h" 15 | #include "ttmlir/Target/TTMetal/command_generated.h" 16 | #include "ttmlir/Target/TTMetal/program_generated.h" 17 | 18 | #pragma clang diagnostic pop 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/eltwise/ternary/ternary.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef RUNTIME_LIB_TTNN_OPERATIONS_ELTWISE_TERNARY_TERNARY_H 6 | #define RUNTIME_LIB_TTNN_OPERATIONS_ELTWISE_TERNARY_TERNARY_H 7 | 8 | #include "tt/runtime/ttnn/types.h" 9 | #include "ttmlir/Target/TTNN/program_generated.h" 10 | 11 | namespace tt::runtime::ttnn::operations::ternary { 12 | 13 | inline bool isTernaryOp(const ::tt::target::ttnn::EltwiseOp *op) { 14 | return op->ins()->size() == 3; 15 | } 16 | 17 | void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context); 18 | 19 | } // namespace tt::runtime::ttnn::operations::ternary 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/concat/simple_concat.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<32x96xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.concat"[[C:.*]] 8 | %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32> 9 | return %1 : tensor<32x96xf32> 10 | } 11 | } 12 | 
-------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/embedding/embedding_1d_tensor.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<32x128xbf16> { 5 | %0 = tensor.empty() : tensor<32x128xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]] 7 | %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32xbf16>, tensor<512x128xbf16>, tensor<32x128xbf16>) -> tensor<32x128xbf16> 8 | return %1 : tensor<32x128xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /tools/explorer/README.md: -------------------------------------------------------------------------------- 1 | # TT-explorer 2 | 3 | TT-explorer is a tool for MLIR graph visualization and applying optimizer overrides in order to easily experiment with model performance. 4 | 5 | TODO: add documentation from old tt-explorer repo 6 | 7 | ## Build 8 | ```bash 9 | source env/activate 10 | cmake --build build -- explorer 11 | ``` 12 | 13 | ## Usage 14 | Start the server with: 15 | ```bash 16 | tt-explorer 17 | ``` 18 | 19 | Then open http://localhost:8080 in the browser. 20 | 21 | #### Port Forwarding 22 | P.S. 23 | If using a remote machine make sure to forward the 8080 port. E.g: 24 | ```bash 25 | ssh -L 8080:localhost:8080 user@remote-machine 26 | ``` 27 | Or set the "Tt › Ird › Reservation: Ports" setting in vscode-ird. 
28 | -------------------------------------------------------------------------------- /lib/Dialect/TTNN/IR/TTNNOpsTypes.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttmlir/Dialect/TTNN/IR/TTNNOpsTypes.h" 6 | 7 | #include "mlir/IR/Builders.h" 8 | #include "mlir/IR/DialectImplementation.h" 9 | #include "ttmlir/Dialect/TTNN/IR/TTNN.h" 10 | #include "llvm/ADT/StringExtras.h" 11 | #include "llvm/ADT/TypeSwitch.h" 12 | 13 | using namespace mlir::tt::ttnn; 14 | 15 | #define GET_TYPEDEF_CLASSES 16 | #include "ttmlir/Dialect/TTNN/IR/TTNNOpsTypes.cpp.inc" 17 | 18 | void TTNNDialect::registerTypes() { 19 | // NOLINTNEXTLINE 20 | addTypes< 21 | #define GET_TYPEDEF_LIST 22 | #include "ttmlir/Dialect/TTNN/IR/TTNNOpsTypes.cpp.inc" 23 | >(); 24 | } 25 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/concat/concat_negative_dim.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<32x96xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.concat"[[C:.*]] 8 | %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = -1 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32> 9 | return %1 : tensor<32x96xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/softmax/softmax_negative_1.mlir: -------------------------------------------------------------------------------- 1 | 
// RUN: not ttmlir-opt --ttir-load-system-desc --ttir-layout --convert-ttir-to-ttnn %s 2>&1 | FileCheck %s 2 | // CHECK: error: 'ttir.softmax' op Dimension attribute must be within the bounds of the input tensor 3 | #any_device = #tt.operand_constraint 4 | module attributes {} { 5 | func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x1024xbf16> { 6 | %0 = tensor.empty() : tensor<512x1024xbf16> 7 | %1 = "ttir.softmax"(%arg0, %0) <{dimension = 2 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16> 8 | return %1 : tensor<512x1024xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/softmax/softmax_negative_2.mlir: -------------------------------------------------------------------------------- 1 | // RUN: not ttmlir-opt --ttir-load-system-desc --ttir-layout --convert-ttir-to-ttnn %s 2>&1 | FileCheck %s 2 | // CHECK: error: 'ttir.softmax' op Dimension attribute must be within the bounds of the input tensor 3 | #any_device = #tt.operand_constraint 4 | module attributes {} { 5 | func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x1024xbf16> { 6 | %0 = tensor.empty() : tensor<512x1024xbf16> 7 | %1 = "ttir.softmax"(%arg0, %0) <{dimension = -3 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16> 8 | return %1 : tensor<512x1024xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black 3 | rev: 22.3.0 4 | hooks: 5 | - id: black 6 | language_version: python3 7 | - repo: https://github.com/pre-commit/mirrors-clang-format 8 | rev: v18.1.7 9 | hooks: 10 | - id: clang-format 11 | types_or: [c++, c] 12 | args: [-style=file, 
-i] 13 | - repo: https://github.com/espressif/check-copyright/ 14 | rev: v1.0.3 15 | hooks: 16 | - id: check-copyright 17 | args: ['--config', '.github/check-spdx.yaml'] 18 | - repo: https://github.com/pre-commit/pre-commit-hooks 19 | rev: v3.4.0 20 | hooks: 21 | - id: trailing-whitespace 22 | - id: end-of-file-fixer 23 | - id: check-added-large-files 24 | -------------------------------------------------------------------------------- /include/ttmlir/Conversion/TosaToTTIR/TosaToTTIR.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_CONVERSION_TOSATOTTIR_TOSATOTTIR_H 6 | #define TTMLIR_CONVERSION_TOSATOTTIR_TOSATOTTIR_H 7 | 8 | #include "mlir/IR/BuiltinOps.h" 9 | #include "mlir/Pass/Pass.h" 10 | #include "mlir/Transforms/DialectConversion.h" 11 | 12 | namespace mlir::tt { 13 | 14 | void populateTosaToTTIRPatterns(MLIRContext *ctx, RewritePatternSet &patterns, 15 | TypeConverter &typeConverter); 16 | 17 | std::unique_ptr> createConvertTosaToTTIRPass(); 18 | 19 | } // namespace mlir::tt 20 | 21 | #endif // TTMLIR_CONVERSION_TOSATOTTIR_TOSATOTTIR_H 22 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/binary/remainder_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_remainder attributes {} { 5 | func.func public @test_remainder(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> { 6 | %0 = stablehlo.remainder %arg0, %arg1 : tensor<32x32xf32> 7 | // CHECK: %[[EMPTY:[0-9]+]] = tensor.empty() : tensor<32x32xf32> 8 | // CHECK: %[[REM:[0-9]+]] = "ttir.remainder"(%arg0, %arg1, %[[EMPTY]]){{.*}} -> tensor<32x32xf32> 9 | 
return %0 : tensor<32x32xf32> 10 | // CHECK: return %[[REM]] : tensor<32x32xf32> 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/cast/simple_cast.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> { 5 | %0 = tensor.empty() : tensor<64x128xbf16> 6 | %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16> 7 | // CHECK: %[[C:.*]] = "ttnn.typecast" 8 | // CHECK-SAME: tensor<64x128xf32, 9 | // CHECK-SAME: tensor<64x128xbf16, 10 | return %1 : tensor<64x128xbf16> 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTMetal/IR/TTMetalOpsTypes.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTMETAL_IR_TTMETALOPSTYPES_H 6 | #define TTMLIR_DIALECT_TTMETAL_IR_TTMETALOPSTYPES_H 7 | 8 | #include "mlir/IR/BuiltinAttributes.h" 9 | #include "mlir/IR/BuiltinTypes.h" 10 | #include "ttmlir/Dialect/TT/IR/TTOpsTypes.h" 11 | #include "ttmlir/Dialect/TTKernel/IR/TTKernelOpsTypes.h" 12 | 13 | #include "ttmlir/Dialect/TTMetal/IR/TTMetalOpsEnums.h.inc" 14 | 15 | #define GET_TYPEDEF_CLASSES 16 | #include "ttmlir/Dialect/TTMetal/IR/TTMetalOpsTypes.h.inc" 17 | 18 | #define GET_ATTRDEF_CLASSES 19 | #include "ttmlir/Dialect/TTMetal/IR/TTMetalOpsAttrDefs.h.inc" 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- 
/include/ttmlir/Target/TTMetal/binary.fbs: -------------------------------------------------------------------------------- 1 | include "Common/types.fbs"; 2 | include "Common/version.fbs"; 3 | include "Common/debug_info.fbs"; 4 | include "command.fbs"; 5 | 6 | namespace tt.target.metal; 7 | 8 | table DeviceProgram { 9 | inputs: [TensorRef]; 10 | outputs: [TensorRef]; 11 | command_queues: [CommandQueue]; 12 | } 13 | 14 | table Program { 15 | name: string; 16 | inputs: [TensorRef]; 17 | outputs: [TensorRef]; 18 | device_programs: [DeviceProgram]; 19 | debug_info: DebugInfo; 20 | } 21 | 22 | table TTMetalBinary { 23 | version: Version; 24 | ttmlir_git_hash: string; 25 | system_desc: SystemDesc; 26 | programs: [Program]; 27 | } 28 | 29 | root_type TTMetalBinary; 30 | file_identifier "TTM0"; 31 | file_extension "ttm"; 32 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/embedding/simple_embedding.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<32x32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<32x32x128xbf16> { 5 | %0 = tensor.empty() : tensor<32x32x128xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]] 7 | %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<512x128xbf16>, tensor<32x32x128xbf16>) -> tensor<32x32x128xbf16> 8 | return %1 : tensor<32x32x128xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_div.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = 
#tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.div"[[C:.*]] 8 | %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /docs/src/tools.md: -------------------------------------------------------------------------------- 1 | # Tools 2 | 3 | Currently, there are a few primary tools that are part of the `ttmlir` project: 4 | 5 | - `ttmlir-opt`: The `ttmlir` optimizer driver. This tool is used to run the `ttmlir` compiler passes on a `.mlir` source files and is central to developing and testing the compiler. 6 | - `ttrt`: This tool is intended to be a swiss army knife for working with flatbuffers generated by the compiler. Its primary role is to inspect and run flatbuffer files. 7 | - [`tt-explorer`](https://github.com/vprajapati-tt/tt-explorer): Visualizer tool for `ttmlir`-powered compiler results. Visualizes from emitted `.mlir` files to display compiled model, attributes, performance results, and provide a platform for human-driven overrides to _gameify_ model tuning. 
8 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTIR/IR/TTIROpsEnums.td: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_TTIR_ENUMS_TD 6 | #define TTMLIR_TTIR_ENUMS_TD 7 | 8 | include "mlir/IR/EnumAttr.td" 9 | 10 | def TTIR_PoolingMethodAverage : I32EnumAttrCase<"Average", 0>; 11 | def TTIR_PoolingMethodMax : I32EnumAttrCase<"Max", 1>; 12 | 13 | def TTIR_PoolingMethod : I32EnumAttr<"PoolingMethod", "TTIR PoolingMethod", [ 14 | TTIR_PoolingMethodAverage, 15 | TTIR_PoolingMethodMax 16 | ]> { 17 | let genSpecializedAttr = 0; 18 | let cppNamespace = "::mlir::tt::ttir"; 19 | } 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /runtime/lib/ttmetal/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(TTRuntimeTTMetal 2 | STATIC 3 | runtime.cpp 4 | command_queue.cpp 5 | ) 6 | # We have to set the C++ standard to 20 because tt-metal requires it 7 | set_property(TARGET TTRuntimeTTMetal PROPERTY CXX_STANDARD 20) 8 | target_include_directories(TTRuntimeTTMetal PUBLIC 9 | ${PROJECT_SOURCE_DIR}/runtime/include 10 | ${PROJECT_BINARY_DIR}/include/ttmlir/Target/Common 11 | ) 12 | target_include_directories(TTRuntimeTTMetal SYSTEM PUBLIC "$") 13 | target_link_libraries(TTRuntimeTTMetal PUBLIC TTMETAL_LIBRARY) 14 | add_dependencies(TTRuntimeTTMetal TTMETAL_LIBRARY tt-metal FBS_GENERATION) 15 | 16 | # Optionally compile profiling code and link tracy client for perf profiling. 
17 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_max.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | 6 | func.func @max(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> { 7 | %0 = tensor.empty() : tensor<1x1x512xbf16> 8 | // CHECK: %[[C:.*]] = "ttnn.max"[[C:.*]] 9 | %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16> 10 | return %1 : tensor<1x1x512xbf16> 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sum.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | 6 | func.func @sum(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> { 7 | %0 = tensor.empty() : tensor<1x1x512xbf16> 8 | // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]] 9 | %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16> 10 | return %1 : tensor<1x1x512xbf16> 11 | } 12 | -------------------------------------------------------------------------------- /runtime/tools/python/test/conftest.py: 
-------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | import os 6 | import pytest 7 | 8 | from util import * 9 | 10 | 11 | @pytest.fixture(scope="session", autouse=True) 12 | def session_setup(): 13 | directory_name = "ttrt-results" 14 | if not os.path.exists(directory_name): 15 | try: 16 | os.mkdir(directory_name) 17 | except Exception as e: 18 | print(f"An error occurred while creating the directory: {e}") 19 | 20 | yield 21 | 22 | 23 | def pytest_runtest_teardown(item, nextitem): 24 | assert ( 25 | check_results(f"ttrt-results/{item.name}.json") == 0 26 | ), f"one of more tests failed in={item.name}" 27 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_maximum.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.maximum"[[C:.*]] 8 | %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TT/IR/TTOps.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TT_IR_TTOPS_H 6 | #define 
TTMLIR_DIALECT_TT_IR_TTOPS_H 7 | 8 | #include "mlir/Bytecode/BytecodeOpInterface.h" 9 | #include "mlir/IR/BuiltinTypes.h" 10 | #include "mlir/IR/Dialect.h" 11 | #include "mlir/IR/OpDefinition.h" 12 | #include "mlir/Interfaces/ControlFlowInterfaces.h" 13 | #include "mlir/Interfaces/DestinationStyleOpInterface.h" 14 | #include "mlir/Interfaces/InferTypeOpInterface.h" 15 | #include "mlir/Interfaces/SideEffectInterfaces.h" 16 | 17 | #include "ttmlir/Dialect/TT/IR/TTOpsTypes.h" 18 | 19 | #define GET_OP_CLASSES 20 | #include "ttmlir/Dialect/TT/IR/TTOps.h.inc" 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /include/ttmlir/Target/TTMetal/TTMetalToFlatbuffer.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_TARGET_TTMETAL_TTMETALTOFLATBUFFER_H 6 | #define TTMLIR_TARGET_TTMETAL_TTMETALTOFLATBUFFER_H 7 | 8 | #include "mlir/IR/Operation.h" 9 | #include "mlir/Support/LogicalResult.h" 10 | #include "ttmlir/Target/Utils/MLIRToFlatbuffer.h" 11 | 12 | namespace mlir::tt::ttmetal { 13 | 14 | // Translates a TTMetal operation to a flatbuffer and writes it to the given 15 | // stream. 
16 | LogicalResult translateTTMetalToFlatbuffer( 17 | Operation *op, llvm::raw_ostream &os, 18 | std::unordered_map goldenMap = {}); 19 | } // namespace mlir::tt::ttmetal 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_multiply.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] 8 | %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_subtract.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]] 8 | %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | 
-------------------------------------------------------------------------------- /tools/ttmlir-opt/ttmlir-opt.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "mlir/IR/MLIRContext.h" 6 | #include "mlir/InitAllDialects.h" 7 | #include "mlir/InitAllPasses.h" 8 | #include "mlir/Support/FileUtilities.h" 9 | #include "mlir/Tools/mlir-opt/MlirOptMain.h" 10 | 11 | #include "ttmlir/RegisterAll.h" 12 | 13 | int main(int argc, char **argv) { 14 | mlir::registerAllPasses(); 15 | mlir::tt::registerAllPasses(); 16 | 17 | mlir::DialectRegistry registry; 18 | mlir::tt::registerAllDialects(registry); 19 | mlir::tt::registerAllExtensions(registry); 20 | 21 | return mlir::asMainReturnCode( 22 | mlir::MlirOptMain(argc, argv, "ttmlir optimizer driver\n", registry)); 23 | } 24 | -------------------------------------------------------------------------------- /lib/CAPI/Dialects.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttmlir-c/Dialects.h" 6 | 7 | #include "mlir/CAPI/Registration.h" 8 | #include "ttmlir/Dialect/TT/IR/TT.h" 9 | #include "ttmlir/Dialect/TTIR/IR/TTIR.h" 10 | #include "ttmlir/Dialect/TTKernel/IR/TTKernel.h" 11 | #include "ttmlir/Dialect/TTNN/IR/TTNN.h" 12 | 13 | MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(TT, tt, mlir::tt::TTDialect) 14 | MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(TTIR, ttir, mlir::tt::ttir::TTIRDialect) 15 | MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(TTKernel, ttkernel, 16 | mlir::tt::ttkernel::TTKernelDialect) 17 | MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(TTNN, ttnn, mlir::tt::ttnn::TTNNDialect) 18 | -------------------------------------------------------------------------------- /runtime/lib/common/workarounds.cpp: 
-------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "tt/runtime/detail/workarounds.h" 6 | 7 | namespace tt::runtime::workaround { 8 | #if defined(TT_RUNTIME_WORKAROUNDS) && TT_RUNTIME_WORKAROUNDS == 1 9 | const Env &Env::get(bool ignoreTileShape, bool emptyOpForceRowMajor, 10 | bool fullOpForceRowMajor, bool maxpool2dPreshard, 11 | bool swapBinaryOperands) { 12 | static const Env config(ignoreTileShape, emptyOpForceRowMajor, 13 | fullOpForceRowMajor, maxpool2dPreshard, 14 | swapBinaryOperands); 15 | return config; 16 | } 17 | #endif 18 | } // namespace tt::runtime::workaround 19 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/isfinite_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_isfinite attributes {} { 5 | func.func public @test_isfinite(%arg0: tensor<32x32x3xf32>) -> tensor<32x32x3xi1> { 6 | // CHECK: %[[E:.*]] = tensor.empty() : tensor<32x32x3xbf16> 7 | // CHECK: %[[C:.*]] = "ttir.isfinite"(%arg0, %[[E]]) 8 | // CHECK-SAME: (tensor<32x32x3xf32>, tensor<32x32x3xbf16>) -> tensor<32x32x3xbf16> 9 | %0 = stablehlo.is_finite %arg0 : (tensor<32x32x3xf32>) -> tensor<32x32x3xi1> 10 | // CHECK: return %[[C]] : tensor<32x32x3xbf16> 11 | return %0 : tensor<32x32x3xi1> 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /include/ttmlir-c/TTTypes.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_C_TTKERNELTYPES_H 6 | #define 
TTMLIR_C_TTKERNELTYPES_H 7 | 8 | #include "ttmlir-c/Dialects.h" 9 | 10 | #ifdef __cplusplus 11 | extern "C" { 12 | #endif 13 | 14 | MLIR_CAPI_EXPORTED MlirType ttmlirTTTileTypeGet(MlirContext ctx, 15 | unsigned height, unsigned width, 16 | uint32_t dataType); 17 | 18 | MLIR_CAPI_EXPORTED MlirType ttmlirTTDeviceTypeGet(MlirContext ctx, 19 | MlirAttribute deviceAttr); 20 | 21 | #ifdef __cplusplus 22 | } 23 | #endif 24 | 25 | #endif // TTMLIR_C_TTKERNELTYPES_H 26 | -------------------------------------------------------------------------------- /include/ttmlir/Conversion/TTIRToTTMetal/TTIRToTTMetal.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_CONVERSION_TTIRTOTTMETAL_TTIRTOTTMETAL_H 6 | #define TTMLIR_CONVERSION_TTIRTOTTMETAL_TTIRTOTTMETAL_H 7 | 8 | #include "mlir/Pass/Pass.h" 9 | #include "mlir/Transforms/DialectConversion.h" 10 | 11 | namespace mlir::tt { 12 | 13 | void populateTTIRToTTMetalPatterns(MLIRContext *ctx, 14 | RewritePatternSet &patterns, 15 | TypeConverter &typeConverter); 16 | 17 | std::unique_ptr> createConvertTTIRToTTMetalPass(); 18 | 19 | } // namespace mlir::tt 20 | 21 | #endif // TTMLIR_CONVERSION_TTIRTOTTMETAL_TTIRTOTTMETAL_H 22 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTKernel/IR/TTKernelOpsTypes.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTKERNEL_IR_TTKERNELOPSTYPES_H 6 | #define TTMLIR_DIALECT_TTKERNEL_IR_TTKERNELOPSTYPES_H 7 | 8 | #include "mlir/IR/BuiltinAttributes.h" 9 | #include "mlir/IR/BuiltinTypes.h" 10 | #include "ttmlir/Dialect/TT/IR/TTOpsTypes.h" 11 | 12 | #include "ttmlir/Dialect/TTKernel/IR/TTKernelOpsEnums.h.inc" 13 | 14 
| #define GET_TYPEDEF_CLASSES 15 | #include "ttmlir/Dialect/TTKernel/IR/TTKernelOpsTypes.h.inc" 16 | 17 | #include "ttmlir/Dialect/TTKernel/IR/TTKernelAttrInterfaces.h.inc" 18 | 19 | #define GET_ATTRDEF_CLASSES 20 | #include "ttmlir/Dialect/TTKernel/IR/TTKernelOpsAttrDefs.h.inc" 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /lib/Dialect/TTNN/Analysis/OpConfigAnalysis.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttmlir/Dialect/TTNN/Analysis/OpConfigAnalysis.h" 6 | 7 | namespace mlir::tt::ttnn { 8 | 9 | bool OpConfigAnalysis::applyOverrides() { 10 | 11 | // Placeholder, no overrides for now. 12 | // 13 | return false; 14 | } 15 | 16 | void OpConfigAnalysis::analysisImplementation() { 17 | 18 | // Future entrypoint for picking optimal op config. 19 | // Placeholder: pick the first legal grid. 
20 | // 21 | for (auto opGrids : analysisInput.legalGrids) { 22 | if (not opGrids.second.empty()) { 23 | analysisResult[opGrids.first] = opGrids.second[0]; 24 | } 25 | } 26 | } 27 | } // namespace mlir::tt::ttnn 28 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/binary/minimum/simple_minimum.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.minimum"[[C:.*]] 8 | %1 = "ttir.minimum"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | return %1 : tensor<64x128xf32> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/include/tt/runtime/ttnn/operations/eltwise/unary/utils.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | #include "utils.h" 5 | #include "tt/runtime/detail/logger.h" 6 | 7 | namespace tt::runtime::ttnn::operations::unary { 8 | 9 | void getEltwiseUnaryOpInputTensor(const ::tt::target::ttnn::EltwiseOp *op, 10 | ProgramTensorPool &tensorPool, 11 | ::ttnn::Tensor **in) { 12 | LOG_ASSERT(op->ins()->size() == 1, "Expected 1 input, got ", 13 | op->ins()->size()); 14 | *in = &(tensorPool.at(op->ins()->Get(0)->global_id())); 15 | DEBUG_ASSERT((*in)->is_allocated()); 16 | } 17 | 18 | } // namespace tt::runtime::ttnn::operations::unary 
19 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/embedding/embedding_non_tile.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<1x32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<1x32x128xbf16> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<1x32x128xbf16> 7 | // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]] 8 | %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x32xbf16>, tensor<512x128xbf16>, tensor<1x32x128xbf16>) -> tensor<1x32x128xbf16> 9 | return %1 : tensor<1x32x128xbf16> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/simple_mean.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | // UNSUPPORTED: true 5 | #any_device = #tt.operand_constraint 6 | module { 7 | func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x32xbf16> { 8 | %0 = tensor.empty() : tensor<512x32xbf16> 9 | // CHECK: %[[C:.*]] = "ttnn.mean"[[C:.*]] 10 | %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16> 11 | return %1 : tensor<512x32xbf16> 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /lib/Conversion/CMakeLists.txt: 
-------------------------------------------------------------------------------- 1 | add_library(TTMLIRConversions INTERFACE) 2 | 3 | add_subdirectory(TosaToTTIR) 4 | add_subdirectory(TTIRToTTIRDecomposition) 5 | add_subdirectory(TTNNToEmitC) 6 | add_subdirectory(TTIRToTTNN) 7 | add_subdirectory(TTIRToTTMetal) 8 | add_subdirectory(TTKernelToEmitC) 9 | 10 | if (TTMLIR_ENABLE_STABLEHLO) 11 | add_subdirectory(StableHLOToTTIR) 12 | endif() 13 | 14 | include_directories(${TTMLIR_SOURCE_DIR}/include) 15 | 16 | set(link_libs 17 | TTMLIRTosaToTTIR 18 | TTMLIRTTIRToTTIRDecomposition 19 | TTMLIRTTNNToEmitC 20 | TTMLIRTTIRToTTNN 21 | TTMLIRTTIRToTTMetal 22 | TTMLIRTTKernelToEmitC 23 | ) 24 | 25 | if (TTMLIR_ENABLE_STABLEHLO) 26 | list(APPEND link_libs TTMLIRStableHLOToTTIR) 27 | endif() 28 | 29 | target_link_libraries(TTMLIRConversions INTERFACE 30 | ${link_libs} 31 | ) 32 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | 3 | #any_device = #tt.operand_constraint 4 | module attributes {} { 5 | func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: {{.*}} = "ttnn.empty"{{.*}} 8 | %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 9 | // CHECK: %[[C:.*]] = "ttnn.logical_not" 10 | // CHECK-SAME: tensor<64x128xf32, 11 | // CHECK-SAME: tensor<64x128xf32, 12 | return %1 : tensor<64x128xf32> 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/simple_index.mlir: -------------------------------------------------------------------------------- 1 | // 
RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device_tile = #tt.operand_constraint 5 | module attributes {} { 6 | func.func @forward(%arg0: tensor<4x32x32xbf16>) -> tensor<4x32x16xbf16> { 7 | %0 = tensor.empty() : tensor<4x32x16xbf16> 8 | // CHECK: %[[C:.*]] = "ttnn.slice"[[C:.*]] 9 | %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = 0: i32, end = 32: i32, step = 2: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x32x32xbf16>, tensor<4x32x16xbf16>) -> tensor<4x32x16xbf16> 10 | return %1 : tensor<4x32x16xbf16> 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts_negative.mlir: -------------------------------------------------------------------------------- 1 | // RUN: not ttmlir-opt --ttir-load-system-desc --ttir-layout --convert-ttir-to-ttnn %s 2>&1 | FileCheck %s 2 | // CHECK: error: 'ttir.multiply' op Operands are not broadcast compatible 3 | #any_device = #tt.operand_constraint 4 | module attributes {} { 5 | func.func @bcast_one_dim(%arg0: tensor<2x64x128xf32>, %arg1: tensor<4x64x128xf32>) -> tensor<4x64x128xf32> { 6 | %0 = tensor.empty() : tensor<4x64x128xf32> 7 | %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<2x64x128xf32>, tensor<4x64x128xf32>, tensor<4x64x128xf32>) -> tensor<4x64x128xf32> 8 | return %1 : tensor<4x64x128xf32> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/gelu/simple_gelu.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = 
#tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: "ttnn.empty" 6 | // CHECK-SAME: tensor<64x128xf32, 7 | %0 = tensor.empty() : tensor<64x128xf32> 8 | // CHECK: "ttnn.gelu" 9 | // CHECK-SAME: tensor<64x128xf32, 10 | // CHECK-SAME: tensor<64x128xf32, 11 | // CHECK-SAME: tensor<64x128xf32, 12 | %1 = "ttir.gelu"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 13 | return %1 : tensor<64x128xf32> 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/matmul/simple_matmul.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device_tile = #tt.operand_constraint 3 | // CHECK: #[[TILED_LAYOUT:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<2x4x!tt.tile<32x32, bf16>, #dram>, interleaved> 4 | module attributes {} { 5 | func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>) -> tensor<64x96xbf16> { 6 | %0 = tensor.empty() : tensor<64x96xbf16> 7 | // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] 8 | %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> 9 | return %1 : tensor<64x96xbf16> 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_neg.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > 
%t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @negate(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> { 8 | %0 = tensor.empty() : tensor<32x32xf32> 9 | // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]] 10 | %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> 11 | return %1 : tensor<32x32xf32> 12 | } 13 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/include/tt/runtime/ttnn/operations/eltwise/binary/utils.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TT_RUNTIME_TTNN_OPERATIONS_ELTWISE_BINARY_UTILS_H 6 | #define TT_RUNTIME_TTNN_OPERATIONS_ELTWISE_BINARY_UTILS_H 7 | 8 | #include "tt/runtime/detail/ttnn.h" 9 | #include "tt/runtime/ttnn/types.h" 10 | #include "ttmlir/Target/TTNN/program_generated.h" 11 | 12 | namespace tt::runtime::ttnn::operations::binary { 13 | void getEltwiseBinaryOpInputTensors(const ::tt::target::ttnn::EltwiseOp *op, 14 | ProgramTensorPool &tensorPool, 15 | ::ttnn::Tensor **lhs, ::ttnn::Tensor **rhs); 16 | 17 | } // namespace tt::runtime::ttnn::operations::binary 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/composite_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | module @jit_eltwise_add attributes {} { 4 | func.func private @add_impl(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 5 | %0 = stablehlo.add %arg0, %arg1 : tensor<13x21x3xf32> 6 | return %0 : tensor<13x21x3xf32> 7 | } 8 | 
9 | func.func public @main(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 10 | %results = stablehlo.composite "jit_eltwise_add.my_add" %arg0, %arg1 { 11 | decomposition = @add_impl 12 | } : (tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> 13 | // CHECK: %[[C:.*]] = call @add_impl 14 | return %results : tensor<13x21x3xf32> 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Iota/simple_device_iota_dim2.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | module attributes {} { 9 | func.func @forward(%arg0: tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> { 10 | // CHECK: ttnn.arange 11 | %0 = "stablehlo.iota"() {iota_dimension = 2: i64} : () -> tensor<1x1x32x128xbf16> 12 | %2 = "stablehlo.multiply"(%arg0, %0) : (tensor<1x1x32x128xbf16>, tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> 13 | return %2 : tensor<1x1x32x128xbf16> 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Iota/simple_device_iota_dim3.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | module attributes {} { 9 
| func.func @forward(%arg0: tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> { 10 | // CHECK: ttnn.arange 11 | %0 = "stablehlo.iota"() {iota_dimension = 3: i64} : () -> tensor<1x1x32x128xbf16> 12 | %2 = "stablehlo.multiply"(%arg0, %0) : (tensor<1x1x32x128xbf16>, tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> 13 | return %2 : tensor<1x1x32x128xbf16> 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /tools/ttmlir-translate/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) 2 | get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) 3 | get_property(extension_libs GLOBAL PROPERTY MLIR_EXTENSION_LIBS) 4 | get_property(translation_libs GLOBAL PROPERTY MLIR_TRANSLATION_LIBS) 5 | 6 | add_llvm_executable(ttmlir-translate ttmlir-translate.cpp DISABLE_LLVM_LINK_LLVM_DYLIB) 7 | 8 | llvm_update_compile_flags(ttmlir-translate) 9 | target_link_libraries(ttmlir-translate PRIVATE 10 | ${dialect_libs} 11 | ${conversion_libs} 12 | ${translation_libs} 13 | ${extension_libs} 14 | MLIRIR 15 | MLIRSupport 16 | MLIRTranslateLib 17 | ) 18 | 19 | mlir_check_link_libraries(ttmlir-translate) 20 | 21 | install(TARGETS ttmlir-translate DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT Test EXCLUDE_FROM_ALL) 22 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/include/tt/runtime/ttnn/operations/eltwise/unary/utils.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TT_RUNTIME_TTNN_OPERATIONS_ELTWISE_UNARY_UTILS_H 6 | #define TT_RUNTIME_TTNN_OPERATIONS_ELTWISE_UNARY_UTILS_H 7 | 8 | #include "tt/runtime/detail/logger.h" 9 | #include "tt/runtime/detail/ttnn.h" 10 | #include "tt/runtime/ttnn/types.h" 11 | 
#include "ttmlir/Target/TTNN/program_generated.h" 12 | 13 | namespace tt::runtime::ttnn::operations::unary { 14 | void getEltwiseUnaryOpInputTensor(const ::tt::target::ttnn::EltwiseOp *op, 15 | ProgramTensorPool &tensorPool, 16 | ::ttnn::Tensor **in); 17 | 18 | } // namespace tt::runtime::ttnn::operations::unary 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTIR/constant_as_fill.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-constant-as-fill %s | FileCheck %s 2 | 3 | #any_device = #tt.operand_constraint 4 | 5 | func.func public @add5(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> { 6 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 7 | // CHECK: %[[C:.*]] = "ttir.fill"[[C:.*]] 8 | %0 = "ttir.constant"() <{value = dense<5.000000e+00> : tensor<32x32xf32>}> : () -> tensor<32x32xf32> 9 | // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] 10 | %1 = tensor.empty() : tensor<32x32xf32> 11 | %2 = "ttir.add"(%arg0, %0, %1) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> 12 | return %2 : tensor<32x32xf32> 13 | } 14 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Iota/simple_device_dynamic_iota_dim2.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | module attributes {} { 9 | func.func @forward(%arg0: tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> { 10 | // 
CHECK: ttnn.arange 11 | %0 = "stablehlo.iota"() {iota_dimension = 2: i64} : () -> tensor<1x1x32x128xbf16> 12 | %2 = "stablehlo.multiply"(%arg0, %0) : (tensor<1x1x32x128xbf16>, tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> 13 | return %2 : tensor<1x1x32x128xbf16> 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/simple_typecast.mlir: -------------------------------------------------------------------------------- 1 | 2 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 3 | // RUN: FileCheck %s --input-file=%t.mlir 4 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 5 | #any_device = #tt.operand_constraint 6 | func.func @typecast(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> { 7 | %0 = tensor.empty() : tensor<64x128xbf16> 8 | // CHECK: %[[C:.*]] = "ttnn.typecast" 9 | // CHECK-SAME: tensor<64x128xf32, 10 | // CHECK-SAME: tensor<64x128xbf16, 11 | %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16> 12 | return %1 : tensor<64x128xbf16> 13 | } 14 | -------------------------------------------------------------------------------- /lib/Dialect/TTMetal/IR/TTMetalOpsTypes.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "ttmlir/Dialect/TTMetal/IR/TTMetalOpsTypes.h" 6 | 7 | #include "mlir/IR/Builders.h" 8 | #include "mlir/IR/DialectImplementation.h" 9 | #include "ttmlir/Dialect/TTMetal/IR/TTMetal.h" 10 | #include "llvm/ADT/StringExtras.h" 11 | #include "llvm/ADT/TypeSwitch.h" 12 | 13 | using namespace mlir::tt::ttmetal; 14 | 15 | #include "ttmlir/Dialect/TTMetal/IR/TTMetalOpsEnums.cpp.inc" 16 | 17 | #define GET_TYPEDEF_CLASSES 18 | 
#include "ttmlir/Dialect/TTMetal/IR/TTMetalOpsTypes.cpp.inc" 19 | 20 | void TTMetalDialect::registerTypes() { 21 | // NOLINTNEXTLINE 22 | addTypes< 23 | #define GET_TYPEDEF_LIST 24 | #include "ttmlir/Dialect/TTMetal/IR/TTMetalOpsTypes.cpp.inc" 25 | >(); 26 | } 27 | -------------------------------------------------------------------------------- /runtime/lib/common/debug.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "tt/runtime/detail/debug.h" 6 | 7 | #if defined(TT_RUNTIME_DEBUG) && TT_RUNTIME_DEBUG == 1 8 | 9 | namespace tt::runtime::debug { 10 | 11 | Env const &Env::get(bool loadKernelsFromDisk, bool enableAsyncTTNN) { 12 | static Env config(loadKernelsFromDisk, enableAsyncTTNN); 13 | return config; 14 | } 15 | 16 | #if defined(TT_RUNTIME_DEBUG) && TT_RUNTIME_DEBUG == 1 17 | Hooks const &Hooks::get( 18 | std::optional> 19 | operatorCallback) { 20 | static Hooks config(operatorCallback); 21 | return config; 22 | } 23 | #else 24 | Hooks get() { return Hooks(); } 25 | #endif 26 | 27 | } // namespace tt::runtime::debug 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/sign_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_sign attributes {} { 5 | func.func public @test_sign(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.sign %arg0 : tensor<13x21x3xf32> 7 | // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] 8 | // CHECK: [[VAL1:%[0-9]+]] = "ttir.sign"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, 
#any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] 9 | return %0 : tensor<13x21x3xf32> 10 | // CHECK: return [[VAL1]] : [[TENSOR_SIZE]] 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_typecast.mlir: -------------------------------------------------------------------------------- 1 | 2 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 3 | // RUN: FileCheck %s --input-file=%t.mlir 4 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 5 | #any_device = #tt.operand_constraint 6 | func.func @typecast(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> { 7 | %0 = tensor.empty() : tensor<64x128xbf16> 8 | // CHECK: %[[C:.*]] = "ttnn.typecast" 9 | // CHECK-SAME: tensor<64x128xf32, 10 | // CHECK-SAME: tensor<64x128xbf16, 11 | %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16> 12 | return %1 : tensor<64x128xbf16> 13 | } 14 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTIR/clamp/clamp_tests_negative.mlir: -------------------------------------------------------------------------------- 1 | // RUN: not ttmlir-opt --split-input-file %s 2>&1 | FileCheck %s 2 | // Negative test for clamp operation 3 | 4 | // Verify that the parsing fails if input and output shapes do not match. 5 | #any_device_tile = #tt.operand_constraint 6 | module attributes {} { 7 | func.func @clamp(%arg0: tensor<64x64xbf16>) -> tensor<64x128xbf16> { 8 | %0 = tensor.empty() : tensor<64x128xbf16> 9 | // CHECK: error: 'ttir.clamp' op input and output must have same shape. 
10 | %1 = "ttir.clamp"(%arg0, %0) <{max = 3.000000e+00 : f32, min = 2.000000e+00 : f32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x64xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> 11 | return %1 : tensor<64x128xbf16> 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /include/ttmlir/Conversion/StableHLOToTTIR/StableHLOToTTIR.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_CONVERSION_STABLEHLOTOTTIR_STABLEHLOTOTTIR_H 6 | #define TTMLIR_CONVERSION_STABLEHLOTOTTIR_STABLEHLOTOTTIR_H 7 | 8 | #include "mlir/Pass/Pass.h" 9 | #include "mlir/Transforms/DialectConversion.h" 10 | 11 | namespace mlir::tt { 12 | 13 | #ifdef TTMLIR_ENABLE_STABLEHLO 14 | void populateStableHLOToTTIRPatterns(MLIRContext *ctx, 15 | RewritePatternSet &patterns, 16 | TypeConverter &typeConverter); 17 | 18 | std::unique_ptr> createConvertStableHLOToTTIRPass(); 19 | #endif 20 | 21 | } // namespace mlir::tt 22 | 23 | #endif // TTMLIR_CONVERSION_STABLEHLOTOTTIR_STABLEHLOTOTTIR_H 24 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/floor/simple_floor.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %{{[0-9]+}} = "ttnn.empty" 6 | // CHECK-SAME: [[TENSOR:tensor<64x128xf32,]] 7 | %0 = tensor.empty() : tensor<64x128xf32> 8 | // CHECK: %{{[0-9]+}} = "ttnn.floor" 9 | // CHECK-SAME: [[TENSOR]] 10 | // CHECK-SAME: [[TENSOR]] 11 | // CHECK-SAME: -> [[TENSOR]] 12 | %1 = "ttir.floor"(%arg0, %0) 
<{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 13 | return %1 : tensor<64x128xf32> 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /include/ttmlir/OpModel/TTNN/TTNNOpModel.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_OPMODEL_TTNN_TTNNOPMODEL_H 6 | #define TTMLIR_OPMODEL_TTNN_TTNNOPMODEL_H 7 | 8 | #include "ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.h" 9 | 10 | #include 11 | 12 | namespace mlir::tt::op_model::ttnn { 13 | 14 | struct ReluOpInterface { 15 | static bool isLegal(const mlir::tt::ttnn::TTNNLayoutAttr &inputLayout, 16 | const mlir::tt::ttnn::TTNNLayoutAttr &outputLayout); 17 | 18 | static std::tuple 19 | getOpL1Usage(const mlir::tt::ttnn::TTNNLayoutAttr &inputLayout, 20 | const mlir::tt::ttnn::TTNNLayoutAttr &outputLayout); 21 | }; 22 | 23 | } // namespace mlir::tt::op_model::ttnn 24 | #endif // TTMLIR_OPMODEL_TTNN_TTNNOPMODEL_H 25 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_transpose.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @transpose(%arg0: tensor<64x128xbf16>) -> tensor<128x64xbf16> { 8 | %0 = tensor.empty() : tensor<128x64xbf16> 9 | // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]] 10 | %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device_tile, 
#any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16> 11 | return %1 : tensor<128x64xbf16> 12 | } 13 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/simple_slice.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device_tile = #tt.operand_constraint 5 | module attributes {} { 6 | func.func @forward(%arg0: tensor<4x32x32xbf16>) -> tensor<2x16x16xbf16> { 7 | %0 = tensor.empty() : tensor<2x16x16xbf16> 8 | // CHECK: %[[C:.*]] = "ttnn.slice"[[C:.*]] 9 | %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [2: i32, 16: i32, 16: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x32x32xbf16>, tensor<2x16x16xbf16>) -> tensor<2x16x16xbf16> 10 | return %1 : tensor<2x16x16xbf16> 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TTKernel/IR/TTKernelOps.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_DIALECT_TTKERNEL_IR_TTKERNELOPS_H 6 | #define TTMLIR_DIALECT_TTKERNEL_IR_TTKERNELOPS_H 7 | 8 | #include "mlir/Bytecode/BytecodeOpInterface.h" 9 | #include "mlir/IR/BuiltinTypes.h" 10 | #include "mlir/IR/Dialect.h" 11 | #include "mlir/IR/OpDefinition.h" 12 | #include "mlir/Interfaces/ControlFlowInterfaces.h" 13 | #include "mlir/Interfaces/DestinationStyleOpInterface.h" 14 | #include "mlir/Interfaces/InferTypeOpInterface.h" 15 | #include "mlir/Interfaces/SideEffectInterfaces.h" 16 | #include 
"ttmlir/Dialect/TT/IR/TTOpsTypes.h" 17 | #include "ttmlir/Dialect/TTKernel/IR/TTKernelOpsTypes.h" 18 | 19 | #define GET_OP_CLASSES 20 | #include "ttmlir/Dialect/TTKernel/IR/TTKernelOps.h.inc" 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/simple_subtract_to_add.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<1x128xf32>) -> tensor<64x128xf32> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 6 | %0 = tensor.empty() : tensor<64x128xf32> 7 | // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]] 8 | // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]] 9 | // CHECK-NOT: %[[C:.*]] = "ttnn.subtract"[[C:.*]] 10 | %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<1x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 11 | return %1 : tensor<64x128xf32> 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_slice.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device_tile = #tt.operand_constraint 5 | module attributes {} { 6 | func.func @forward(%arg0: tensor<4x32x32xbf16>) -> tensor<2x16x16xbf16> { 7 | %0 = tensor.empty() : tensor<2x16x16xbf16> 8 | // CHECK: %[[C:.*]] = "ttnn.slice"[[C:.*]] 9 | %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [2: i32, 16: i32, 16: i32], step = [1: i32, 1: i32, 
1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x32x32xbf16>, tensor<2x16x16xbf16>) -> tensor<2x16x16xbf16> 10 | return %1 : tensor<2x16x16xbf16> 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ceil.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 8 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 9 | %0 = tensor.empty() : tensor<64x128xf32> 10 | // CHECK: %[[C:.*]] = "ttnn.ceil"[[C:.*]] 11 | %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 12 | return %1 : tensor<64x128xf32> 13 | } 14 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 8 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 9 | %0 = tensor.empty() : tensor<64x128xf32> 10 | // CHECK: %[[C:.*]] = "ttnn.log"[[C:.*]] 11 | %1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = 
[#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 12 | return %1 : tensor<64x128xf32> 13 | } 14 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_relu.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @relu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 8 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 9 | %0 = tensor.empty() : tensor<64x128xf32> 10 | // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]] 11 | %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 12 | return %1 : tensor<64x128xf32> 13 | } 14 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sine.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @sine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 8 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 9 | %0 = tensor.empty() : tensor<64x128xf32> 10 | // CHECK: %[[C:.*]] = "ttnn.sin"[[C:.*]] 11 | %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, 
tensor<64x128xf32>) -> tensor<64x128xf32> 12 | return %1 : tensor<64x128xf32> 13 | } 14 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sqrt.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 8 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 9 | %0 = tensor.empty() : tensor<64x128xf32> 10 | // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]] 11 | %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 12 | return %1 : tensor<64x128xf32> 13 | } 14 | -------------------------------------------------------------------------------- /runtime/lib/ttnn/operations/data_movement/reshape.cpp: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #include "reshape.h" 6 | #include "tt/runtime/detail/logger.h" 7 | #include "tt/runtime/detail/ttnn.h" 8 | 9 | namespace tt::runtime::ttnn::operations::data_movement { 10 | void run(const ::tt::target::ttnn::ReshapeOp *op, ProgramContext &context) { 11 | ProgramTensorPool &tensorPool = context.getTensorPool(); 12 | const ::ttnn::Tensor &in = tensorPool.at(op->in()->global_id()); 13 | DEBUG_ASSERT(in.is_allocated()); 14 | const auto *fbShape = op->shape(); 15 | std::vector shape(fbShape->begin(), fbShape->end()); 16 | ::ttnn::Tensor out = ::ttnn::reshape(in, shape); 17 | 
tensorPool.insert_or_assign(op->out()->global_id(), out); 18 | } 19 | } // namespace tt::runtime::ttnn::operations::data_movement 20 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/exponential_minus_one_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_expm1 attributes {} { 5 | func.func public @test_expm1(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.exponential_minus_one %arg0 : tensor<13x21x3xf32> 7 | // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] 8 | // CHECK: [[VAL1:%[0-9]+]] = "ttir.expm1"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] 9 | return %0 : tensor<13x21x3xf32> 10 | // CHECK: return [[VAL1]] : [[TENSOR_SIZE]] 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/ttmlir/Conversion/StableHLOToTTIR/log_plus_one_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s 3 | #any_device = #tt.operand_constraint 4 | module @jit_eltwise_log_plus_one attributes {} { 5 | func.func public @test_log_plus_one(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { 6 | %0 = stablehlo.log_plus_one %arg0 : tensor<13x21x3xf32> 7 | // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] 8 | // CHECK: [[VAL1:%[0-9]+]] = "ttir.log1p"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> 
[[TENSOR_SIZE]] 9 | return %0 : tensor<13x21x3xf32> 10 | // CHECK: return [[VAL1]] : [[TENSOR_SIZE]] 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/eltwise/unary/isfinite/simple_isfinite.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @is_finite(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> { 5 | // CHECK: %[[C:.*]] = "ttnn.empty" 6 | // CHECK-SAME: [[TENSOR:tensor<64x128xbf16,]] 7 | %0 = tensor.empty() : tensor<64x128xbf16> 8 | // CHECK: %[[C:.*]] = "ttnn.isfinite" 9 | // CHECK-SAME: tensor<64x128xf32, 10 | // CHECK-SAME: [[TENSOR]] 11 | // CHECK-SAME: -> [[TENSOR]] 12 | %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16> 13 | return %1 : tensor<64x128xbf16> 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/absolute_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_abs attributes {} { 10 | func.func public @test_abs(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public @test_abs 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.abs 14 | // CHECK-SAME: tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> 
tensor<64x128xf32, 17 | %0 = stablehlo.abs %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/cbrt_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_rsqrt attributes {} { 10 | func.func public @test_cbrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public @test_cbrt 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.cbrt 14 | // CHECK-SAME: tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.cbrt %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/ceil_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_ceil attributes {} { 10 | func.func public @test_ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public @test_ceil 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.ceil 14 | // CHECK-SAME: tensor<64x128xf32, 
15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.ceil %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/negate_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_neg attributes {} { 10 | func.func public @test_neg(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public @test_neg 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.neg 14 | // CHECK-SAME: tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.negate %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/sign_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_sign attributes {} { 10 | func.func public @test_sign(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public @test_sign 12 | // CHECK: ttnn.empty 13 
| // CHECK: ttnn.sign 14 | // CHECK-SAME: tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.sign %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/sine_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_sine attributes {} { 10 | func.func public @test_sine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public @test_sine 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.sin 14 | // CHECK-SAME: tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.sine %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/sqrt_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_sqrt attributes {} { 10 | func.func public @test_sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // 
CHECK-LABEL: func.func public @test_sqrt 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.sqrt 14 | // CHECK-SAME: tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.sqrt %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_cosine.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 8 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 9 | %0 = tensor.empty() : tensor<64x128xf32> 10 | // CHECK: %[[C:.*]] = "ttnn.cos"[[C:.*]] 11 | %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 12 | return %1 : tensor<64x128xf32> 13 | } 14 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_rsqrt.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 8 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 9 | %0 = tensor.empty() : tensor<64x128xf32> 10 | // CHECK: 
%[[C:.*]] = "ttnn.rsqrt"[[C:.*]] 11 | %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 12 | return %1 : tensor<64x128xf32> 13 | } 14 | -------------------------------------------------------------------------------- /runtime/tools/python/ttrt/runtime/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | try: 6 | from ._C import ( 7 | Device, 8 | Event, 9 | Tensor, 10 | DataType, 11 | DeviceRuntime, 12 | DebugEnv, 13 | DebugHooks, 14 | get_current_runtime, 15 | set_compatible_runtime, 16 | get_current_system_desc, 17 | open_device, 18 | close_device, 19 | submit, 20 | create_tensor, 21 | create_multi_device_tensor, 22 | wait, 23 | get_op_output_tensor, 24 | get_op_debug_str, 25 | WorkaroundEnv, 26 | ) 27 | except ModuleNotFoundError: 28 | raise ImportError( 29 | "Error: Project was not built with runtime enabled, rebuild with: -DTTMLIR_ENABLE_RUNTIME=ON" 30 | ) 31 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/floor_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_floor attributes {} { 10 | func.func public @test_floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public @test_floor 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.floor 14 | // CHECK-SAME: 
tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.floor %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/rsqrt_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_rsqrt attributes {} { 10 | func.func public @test_rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public @test_rsqrt 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.rsqrt 14 | // CHECK-SAME: tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.rsqrt %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/cosine_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_cosine attributes {} { 10 | func.func public @test_cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public 
@test_cosine 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.cos 14 | // CHECK-SAME: tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.cosine %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/exponential_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_exp attributes {} { 10 | func.func public @test_exp(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 11 | // CHECK-LABEL: func.func public @test_exp 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.exp 14 | // CHECK-SAME: tensor<64x128xf32, 15 | // CHECK-SAME: tensor<64x128xf32, 16 | // CHECK-SAME: -> tensor<64x128xf32, 17 | %0 = stablehlo.exponential %arg0 : tensor<64x128xf32> 18 | return %0 : tensor<64x128xf32> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sigmoid.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @sigmoid(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 8 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 9 | %0 = 
tensor.empty() : tensor<64x128xf32> 10 | // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]] 11 | %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 12 | return %1 : tensor<64x128xf32> 13 | } 14 | -------------------------------------------------------------------------------- /include/ttmlir/Conversion/TTIRToTTIRDecomposition/TTIRToTTIRDecomposition.h: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #ifndef TTMLIR_CONVERSION_TTIRTOTTIRDECOMPOSITION_TTIRTOTTIRDECOMPOSITION_H 6 | #define TTMLIR_CONVERSION_TTIRTOTTIRDECOMPOSITION_TTIRTOTTIRDECOMPOSITION_H 7 | 8 | #include "mlir/Pass/Pass.h" 9 | #include "mlir/Transforms/DialectConversion.h" 10 | 11 | namespace mlir::tt { 12 | 13 | void populateTTIRToTTIRDecompositionPatterns(MLIRContext *ctx, 14 | RewritePatternSet &patterns, 15 | TypeConverter &typeConverter); 16 | 17 | std::unique_ptr> createTTIRToTTIRDecompositionPass(); 18 | 19 | } // namespace mlir::tt 20 | 21 | #endif // TTMLIR_CONVERSION_TTIRTOTTIRDECOMPOSITION_TTIRTOTTIRDECOMPOSITION_H 22 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/StableHLO/Unary/logical_op.mlir: -------------------------------------------------------------------------------- 1 | // REQUIRES: stablehlo 2 | // RUN: rm -rf %t.ttnn 3 | // RUN: rm -rf %t.mlir 4 | // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | \ 5 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" > %t.mlir 6 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 7 | // RUN: FileCheck --input-file=%t.mlir %s 8 | 9 | module @jit_eltwise_compare attributes {} { 10 | func.func public @logical_not(%arg0: tensor<64x128xi1>) -> tensor<64x128xi1> { 11 | // 
CHECK-LABEL: func.func public @logical_not 12 | // CHECK: ttnn.empty 13 | // CHECK: ttnn.logical_not 14 | // CHECK-SAME: tensor<64x128xbf16, 15 | // CHECK-SAME: tensor<64x128xbf16, 16 | // CHECK-SAME: -> tensor<64x128xbf16, 17 | %0 = stablehlo.not %arg0 : tensor<64x128xi1> 18 | return %0 : tensor<64x128xi1> 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /runtime/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Options 2 | option(TTMLIR_ENABLE_RUNTIME_TESTS "Enable runtime tests" OFF) 3 | option(TT_RUNTIME_ENABLE_TTNN "Enable TTNN Runtime" ON) 4 | option(TT_RUNTIME_ENABLE_TTMETAL "Enable TTMetal Runtime" ON) 5 | option(TT_RUNTIME_DEBUG "Enable debug tools in runtime" OFF) 6 | option(TT_RUNTIME_WORKAROUNDS "Enable toggling workarounds in runtime" OFF) 7 | 8 | if (CMAKE_BUILD_TYPE STREQUAL "Debug") 9 | set(TT_RUNTIME_DEBUG ON) 10 | set(TT_RUNTIME_WORKAROUNDS ON) 11 | endif() 12 | 13 | set(TT_RUNTIME_OPTIONS 14 | TT_RUNTIME_DEBUG 15 | TT_RUNTIME_ENABLE_PERF_TRACE 16 | TT_RUNTIME_WORKAROUNDS 17 | ) 18 | 19 | foreach(OPTION ${TT_RUNTIME_OPTIONS}) 20 | if (${OPTION}) 21 | add_definitions(-D${OPTION}) 22 | endif() 23 | endforeach() 24 | 25 | add_subdirectory(lib) 26 | add_subdirectory(tools) 27 | if (TTMLIR_ENABLE_RUNTIME_TESTS) 28 | add_subdirectory(test) 29 | endif() 30 | -------------------------------------------------------------------------------- /test/ttmlir/Dialect/TTNN/pooling/simple_maxpool2d.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s 2 | #any_device = #tt.operand_constraint 3 | module attributes {} { 4 | func.func @forward(%arg0: tensor<1x128x128x32xbf16>) -> tensor<1x64x64x32xbf16> { 5 | %0 = tensor.empty() : tensor<1x64x64x32xbf16> 6 | // CHECK: %[[C:.*]] = "ttnn.max_pool2d"[[C:.*]] 7 | %1 = "ttir.max_pool2d"(%arg0, %0) <{kernel_height=2: 
si32, kernel_width=2: si32, stride_height=2: si32, stride_width=2: si32, dilation_height=1: si32, dilation_width=1: si32, ceil_mode=false, padding_left=0: si32, padding_right=0: si32, padding_top=0: si32, padding_bottom=0: si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x128x128x32xbf16>, tensor<1x64x64x32xbf16>) -> tensor<1x64x64x32xbf16> 8 | return %1 : tensor<1x64x64x32xbf16> 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/ttmlir/Silicon/TTNN/perf_unit/test_perf_reciprocal.mlir: -------------------------------------------------------------------------------- 1 | // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir 2 | // RUN: FileCheck %s --input-file=%t.mlir 3 | // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn 4 | #any_device = #tt.operand_constraint 5 | #any_device_tile = #tt.operand_constraint 6 | 7 | func.func @reciprocal(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { 8 | // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] 9 | %0 = tensor.empty() : tensor<64x128xf32> 10 | // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]] 11 | %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> 12 | return %1 : tensor<64x128xf32> 13 | } 14 | -------------------------------------------------------------------------------- /include/ttmlir/Dialect/TT/IR/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_mlir_dialect(TTOps tt) 2 | add_mlir_doc(TTBase TTDialect src/autogen/md/Dialect/ -gen-dialect-doc) 3 | add_mlir_doc(TTOps TTOp src/autogen/md/Dialect/ -gen-op-doc) 4 | add_mlir_doc(TTOpsTypes TTAttr src/autogen/md/Dialect/ -gen-attrdef-doc) 5 | add_mlir_doc(TTOpsTypes TTType src/autogen/md/Dialect/ -gen-typedef-doc) 6 | 7 | set(LLVM_TARGET_DEFINITIONS TTOps.td) 8 | 
mlir_tablegen(TTOpsAttrDefs.h.inc -gen-attrdef-decls) 9 | mlir_tablegen(TTOpsAttrDefs.cpp.inc -gen-attrdef-defs) 10 | add_public_tablegen_target(MLIRTTOpsAttributesIncGen) 11 | add_dependencies(mlir-headers MLIRTTOpsAttributesIncGen) 12 | 13 | set(LLVM_TARGET_DEFINITIONS TTOpsEnums.td) 14 | mlir_tablegen(TTOpsEnums.h.inc -gen-enum-decls) 15 | mlir_tablegen(TTOpsEnums.cpp.inc -gen-enum-defs) 16 | add_public_tablegen_target(MLIRTTOpsEnumsIncGen) 17 | add_dependencies(mlir-headers MLIRTTOpsEnumsIncGen) 18 | --------------------------------------------------------------------------------