├── Podfile
├── Podfile.lock
├── Pods
├── Manifest.lock
├── Pods.xcodeproj
│ ├── project.pbxproj
│ └── xcuserdata
│ │ └── alexeykorotkov.xcuserdatad
│ │ └── xcschemes
│ │ ├── Pods-Segmentation Live.xcscheme
│ │ ├── TensorFlowLiteGpuExperimental.xcscheme
│ │ └── xcschememanagement.plist
├── Target Support Files
│ ├── Pods-Segmentation Live
│ │ ├── Pods-Segmentation Live-Info.plist
│ │ ├── Pods-Segmentation Live-acknowledgements.markdown
│ │ ├── Pods-Segmentation Live-acknowledgements.plist
│ │ ├── Pods-Segmentation Live-dummy.m
│ │ ├── Pods-Segmentation Live-umbrella.h
│ │ ├── Pods-Segmentation Live.debug.xcconfig
│ │ ├── Pods-Segmentation Live.modulemap
│ │ └── Pods-Segmentation Live.release.xcconfig
│ └── TensorFlowLiteGpuExperimental
│ │ └── TensorFlowLiteGpuExperimental.xcconfig
└── TensorFlowLiteGpuExperimental
│ └── Frameworks
│ └── tensorflow_lite_gpu.framework
│ ├── Headers
│ ├── flatbuffers
│ │ ├── base.h
│ │ ├── code_generators.h
│ │ ├── flatbuffers.h
│ │ ├── flatc.h
│ │ ├── flexbuffers.h
│ │ ├── grpc.h
│ │ ├── hash.h
│ │ ├── idl.h
│ │ ├── minireflect.h
│ │ ├── reflection.h
│ │ ├── reflection_generated.h
│ │ ├── registry.h
│ │ ├── stl_emulation.h
│ │ └── util.h
│ └── tensorflow
│ │ └── lite
│ │ ├── allocation.h
│ │ ├── arena_planner.h
│ │ ├── builtin_op_data.h
│ │ ├── builtin_ops.h
│ │ ├── c
│ │ ├── builtin_op_data.h
│ │ └── c_api_internal.h
│ │ ├── context.h
│ │ ├── context_util.h
│ │ ├── core
│ │ ├── api
│ │ │ ├── error_reporter.h
│ │ │ ├── flatbuffer_conversions.h
│ │ │ └── op_resolver.h
│ │ └── subgraph.h
│ │ ├── delegates
│ │ ├── flex
│ │ │ ├── buffer_map.h
│ │ │ ├── delegate.h
│ │ │ ├── delegate_data.h
│ │ │ ├── kernel.h
│ │ │ ├── test_util.h
│ │ │ └── util.h
│ │ ├── gpu
│ │ │ └── metal_delegate.h
│ │ └── nnapi
│ │ │ └── nnapi_delegate.h
│ │ ├── error_reporter.h
│ │ ├── experimental
│ │ ├── c
│ │ │ ├── c_api.h
│ │ │ ├── c_api_experimental.h
│ │ │ └── c_api_internal.h
│ │ ├── kernels
│ │ │ ├── ctc_beam_entry.h
│ │ │ ├── ctc_beam_scorer.h
│ │ │ ├── ctc_beam_search.h
│ │ │ ├── ctc_decoder.h
│ │ │ ├── ctc_loss_util.h
│ │ │ └── top_n.h
│ │ ├── micro
│ │ │ ├── compatibility.h
│ │ │ ├── debug_log.h
│ │ │ ├── debug_log_numbers.h
│ │ │ ├── examples
│ │ │ │ └── micro_speech
│ │ │ │ │ ├── CMSIS
│ │ │ │ │ ├── hanning.h
│ │ │ │ │ └── sin_1k.h
│ │ │ │ │ ├── audio_provider.h
│ │ │ │ │ ├── feature_provider.h
│ │ │ │ │ ├── model_settings.h
│ │ │ │ │ ├── no_30ms_sample_data.h
│ │ │ │ │ ├── no_features_data.h
│ │ │ │ │ ├── no_power_spectrum_data.h
│ │ │ │ │ ├── preprocessor.h
│ │ │ │ │ ├── recognize_commands.h
│ │ │ │ │ ├── tiny_conv_model_data.h
│ │ │ │ │ ├── yes_30ms_sample_data.h
│ │ │ │ │ ├── yes_features_data.h
│ │ │ │ │ └── yes_power_spectrum_data.h
│ │ │ ├── kernels
│ │ │ │ └── all_ops_resolver.h
│ │ │ ├── micro_error_reporter.h
│ │ │ ├── micro_interpreter.h
│ │ │ ├── micro_mutable_op_resolver.h
│ │ │ ├── simple_tensor_allocator.h
│ │ │ └── testing
│ │ │ │ ├── micro_test.h
│ │ │ │ └── test_utils.h
│ │ ├── microfrontend
│ │ │ └── lib
│ │ │ │ ├── bits.h
│ │ │ │ ├── fft.h
│ │ │ │ ├── fft_io.h
│ │ │ │ ├── fft_util.h
│ │ │ │ ├── filterbank.h
│ │ │ │ ├── filterbank_io.h
│ │ │ │ ├── filterbank_util.h
│ │ │ │ ├── frontend.h
│ │ │ │ ├── frontend_io.h
│ │ │ │ ├── frontend_util.h
│ │ │ │ ├── log_lut.h
│ │ │ │ ├── log_scale.h
│ │ │ │ ├── log_scale_io.h
│ │ │ │ ├── log_scale_util.h
│ │ │ │ ├── noise_reduction.h
│ │ │ │ ├── noise_reduction_io.h
│ │ │ │ ├── noise_reduction_util.h
│ │ │ │ ├── pcan_gain_control.h
│ │ │ │ ├── pcan_gain_control_util.h
│ │ │ │ ├── window.h
│ │ │ │ ├── window_io.h
│ │ │ │ └── window_util.h
│ │ └── writer
│ │ │ ├── enum_mapping.h
│ │ │ └── writer_lib.h
│ │ ├── graph_info.h
│ │ ├── interpreter.h
│ │ ├── kernels
│ │ ├── activation_functor.h
│ │ ├── eigen_support.h
│ │ ├── gemm_support.h
│ │ ├── internal
│ │ │ ├── common.h
│ │ │ ├── compatibility.h
│ │ │ ├── kernel_utils.h
│ │ │ ├── legacy_types.h
│ │ │ ├── mfcc.h
│ │ │ ├── mfcc_dct.h
│ │ │ ├── mfcc_mel_filterbank.h
│ │ │ ├── optimized
│ │ │ │ ├── cpu_check.h
│ │ │ │ ├── depthwiseconv_float.h
│ │ │ │ ├── depthwiseconv_uint8.h
│ │ │ │ ├── depthwiseconv_uint8_3x3_filter.h
│ │ │ │ ├── eigen_spatial_convolutions.h
│ │ │ │ ├── eigen_tensor_reduced_instantiations_google.h
│ │ │ │ ├── eigen_tensor_reduced_instantiations_oss.h
│ │ │ │ ├── legacy_optimized_ops.h
│ │ │ │ ├── multithreaded_conv.h
│ │ │ │ ├── neon_tensor_utils.h
│ │ │ │ ├── optimized_ops.h
│ │ │ │ └── tensor_utils_impl.h
│ │ │ ├── quantization_util.h
│ │ │ ├── reference
│ │ │ │ ├── depthwiseconv_float.h
│ │ │ │ ├── depthwiseconv_uint8.h
│ │ │ │ ├── fully_connected.h
│ │ │ │ ├── integer_ops
│ │ │ │ │ ├── dequantize.h
│ │ │ │ │ ├── pooling.h
│ │ │ │ │ └── softmax.h
│ │ │ │ ├── legacy_reference_ops.h
│ │ │ │ ├── portable_tensor_utils.h
│ │ │ │ ├── reference_ops.h
│ │ │ │ └── softmax.h
│ │ │ ├── round.h
│ │ │ ├── spectrogram.h
│ │ │ ├── strided_slice_logic.h
│ │ │ ├── tensor.h
│ │ │ ├── tensor_ctypes.h
│ │ │ ├── tensor_utils.h
│ │ │ ├── test_util.h
│ │ │ └── types.h
│ │ ├── kernel_util.h
│ │ ├── lstm_eval.h
│ │ ├── op_macros.h
│ │ ├── padding.h
│ │ ├── register.h
│ │ ├── register_ref.h
│ │ └── test_util.h
│ │ ├── memory_planner.h
│ │ ├── model.h
│ │ ├── models
│ │ └── smartreply
│ │ │ └── predictor.h
│ │ ├── mutable_op_resolver.h
│ │ ├── nnapi_delegate.h
│ │ ├── op_resolver.h
│ │ ├── optional_debug_tools.h
│ │ ├── profiling
│ │ ├── profile_buffer.h
│ │ ├── profile_summarizer.h
│ │ ├── profiler.h
│ │ └── time.h
│ │ ├── python
│ │ └── interpreter_wrapper
│ │ │ └── interpreter_wrapper.h
│ │ ├── schema
│ │ ├── builtin_ops_header
│ │ │ └── generator.h
│ │ └── schema_generated.h
│ │ ├── simple_memory_arena.h
│ │ ├── stderr_reporter.h
│ │ ├── string.h
│ │ ├── string_util.h
│ │ ├── testing
│ │ ├── generate_testspec.h
│ │ ├── init_tensorflow.h
│ │ ├── join.h
│ │ ├── message.h
│ │ ├── parse_testdata.h
│ │ ├── split.h
│ │ ├── test_runner.h
│ │ ├── tf_driver.h
│ │ ├── tflite_diff_flags.h
│ │ ├── tflite_diff_util.h
│ │ ├── tflite_driver.h
│ │ ├── tokenize.h
│ │ └── util.h
│ │ ├── util.h
│ │ └── version.h
│ ├── LICENSE
│ ├── libmetal_delegate.a
│ └── tensorflow_lite_gpu
├── README.md
├── Segmentation Live.xcodeproj
├── project.pbxproj
├── project.xcworkspace
│ ├── contents.xcworkspacedata
│ ├── xcshareddata
│ │ └── IDEWorkspaceChecks.plist
│ └── xcuserdata
│ │ └── alexeykorotkov.xcuserdatad
│ │ └── UserInterfaceState.xcuserstate
└── xcuserdata
│ └── alexeykorotkov.xcuserdatad
│ └── xcschemes
│ └── xcschememanagement.plist
├── Segmentation Live.xcworkspace
├── contents.xcworkspacedata
├── xcshareddata
│ └── IDEWorkspaceChecks.plist
└── xcuserdata
│ └── alexeykorotkov.xcuserdatad
│ └── UserInterfaceState.xcuserstate
├── Segmentation Live
├── AppDelegate.swift
├── Assets.xcassets
│ ├── AppIcon.appiconset
│ │ └── Contents.json
│ ├── Contents.json
│ └── flip-camera.imageset
│ │ ├── Contents.json
│ │ └── flip-camera@3x.png
├── DeeplabModel.h
├── DeeplabModel.mm
├── Resources
│ ├── Base.lproj
│ │ ├── LaunchScreen.storyboard
│ │ └── Main.storyboard
│ ├── Info.plist
│ ├── Segmentation Live-Bridging-Header.h
│ └── deeplabv3_257_mv_gpu.tflite
└── ViewController.swift
└── chair_flower_segmentation.gif
/Podfile:
--------------------------------------------------------------------------------
1 | platform :ios, '9.0'
2 |
3 | target 'Segmentation Live' do
4 | # Comment the next line if you're not using Swift and don't want to use dynamic frameworks
5 | use_frameworks!
6 |
7 | # Pods for DeepLabApp
8 | pod 'TensorFlowLiteGpuExperimental', '0.0.1'
9 |
10 | end
11 |
--------------------------------------------------------------------------------
/Podfile.lock:
--------------------------------------------------------------------------------
1 | PODS:
2 | - TensorFlowLiteGpuExperimental (0.0.1)
3 |
4 | DEPENDENCIES:
5 | - TensorFlowLiteGpuExperimental (= 0.0.1)
6 |
7 | SPEC REPOS:
8 | https://github.com/cocoapods/specs.git:
9 | - TensorFlowLiteGpuExperimental
10 |
11 | SPEC CHECKSUMS:
12 | TensorFlowLiteGpuExperimental: 16d9be415bce7b2bba9e0be17e2009fd57428d0f
13 |
14 | PODFILE CHECKSUM: 5c471389261d6d70d5bc218c1486c32a3d56fabb
15 |
16 | COCOAPODS: 1.6.1
17 |
--------------------------------------------------------------------------------
/Pods/Manifest.lock:
--------------------------------------------------------------------------------
1 | PODS:
2 | - TensorFlowLiteGpuExperimental (0.0.1)
3 |
4 | DEPENDENCIES:
5 | - TensorFlowLiteGpuExperimental (= 0.0.1)
6 |
7 | SPEC REPOS:
8 | https://github.com/cocoapods/specs.git:
9 | - TensorFlowLiteGpuExperimental
10 |
11 | SPEC CHECKSUMS:
12 | TensorFlowLiteGpuExperimental: 16d9be415bce7b2bba9e0be17e2009fd57428d0f
13 |
14 | PODFILE CHECKSUM: 5c471389261d6d70d5bc218c1486c32a3d56fabb
15 |
16 | COCOAPODS: 1.6.1
17 |
--------------------------------------------------------------------------------
/Pods/Pods.xcodeproj/xcuserdata/alexeykorotkov.xcuserdatad/xcschemes/Pods-Segmentation Live.xcscheme:
--------------------------------------------------------------------------------
1 |
2 |
5 |
8 |
9 |
15 |
21 |
22 |
23 |
24 |
25 |
30 |
31 |
32 |
33 |
34 |
35 |
45 |
46 |
52 |
53 |
54 |
55 |
56 |
57 |
63 |
64 |
66 |
67 |
70 |
71 |
72 |
--------------------------------------------------------------------------------
/Pods/Pods.xcodeproj/xcuserdata/alexeykorotkov.xcuserdatad/xcschemes/TensorFlowLiteGpuExperimental.xcscheme:
--------------------------------------------------------------------------------
1 |
2 |
5 |
8 |
9 |
15 |
21 |
22 |
23 |
24 |
25 |
30 |
31 |
32 |
33 |
43 |
44 |
45 |
46 |
52 |
53 |
55 |
56 |
59 |
60 |
61 |
--------------------------------------------------------------------------------
/Pods/Pods.xcodeproj/xcuserdata/alexeykorotkov.xcuserdatad/xcschemes/xcschememanagement.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | SchemeUserState
6 |
7 | Pods-Segmentation Live.xcscheme
8 |
9 | isShown
10 |
11 | orderHint
12 | 0
13 |
14 | TensorFlowLiteGpuExperimental.xcscheme
15 |
16 | isShown
17 |
18 | orderHint
19 | 1
20 |
21 |
22 | SuppressBuildableAutocreation
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/Pods/Target Support Files/Pods-Segmentation Live/Pods-Segmentation Live-Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | CFBundleDevelopmentRegion
6 | en
7 | CFBundleExecutable
8 | ${EXECUTABLE_NAME}
9 | CFBundleIdentifier
10 | ${PRODUCT_BUNDLE_IDENTIFIER}
11 | CFBundleInfoDictionaryVersion
12 | 6.0
13 | CFBundleName
14 | ${PRODUCT_NAME}
15 | CFBundlePackageType
16 | FMWK
17 | CFBundleShortVersionString
18 | 1.0.0
19 | CFBundleSignature
20 | ????
21 | CFBundleVersion
22 | ${CURRENT_PROJECT_VERSION}
23 | NSPrincipalClass
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/Pods/Target Support Files/Pods-Segmentation Live/Pods-Segmentation Live-acknowledgements.markdown:
--------------------------------------------------------------------------------
1 | # Acknowledgements
2 | This application makes use of the following third party libraries:
3 |
4 | ## TensorFlowLiteGpuExperimental
5 |
6 | Apache 2
7 | Generated by CocoaPods - https://cocoapods.org
8 |
--------------------------------------------------------------------------------
/Pods/Target Support Files/Pods-Segmentation Live/Pods-Segmentation Live-acknowledgements.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | PreferenceSpecifiers
6 |
7 |
8 | FooterText
9 | This application makes use of the following third party libraries:
10 | Title
11 | Acknowledgements
12 | Type
13 | PSGroupSpecifier
14 |
15 |
16 | FooterText
17 | Apache 2
18 | License
19 | Copyright
20 | Title
21 | TensorFlowLiteGpuExperimental
22 | Type
23 | PSGroupSpecifier
24 |
25 |
26 | FooterText
27 | Generated by CocoaPods - https://cocoapods.org
28 | Title
29 |
30 | Type
31 | PSGroupSpecifier
32 |
33 |
34 | StringsTable
35 | Acknowledgements
36 | Title
37 | Acknowledgements
38 |
39 |
40 |
--------------------------------------------------------------------------------
/Pods/Target Support Files/Pods-Segmentation Live/Pods-Segmentation Live-dummy.m:
--------------------------------------------------------------------------------
1 | #import <Foundation/Foundation.h>
2 | @interface PodsDummy_Pods_Segmentation_Live : NSObject
3 | @end
4 | @implementation PodsDummy_Pods_Segmentation_Live
5 | @end
6 |
--------------------------------------------------------------------------------
/Pods/Target Support Files/Pods-Segmentation Live/Pods-Segmentation Live-umbrella.h:
--------------------------------------------------------------------------------
1 | #ifdef __OBJC__
2 | #import <UIKit/UIKit.h>
3 | #else
4 | #ifndef FOUNDATION_EXPORT
5 | #if defined(__cplusplus)
6 | #define FOUNDATION_EXPORT extern "C"
7 | #else
8 | #define FOUNDATION_EXPORT extern
9 | #endif
10 | #endif
11 | #endif
12 |
13 |
14 | FOUNDATION_EXPORT double Pods_Segmentation_LiveVersionNumber;
15 | FOUNDATION_EXPORT const unsigned char Pods_Segmentation_LiveVersionString[];
16 |
17 |
--------------------------------------------------------------------------------
/Pods/Target Support Files/Pods-Segmentation Live/Pods-Segmentation Live.debug.xcconfig:
--------------------------------------------------------------------------------
1 | FRAMEWORK_SEARCH_PATHS = $(inherited) "${PODS_ROOT}/TensorFlowLiteGpuExperimental/Frameworks"
2 | GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1
3 | HEADER_SEARCH_PATHS = '${SRCROOT}/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers'
4 | LD_RUNPATH_SEARCH_PATHS = $(inherited) '@executable_path/Frameworks' '@loader_path/Frameworks'
5 | OTHER_LDFLAGS = $(inherited) -L ${SRCROOT}/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework -ObjC -l"c++" -l"metal_delegate" -framework "Accelerate" -framework "tensorflow_lite_gpu" -force_load ${SRCROOT}/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/tensorflow_lite_gpu
6 | PODS_BUILD_DIR = ${BUILD_DIR}
7 | PODS_CONFIGURATION_BUILD_DIR = ${PODS_BUILD_DIR}/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)
8 | PODS_PODFILE_DIR_PATH = ${SRCROOT}/.
9 | PODS_ROOT = ${SRCROOT}/Pods
10 |
--------------------------------------------------------------------------------
/Pods/Target Support Files/Pods-Segmentation Live/Pods-Segmentation Live.modulemap:
--------------------------------------------------------------------------------
1 | framework module Pods_Segmentation_Live {
2 | umbrella header "Pods-Segmentation Live-umbrella.h"
3 |
4 | export *
5 | module * { export * }
6 | }
7 |
--------------------------------------------------------------------------------
/Pods/Target Support Files/Pods-Segmentation Live/Pods-Segmentation Live.release.xcconfig:
--------------------------------------------------------------------------------
1 | FRAMEWORK_SEARCH_PATHS = $(inherited) "${PODS_ROOT}/TensorFlowLiteGpuExperimental/Frameworks"
2 | GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1
3 | HEADER_SEARCH_PATHS = '${SRCROOT}/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers'
4 | LD_RUNPATH_SEARCH_PATHS = $(inherited) '@executable_path/Frameworks' '@loader_path/Frameworks'
5 | OTHER_LDFLAGS = $(inherited) -L ${SRCROOT}/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework -ObjC -l"c++" -l"metal_delegate" -framework "Accelerate" -framework "tensorflow_lite_gpu" -force_load ${SRCROOT}/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/tensorflow_lite_gpu
6 | PODS_BUILD_DIR = ${BUILD_DIR}
7 | PODS_CONFIGURATION_BUILD_DIR = ${PODS_BUILD_DIR}/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)
8 | PODS_PODFILE_DIR_PATH = ${SRCROOT}/.
9 | PODS_ROOT = ${SRCROOT}/Pods
10 |
--------------------------------------------------------------------------------
/Pods/Target Support Files/TensorFlowLiteGpuExperimental/TensorFlowLiteGpuExperimental.xcconfig:
--------------------------------------------------------------------------------
1 | CONFIGURATION_BUILD_DIR = ${PODS_CONFIGURATION_BUILD_DIR}/TensorFlowLiteGpuExperimental
2 | FRAMEWORK_SEARCH_PATHS = $(inherited) "${PODS_ROOT}/TensorFlowLiteGpuExperimental/Frameworks"
3 | GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1
4 | HEADER_SEARCH_PATHS = '${SRCROOT}/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers'
5 | OTHER_LDFLAGS = $(inherited) -L ${SRCROOT}/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework -l"c++" -l"metal_delegate" -framework "Accelerate" -force_load ${SRCROOT}/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/tensorflow_lite_gpu
6 | PODS_BUILD_DIR = ${BUILD_DIR}
7 | PODS_CONFIGURATION_BUILD_DIR = ${PODS_BUILD_DIR}/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)
8 | PODS_ROOT = ${SRCROOT}
9 | PODS_TARGET_SRCROOT = ${PODS_ROOT}/TensorFlowLiteGpuExperimental
10 | PRODUCT_BUNDLE_IDENTIFIER = org.cocoapods.${PRODUCT_NAME:rfc1034identifier}
11 | SKIP_INSTALL = YES
12 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/flatbuffers/flatc.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2017 Google Inc. All rights reserved.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | #include <functional>
18 | #include <limits>
19 | #include <string>
20 | #include "flatbuffers/flatbuffers.h"
21 | #include "flatbuffers/idl.h"
22 | #include "flatbuffers/util.h"
23 |
24 | #ifndef FLATC_H_
25 | # define FLATC_H_
26 |
27 | namespace flatbuffers {
28 |
29 | class FlatCompiler {
30 | public:
31 | // Output generator for the various programming languages and formats we
32 | // support.
33 | struct Generator {
34 | typedef bool (*GenerateFn)(const flatbuffers::Parser &parser,
35 | const std::string &path,
36 | const std::string &file_name);
37 | typedef std::string (*MakeRuleFn)(const flatbuffers::Parser &parser,
38 | const std::string &path,
39 | const std::string &file_name);
40 |
41 | GenerateFn generate;
42 | const char *generator_opt_short;
43 | const char *generator_opt_long;
44 | const char *lang_name;
45 | bool schema_only;
46 | GenerateFn generateGRPC;
47 | flatbuffers::IDLOptions::Language lang;
48 | const char *generator_help;
49 | MakeRuleFn make_rule;
50 | };
51 |
52 | typedef void (*WarnFn)(const FlatCompiler *flatc, const std::string &warn,
53 | bool show_exe_name);
54 |
55 | typedef void (*ErrorFn)(const FlatCompiler *flatc, const std::string &err,
56 | bool usage, bool show_exe_name);
57 |
58 | // Parameters required to initialize the FlatCompiler.
59 | struct InitParams {
60 | InitParams()
61 | : generators(nullptr),
62 | num_generators(0),
63 | warn_fn(nullptr),
64 | error_fn(nullptr) {}
65 |
66 | const Generator *generators;
67 | size_t num_generators;
68 | WarnFn warn_fn;
69 | ErrorFn error_fn;
70 | };
71 |
72 | explicit FlatCompiler(const InitParams &params) : params_(params) {}
73 |
74 | int Compile(int argc, const char **argv);
75 |
76 | std::string GetUsageString(const char *program_name) const;
77 |
78 | private:
79 | void ParseFile(flatbuffers::Parser &parser, const std::string &filename,
80 | const std::string &contents,
81 | std::vector<const char *> &include_directories) const;
82 |
83 | void Warn(const std::string &warn, bool show_exe_name = true) const;
84 |
85 | void Error(const std::string &err, bool usage = true,
86 | bool show_exe_name = true) const;
87 |
88 | InitParams params_;
89 | };
90 |
91 | } // namespace flatbuffers
92 |
93 | #endif // FLATC_H_
94 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/builtin_op_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | // Compatibility shim for new location of interface definitions.
16 |
17 | #ifndef TENSORFLOW_LITE_BUILTIN_OP_DATA_H_
18 | #define TENSORFLOW_LITE_BUILTIN_OP_DATA_H_
19 |
20 | #include "tensorflow/lite/c/builtin_op_data.h"
21 |
22 | #endif // TENSORFLOW_LITE_BUILTIN_OP_DATA_H_
23 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/context.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | // Compatibility shim for moved header location.
16 | #ifndef TENSORFLOW_LITE_CONTEXT_H_
17 | #define TENSORFLOW_LITE_CONTEXT_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 |
21 | #endif // TENSORFLOW_LITE_CONTEXT_H_
22 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/context_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | // This provides a few C++ helpers that are useful for manipulating C structures
16 | // in C++.
17 | #ifndef TENSORFLOW_LITE_CONTEXT_UTIL_H_
18 | #define TENSORFLOW_LITE_CONTEXT_UTIL_H_
19 |
20 | #include "tensorflow/lite/c/c_api_internal.h"
21 |
22 | namespace tflite {
23 |
24 | // Provide a range iterable wrapper for TfLiteIntArray* (C lists that TfLite
25 | // C api uses. Can't use the google array_view, since we can't depend on even
26 | // absl for embedded device reasons.
27 | class TfLiteIntArrayView {
28 | public:
29 | // Construct a view of a TfLiteIntArray*. Note, `int_array` should be non-null
30 | // and this view does not take ownership of it.
31 | explicit TfLiteIntArrayView(const TfLiteIntArray* int_array)
32 | : int_array_(int_array) {}
33 |
34 | TfLiteIntArrayView(const TfLiteIntArrayView&) = default;
35 | TfLiteIntArrayView& operator=(const TfLiteIntArrayView& rhs) = default;
36 |
37 | typedef const int* const_iterator;
38 | const_iterator begin() const { return int_array_->data; }
39 | const_iterator end() const { return &int_array_->data[int_array_->size]; }
40 | size_t size() const { return end() - begin(); }
41 |
42 | private:
43 | const TfLiteIntArray* int_array_;
44 | };
45 |
46 | } // namespace tflite
47 |
48 | #endif // TENSORFLOW_LITE_CONTEXT_UTIL_H_
49 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/core/api/error_reporter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
16 | #define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
17 |
18 | #include <cstdarg>
19 |
20 | namespace tflite {
21 |
22 | // A functor that reports error to supporting system. Invoked similar to
23 | // printf.
24 | //
25 | // Usage:
26 | // ErrorReporter foo;
27 | // foo.Report("test %d", 5);
28 | // or
29 | // va_list args;
30 | // foo.Report("test %d", args); // where args is va_list
31 | //
32 | // Subclass ErrorReporter to provide another reporting destination.
33 | // For example, if you have a GUI program, you might redirect to a buffer
34 | // that drives a GUI error log box.
35 | class ErrorReporter {
36 | public:
37 | virtual ~ErrorReporter() {}
38 | virtual int Report(const char* format, va_list args) = 0;
39 | int Report(const char* format, ...);
40 | int ReportError(void*, const char* format, ...);
41 | };
42 |
43 | } // namespace tflite
44 |
45 | #endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
46 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/core/api/flatbuffer_conversions.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
16 | #define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
17 |
18 | // These functions transform codes and data structures that are defined in the
19 | // flatbuffer serialization format into in-memory values that are used by the
20 | // runtime API and interpreter.
21 |
22 | #include "tensorflow/lite/c/c_api_internal.h"
23 | #include "tensorflow/lite/core/api/error_reporter.h"
24 | #include "tensorflow/lite/core/api/op_resolver.h"
25 | #include "tensorflow/lite/schema/schema_generated.h"
26 |
27 | namespace tflite {
28 |
29 | // Interface class for builtin data allocations.
30 | class BuiltinDataAllocator {
31 | public:
32 | virtual void* Allocate(size_t size) = 0;
33 | virtual void Deallocate(void* data) = 0;
34 |
35 | // Allocate a structure, but make sure it is a POD structure that doesn't
36 | // require constructors to run. The reason we do this, is that Interpreter's C
37 | // extension part will take ownership so destructors will not be run during
38 | // deallocation.
39 | template <typename T>
40 | T* AllocatePOD() {
41 | static_assert(std::is_pod<T>::value, "Builtin data structure must be POD.");
42 | return static_cast<T*>(this->Allocate(sizeof(T)));
43 | }
44 |
45 | virtual ~BuiltinDataAllocator() {}
46 | };
47 |
48 | // Parse the appropriate data out of the op.
49 | //
50 | // This handles builtin data explicitly as there are flatbuffer schemas.
51 | // If it returns kTfLiteOk, it passes the data out with `builtin_data`. The
52 | // calling function has to pass in an allocator object, and this allocator
53 | // will be called to reserve space for the output data. If the calling
54 | // function's allocator reserves memory on the heap, then it's the calling
55 | // function's responsibility to free it.
56 | // If it returns kTfLiteError, `builtin_data` will be `nullptr`.
57 | TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
58 | ErrorReporter* error_reporter,
59 | BuiltinDataAllocator* allocator, void** builtin_data);
60 |
61 | // Converts the tensor data type used in the flat buffer to the representation
62 | // used by the runtime.
63 | TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
64 | ErrorReporter* error_reporter);
65 |
66 | } // namespace tflite
67 |
68 | #endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
69 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/core/api/op_resolver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
16 | #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
17 |
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 | #include "tensorflow/lite/core/api/error_reporter.h"
20 | #include "tensorflow/lite/schema/schema_generated.h"
21 |
22 | namespace tflite {
23 |
24 | // Abstract interface that returns TfLiteRegistrations given op codes or custom
25 | // op names. This is the mechanism that ops being referenced in the flatbuffer
26 | // model are mapped to executable function pointers (TfLiteRegistrations).
27 | class OpResolver {
28 | public:
29 | // Finds the op registration for a builtin operator by enum code.
30 | virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
31 | int version) const = 0;
32 | // Finds the op registration of a custom operator by op name.
33 | virtual const TfLiteRegistration* FindOp(const char* op,
34 | int version) const = 0;
35 | virtual ~OpResolver() {}
36 | };
37 |
38 | // Handles the logic for converting between an OperatorCode structure extracted
39 | // from a flatbuffer and information about a registered operator implementation.
40 | TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode,
41 | const OpResolver& op_resolver,
42 | ErrorReporter* error_reporter,
43 | const TfLiteRegistration** registration);
44 |
45 | } // namespace tflite
46 |
47 | #endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
48 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/delegates/flex/delegate.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_DELEGATES_FLEX_DELEGATE_H_
16 | #define TENSORFLOW_LITE_DELEGATES_FLEX_DELEGATE_H_
17 |
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 | #include "tensorflow/lite/delegates/flex/delegate_data.h"
20 |
21 | namespace tflite {
22 |
23 | // WARNING: This is an experimental interface that is subject to change.
24 | // Delegate that can be used to extract parts of a graph that are designed to be
25 | // executed by TensorFlow's runtime via Eager.
26 | //
27 | // The interpreter must be constructed after the FlexDelegate and destructed
28 | // before the FlexDelegate. This delegate may be used with multiple
29 | // interpreters, but it is *not* thread-safe.
30 | //
31 | // Usage:
32 | // auto delegate = FlexDelegate::Create();
33 | // ... build interpreter ...
34 | //
35 | // if (delegate) {
36 | // interpreter->ModifyGraphWithDelegate(
37 | // delegate.get(), /*allow_dynamic_tensors=*/true);
38 | // }
39 | // ... run inference ...
40 | // ... destroy interpreter ...
41 | // ... destroy delegate ...
42 | class FlexDelegate : public TfLiteDelegate {
43 | public:
44 | // Creates a delegate that supports TF ops.
45 | //
46 | // If the underyling TF Flex context creation fails, returns null.
47 | static std::unique_ptr<FlexDelegate> Create();
48 |
49 | ~FlexDelegate();
50 |
51 | private:
52 | FlexDelegate();
53 |
54 | flex::DelegateData delegate_data_;
55 | };
56 |
57 | } // namespace tflite
58 |
59 | #endif // TENSORFLOW_LITE_DELEGATES_FLEX_DELEGATE_H_
60 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/delegates/flex/delegate_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_DELEGATES_FLEX_DELEGATE_DATA_H_
16 | #define TENSORFLOW_LITE_DELEGATES_FLEX_DELEGATE_DATA_H_
17 |
18 | #include "tensorflow/core/common_runtime/eager/context.h"
19 | #include "tensorflow/core/public/session_options.h"
20 | #include "tensorflow/lite/delegates/flex/buffer_map.h"
21 |
22 | namespace tflite {
23 | namespace flex {
24 |
25 | // Data kept by the Flex delegate for the lifetime of an Interpreter.
26 | //
27 | // Note: This class is *not* thread-safe; any dependent delegates should not be
28 | // used concurrently.
29 | class DelegateData {
30 | public:
31 | DelegateData();
32 | ~DelegateData();
33 |
34 | // Prepare the necessary EagerContext and data for execution.
35 | // This must be called at least once before execution. After preparation
36 | // succeeds, redundant calls will be ignored (even if the session_options
37 | // differ).
38 | tensorflow::Status Prepare(const tensorflow::SessionOptions& session_options);
39 |
40 | // The EagerContext that is required for execution of Flex Ops.
41 | // Note: The context is lazily created after the first call to |Prepare()|.
42 | tensorflow::EagerContext* GetEagerContext() { return eager_context_.get(); }
43 |
44 | // Map from TF Lite tensor index to TensorFlow tensor for a given context.
45 | BufferMap* GetBufferMap(const TfLiteContext* context) {
46 | return &buffer_map_[context];
47 | }
48 |
49 | private:
50 | // Will be null until Prepare() is called and completes successfully.
51 | std::unique_ptr<tensorflow::EagerContext> eager_context_;
52 | // TODO(b/112439500): Clean up stale BufferMap instances after adding the
53 | // necessary cleanup hook from a TfLiteContext to a TfLiteDelegate.
54 | std::unordered_map<const TfLiteContext*, BufferMap> buffer_map_;
55 | };
56 |
57 | } // namespace flex
58 | } // namespace tflite
59 |
60 | #endif // TENSORFLOW_LITE_DELEGATES_FLEX_DELEGATE_DATA_H_
61 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/delegates/flex/kernel.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_DELEGATES_FLEX_KERNEL_H_
16 | #define TENSORFLOW_LITE_DELEGATES_FLEX_KERNEL_H_
17 |
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 |
20 | namespace tflite {
21 | namespace flex {
22 |
23 | // Return the registration object used to initialize and execute ops that will
24 | // be delegated to TensorFlow's Eager runtime. This TF Lite op is created by
25 | // the flex delegate to handle execution of a supported subgraph. The usual
26 | // flow is that the delegate informs the interpreter of supported nodes in a
27 | // graph, and each supported subgraph is replaced with one instance of this
28 | // kernel.
29 | TfLiteRegistration GetKernel();
30 |
31 | } // namespace flex
32 | } // namespace tflite
33 |
34 | #endif // TENSORFLOW_LITE_DELEGATES_FLEX_KERNEL_H_
35 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/delegates/flex/util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_DELEGATES_FLEX_UTIL_H_
16 | #define TENSORFLOW_LITE_DELEGATES_FLEX_UTIL_H_
17 |
18 | #include "tensorflow/c/c_api_internal.h"
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/core/framework/tensor.h"
21 | #include "tensorflow/core/lib/core/status.h"
22 |
23 | namespace tflite {
24 | namespace flex {
25 |
26 | // Converts a tensorflow:Status into a TfLiteStatus. If the original status
27 | // represented an error, reports it using the given 'context'.
28 | TfLiteStatus ConvertStatus(TfLiteContext* context,
29 | const tensorflow::Status& status);
30 |
31 | // Copies the given shape and type of the TensorFlow 'src' tensor into a TF Lite
32 | // 'tensor'. Logs an error and returns kTfLiteError if the shape or type can't
33 | // be converted.
34 | TfLiteStatus CopyShapeAndType(TfLiteContext* context,
35 | const tensorflow::Tensor& src,
36 | TfLiteTensor* tensor);
37 |
38 | // Returns the TF C API Data type that corresponds to the given TfLiteType.
39 | TF_DataType GetTensorFlowDataType(TfLiteType type);
40 |
41 | // Returns the TfLiteType that corresponds to the given TF C API Data type.
42 | TfLiteType GetTensorFlowLiteType(TF_DataType);
43 |
44 | } // namespace flex
45 | } // namespace tflite
46 |
47 | #endif // TENSORFLOW_LITE_DELEGATES_FLEX_UTIL_H_
48 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/delegates/gpu/metal_delegate.h:
--------------------------------------------------------------------------------
1 | #ifndef RESEARCH_AIMATTER_TFLITE_METAL_DELEGATE_H_
2 | #define RESEARCH_AIMATTER_TFLITE_METAL_DELEGATE_H_
3 |
4 | #import <Metal/Metal.h>
5 |
6 | #include "tensorflow/lite/context.h"
7 |
8 | // Creates a new delegate instance that need to be destroyed with
9 | // DeleteFlowDelegate when delegate is no longer used by tflite.
10 | struct GpuDelegateOptions {
11 | // Allows to quantify tensors, downcast values, process in float16 etc.
12 | bool allow_precision_loss;
13 |
14 | enum class WaitType {
15 | // waitUntilCompleted
16 | kPassive,
17 | // Minimize latency. It uses active spinning instead of mutex and consumes
18 | // additional CPU resources.
19 | kActive,
20 | // Useful when the output is used with GPU pipeline then or if external
21 | // command encoder is set
22 | kDoNotWait,
23 | };
24 | WaitType wait_type;
25 | };
26 |
27 | TfLiteDelegate* NewGpuDelegate(const GpuDelegateOptions* options);
28 |
29 | // Binds user-created MTLBuffer with the float32 data format
30 | bool BindMetalBufferToTensor(TfLiteDelegate* delegate, int tensor_index,
31 | id<MTLBuffer> metal_buffer);
32 |
33 | // Destroys a delegate created with NewGpuDelegate call.
34 | void DeleteGpuDelegate(TfLiteDelegate* delegate);
35 |
36 | #endif // RESEARCH_AIMATTER_TFLITE_METAL_DELEGATE_H_
37 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/delegates/nnapi/nnapi_delegate.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_DELEGATES_NNAPI_NNAPI_DELEGATE_H_
16 | #define TENSORFLOW_LITE_DELEGATES_NNAPI_NNAPI_DELEGATE_H_
17 |
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 |
20 | namespace tflite {
21 |
22 | // Return a delegate that can be used to use the NN API.
23 | // e.g.
24 | // NnApiDelegate* delegate = NnApiDelegate();
25 | // interpreter->ModifyGraphWithDelegate(&delegate);
26 | // NnApiDelegate() returns a singleton, so you should not free this
27 | // pointer or worry about its lifetime.
28 | TfLiteDelegate* NnApiDelegate();
29 | } // namespace tflite
30 |
31 | #endif // TENSORFLOW_LITE_DELEGATES_NNAPI_NNAPI_DELEGATE_H_
32 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/error_reporter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | // Compatibility shim for moved header location.
16 | #ifndef TENSORFLOW_LITE_ERROR_REPORTER_H_
17 | #define TENSORFLOW_LITE_ERROR_REPORTER_H_
18 |
19 | #include "tensorflow/lite/core/api/error_reporter.h"
20 | #include "tensorflow/lite/stderr_reporter.h"
21 |
22 | #endif // TENSORFLOW_LITE_ERROR_REPORTER_H_
23 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/c/c_api_experimental.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_EXPERIMENTAL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_EXPERIMENTAL_H_
17 |
18 | #include "tensorflow/lite/builtin_ops.h"
19 | #include "tensorflow/lite/experimental/c/c_api.h"
20 |
21 | #ifdef __cplusplus
22 | extern "C" {
23 | #endif // __cplusplus
24 |
25 | typedef TfLiteBuiltinOperator TFL_BuiltinOperator;
26 |
27 | // Resets all variable tensors to zero.
28 | TFL_CAPI_EXPORT extern TFL_Status TFL_InterpreterResetVariableTensors(
29 | TFL_Interpreter* interpreter);
30 |
31 | // Adds an op registration for a builtin operator.
32 | //
33 | // NOTE: The interpreter will make a copy of `registration` internally, so the
34 | // caller should ensure that its contents (function pointers, etc...) remain
35 | // valid for the duration of the interpreter's lifetime. A common practice is
36 | // making the provided TFL_Registration instance static.
37 | void TFL_InterpreterOptionsAddBuiltinOp(TFL_InterpreterOptions* options,
38 | TFL_BuiltinOperator op,
39 | const TFL_Registration* registration,
40 | int min_version, int max_version);
41 |
42 | // Adds an op registration for a custom operator.
43 | //
44 | // NOTE: The interpreter will make a copy of `registration` internally, so the
45 | // caller should ensure that its contents (function pointers, etc...) remain
46 | // valid for the duration of the interpreter's lifetime. A common practice is
47 | // making the provided TFL_Registration instance static.
48 | void TFL_InterpreterOptionsAddCustomOp(TFL_InterpreterOptions* options,
49 | const char* name,
50 | const TFL_Registration* registration,
51 | int min_version, int max_version);
52 |
53 | #ifdef __cplusplus
54 | } // extern "C"
55 | #endif // __cplusplus
56 |
57 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_EXPERIMENTAL_H_
58 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/c/c_api_internal.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_INTERNAL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_INTERNAL_H_
17 |
18 | #include "tensorflow/lite/experimental/c/c_api.h"
19 |
20 | #include "tensorflow/lite/interpreter.h"
21 | #include "tensorflow/lite/model.h"
22 | #include "tensorflow/lite/op_resolver.h"
23 |
24 | // Internal structures used by the C API. These are likely to change and should
25 | // not be depended on.
26 | //
27 | // NOTE: This header does not follow C conventions and does not define a C API.
28 | // It is effectively an (internal) implementation detail of the C API.
29 |
30 | struct TFL_Model {
31 | // Sharing is safe as FlatBufferModel is const.
32 | std::shared_ptr<const tflite::FlatBufferModel> impl;
33 | };
34 |
35 | struct TFL_InterpreterOptions {
36 | enum {
37 | kDefaultNumThreads = -1,
38 | };
39 | int num_threads = kDefaultNumThreads;
40 |
41 | tflite::MutableOpResolver op_resolver;
42 |
43 | void (*error_reporter)(void* user_data, const char* format,
44 | va_list args) = nullptr;
45 | void* error_reporter_user_data = nullptr;
46 | };
47 |
48 | struct TFL_Interpreter {
49 | // Taking a reference to the (const) model data avoids lifetime-related issues
50 | // and complexity with the TFL_Model's existence.
51 | std::shared_ptr model;
52 |
53 | // The interpreter does not take ownership of the provided ErrorReporter
54 | // instance, so we ensure its validity here. Note that the interpreter may use
55 | // the reporter in its destructor, so it should be declared first.
56 | std::unique_ptr<tflite::ErrorReporter> optional_error_reporter;
57 |
58 | std::unique_ptr<tflite::Interpreter> impl;
59 | };
60 |
61 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_INTERNAL_H_
62 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/kernels/ctc_loss_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Copied from tensorflow/core/util/ctc/ctc_loss_util.h
17 | // TODO(b/111524997): Remove this file.
18 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_LOSS_UTIL_H_
19 | #define TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_LOSS_UTIL_H_
20 |
21 | #include <cmath>
22 | #include <limits>
23 |
24 | namespace tflite {
25 | namespace experimental {
26 | namespace ctc {
27 |
28 | const float kLogZero = -std::numeric_limits<float>::infinity();
29 |
30 | // Add logarithmic probabilities using:
31 | // ln(a + b) = ln(a) + ln(1 + exp(ln(b) - ln(a)))
32 | // The two inputs are assumed to be log probabilities.
33 | // (GravesTh) Eq. 7.18
34 | inline float LogSumExp(float log_prob_1, float log_prob_2) {
35 | // Always have 'b' be the smaller number to avoid the exponential from
36 | // blowing up.
37 | if (log_prob_1 == kLogZero && log_prob_2 == kLogZero) {
38 | return kLogZero;
39 | } else {
40 | return (log_prob_1 > log_prob_2)
41 | ? log_prob_1 + log1pf(expf(log_prob_2 - log_prob_1))
42 | : log_prob_2 + log1pf(expf(log_prob_1 - log_prob_2));
43 | }
44 | }
45 |
46 | } // namespace ctc
47 | } // namespace experimental
48 | } // namespace tflite
49 |
50 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_LOSS_UTIL_H_
51 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/compatibility.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_COMPATIBILITY_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_COMPATIBILITY_H_
17 |
18 | // C++ will automatically create class-specific delete operators for virtual
19 | // objects, which by default call the global delete function. For embedded
20 | // applications we want to avoid this, and won't be calling new/delete on these
21 | // objects, so we need to override the default implementation with one that does
22 | // nothing to avoid linking in ::delete().
23 | // This macro needs to be included in all subclasses of a virtual base class in
24 | // the private section.
25 | #ifdef TF_LITE_STATIC_MEMORY
26 | #define TF_LITE_REMOVE_VIRTUAL_DELETE \
27 | void operator delete(void* p) {}
28 | #else
29 | #define TF_LITE_REMOVE_VIRTUAL_DELETE
30 | #endif
31 |
32 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_COMPATIBILITY_H_
33 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/debug_log.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_H_
17 |
18 | // This function should be implemented by each target platform, and provide a
19 | // way for strings to be output to some text stream. For more information, see
20 | // tensorflow/lite/experimental/micro/debug_log.cc.
21 | extern "C" void DebugLog(const char* s);
22 |
23 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_H_
24 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/debug_log_numbers.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_NUMBERS_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_NUMBERS_H_
17 |
18 | #include <cstdint>
19 |
20 | // Output numbers to the debug logging stream.
21 | extern "C" {
22 | void DebugLogInt32(int32_t i);
23 | void DebugLogUInt32(uint32_t i);
24 | void DebugLogHex(uint32_t i);
25 | void DebugLogFloat(float i);
26 | }
27 |
28 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_DEBUG_LOG_NUMBERS_H_
29 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/CMSIS/hanning.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_HANNING_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_HANNING_H_
18 |
19 | #include <cstdint>
20 |
21 | extern const int g_hanning_size;
22 | extern const int16_t g_hanning[];
23 |
24 | #endif
25 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/CMSIS/sin_1k.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_SIN_1K_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_SIN_1K_H_
18 |
19 | #include <cstdint>
20 |
21 | extern const int g_sin_1k_size;
22 | extern const int16_t g_sin_1k[];
23 |
24 | #endif
25 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/audio_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
21 |
22 | // This is an abstraction around an audio source like a microphone, and is
23 | // expected to return 16-bit PCM sample data for a given point in time. The
24 | // sample data itself should be used as quickly as possible by the caller, since
25 | // to allow memory optimizations there are no guarantees that the samples won't
26 | // be overwritten by new data in the future. In practice, implementations should
27 | // ensure that there's a reasonable time allowed for clients to access the data
28 | // before any reuse.
29 | // The reference implementation can have no platform-specific dependencies, so
30 | // it just returns an array filled with zeros. For real applications, you should
31 | // ensure there's a specialized implementation that accesses hardware APIs.
32 | TfLiteStatus GetAudioSamples(tflite::ErrorReporter* error_reporter,
33 | int start_ms, int duration_ms,
34 | int* audio_samples_size, int16_t** audio_samples);
35 |
36 | // Returns the time that audio data was last captured in milliseconds. There's
37 | // no contract about what time zero represents, the accuracy, or the granularity
38 | // of the result. Subsequent calls will generally not return a lower value, but
39 | // even that's not guaranteed if there's an overflow wraparound.
40 | // The reference implementation of this function just returns a constantly
41 | // incrementing value for each call, since it would need a non-portable platform
42 | // call to access time information. For real applications, you'll need to write
43 | // your own platform-specific implementation.
44 | int32_t LatestAudioTimestamp();
45 |
46 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_AUDIO_PROVIDER_H_
47 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/feature_provider.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
21 |
22 | // Binds itself to an area of memory intended to hold the input features for an
23 | // audio-recognition neural network model, and fills that data area with the
24 | // features representing the current audio input, for example from a microphone.
25 | // The audio features themselves are a two-dimensional array, made up of
26 | // horizontal slices representing the frequencies at one point in time, stacked
27 | // on top of each other to form a spectrogram showing how those frequencies
28 | // changed over time.
29 | class FeatureProvider {
30 | public:
31 | // Create the provider, and bind it to an area of memory. This memory should
32 | // remain accessible for the lifetime of the provider object, since subsequent
33 | // calls will fill it with feature data. The provider does no memory
34 | // management of this data.
35 | FeatureProvider(int feature_size, uint8_t* feature_data);
36 | ~FeatureProvider();
37 |
38 | // Fills the feature data with information from audio inputs, and returns how
39 | // many feature slices were updated.
40 | TfLiteStatus PopulateFeatureData(tflite::ErrorReporter* error_reporter,
41 | int32_t last_time_in_ms, int32_t time_in_ms,
42 | int* how_many_new_slices);
43 |
44 | private:
45 | int feature_size_;
46 | uint8_t* feature_data_;
47 | // Make sure we don't try to use cached information if this is the first call
48 | // into the provider.
49 | bool is_first_run_;
50 | };
51 |
52 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_FEATURE_PROVIDER_H_
53 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/model_settings.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MODEL_SETTINGS_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MODEL_SETTINGS_H_
18 |
19 | // Keeping these as constant expressions allow us to allocate fixed-sized arrays
20 | // on the stack for our working memory.
21 |
22 | // The size of the input time series data we pass to the FFT to produce the
23 | // frequency information. This has to be a power of two, and since we're dealing
24 | // with 30ms of 16KHz inputs, which means 480 samples, this is the next value.
25 | constexpr int kMaxAudioSampleSize = 512;
26 | constexpr int kAudioSampleFrequency = 16000;
27 |
28 | // All of these values are derived from the values used during model training,
29 | // if you change your model you'll need to update these constants.
30 | constexpr int kAverageWindowSize = 6;
31 | constexpr int kFeatureSliceSize =
32 | ((kMaxAudioSampleSize / 2) + (kAverageWindowSize - 1)) / kAverageWindowSize;
33 | constexpr int kFeatureSliceCount = 49;
34 | constexpr int kFeatureElementCount = (kFeatureSliceSize * kFeatureSliceCount);
35 | constexpr int kFeatureSliceStrideMs = 20;
36 | constexpr int kFeatureSliceDurationMs = 30;
37 |
38 | constexpr int kCategoryCount = 4;
39 | constexpr int kSilenceIndex = 0;
40 | constexpr int kUnknownIndex = 1;
41 | extern const char* kCategoryLabels[kCategoryCount];
42 |
43 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_MODEL_SETTINGS_H_
44 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/no_30ms_sample_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This data was created from the PCM data in a WAV file held in v2 of the
17 | // Speech Commands test dataset, at the path:
18 | // speech_commands_test_set_v0.02/no/f9643d42_nohash_4.wav
19 | // The data was extracted starting at an offset of 8,960, which corresponds to
20 | // the 29th spectrogram slice. It's designed to be used to test the
21 | // preprocessing pipeline, to ensure that the expected spectrogram slice is
22 | // produced given this input.
23 |
24 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_NO_30MS_SAMPLE_DATA_H_
25 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_NO_30MS_SAMPLE_DATA_H_
26 |
27 | #include <cstdint>
28 |
29 | extern const int g_no_30ms_sample_data_size;
30 | extern const int16_t g_no_30ms_sample_data[];
31 |
32 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_NO_30MS_SAMPLE_DATA_H_
33 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/no_features_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_NO_FEATURES_DATA_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_NO_FEATURES_DATA_H_
18 |
19 | extern const int g_no_f9643d42_nohash_4_width;
20 | extern const int g_no_f9643d42_nohash_4_height;
21 | extern const unsigned char g_no_f9643d42_nohash_4_data[];
22 |
23 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_NO_FEATURES_DATA_H_
24 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/no_power_spectrum_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This data was extracted from the larger feature data held in
17 | // no_features_data.cc and consists of the 29th spectrogram slice of 43 values.
18 | // This is the expected result of running the sample data in
19 | // no_30ms_sample_data.cc through the preprocessing pipeline.
20 |
21 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_NO_POWER_SPECTRUM_DATA_H_
22 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_NO_POWER_SPECTRUM_DATA_H_
23 |
24 | #include <cstdint>
25 |
26 | constexpr int g_no_power_spectrum_data_size = 43;
27 | extern const uint8_t g_no_power_spectrum_data[];
28 |
29 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_NO_POWER_SPECTRUM_DATA_H_
30 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/preprocessor.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_PREPROCESSOR_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_PREPROCESSOR_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
21 |
22 | // Converts audio sample data into a more compact form that's appropriate for
23 | // feeding into a neural network. There are reference implementations that use
24 | // both floating point and fixed point available, but because the calculations
25 | // involved can be time-consuming, it's recommended that you use or write
26 | // specialized versions for your platform.
27 | TfLiteStatus Preprocess(tflite::ErrorReporter* error_reporter,
28 | const int16_t* input, int input_size, int output_size,
29 | uint8_t* output);
30 |
31 | TfLiteStatus Preprocess_1sec(tflite::ErrorReporter* error_reporter,
32 | const int16_t* input, uint8_t* output);
33 |
34 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_PREPROCESSOR_H_
35 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/tiny_conv_model_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This is a standard TensorFlow Lite model file that has been converted into a
17 | // C data array, so it can be easily compiled into a binary for devices that
18 | // don't have a file system. It was created using the command:
19 | // xxd -i tiny_conv.tflite > tiny_conv_model_data.cc
20 |
21 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_TINY_CONV_MODEL_DATA_H_
22 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_TINY_CONV_MODEL_DATA_H_
23 |
24 | extern const unsigned char g_tiny_conv_model_data[];
25 | extern const int g_tiny_conv_model_data_len;
26 |
27 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_TINY_CONV_MODEL_DATA_H_
28 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/yes_30ms_sample_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This data was created from the PCM data in a WAV file held in v2 of the
17 | // Speech Commands test dataset, at the path:
18 | // speech_commands_test_set_v0.02/yes/f2e59fea_nohash_1.wav
19 | // The data was extracted starting at an offset of 8,000, which corresponds to
20 | // the 26th spectrogram slice. It's designed to be used to test the
21 | // preprocessing pipeline, to ensure that the expected spectrogram slice is
22 | // produced given this input.
23 |
24 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_YES_30MS_SAMPLE_DATA_H_
25 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_YES_30MS_SAMPLE_DATA_H_
26 |
27 | #include <cstdint>
28 |
29 | extern const int g_yes_30ms_sample_data_size;
30 | extern const int16_t g_yes_30ms_sample_data[];
31 |
32 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_YES_30MS_SAMPLE_DATA_H_
33 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/yes_features_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_YES_FEATURES_DATA_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_YES_FEATURES_DATA_H_
18 |
19 | extern const int g_yes_f2e59fea_nohash_1_width;
20 | extern const int g_yes_f2e59fea_nohash_1_height;
21 | extern const unsigned char g_yes_f2e59fea_nohash_1_data[];
22 |
23 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_YES_FEATURES_DATA_H_
24 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/examples/micro_speech/yes_power_spectrum_data.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // This data was extracted from the larger feature data held in
17 | // yes_features_data.cc and consists of the 26th spectrogram slice of 43 values.
18 | // This is the expected result of running the sample data in
19 | // yes_30ms_sample_data.cc through the preprocessing pipeline.
20 |
21 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_YES_POWER_SPECTRUM_DATA_H_
22 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_YES_POWER_SPECTRUM_DATA_H_
23 |
24 | #include <cstdint>
25 |
26 | constexpr int g_yes_power_spectrum_data_size = 43;
27 | extern const uint8_t g_yes_power_spectrum_data[];
28 |
29 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_YES_POWER_SPECTRUM_DATA_H_
30 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/kernels/all_ops_resolver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | Licensed under the Apache License, Version 2.0 (the "License");
3 | you may not use this file except in compliance with the License.
4 | You may obtain a copy of the License at
5 | http://www.apache.org/licenses/LICENSE-2.0
6 | Unless required by applicable law or agreed to in writing, software
7 | distributed under the License is distributed on an "AS IS" BASIS,
8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | See the License for the specific language governing permissions and
10 | limitations under the License.
11 | ==============================================================================*/
12 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_ALL_OPS_RESOLVER_H_
13 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_ALL_OPS_RESOLVER_H_
14 |
15 | #include "tensorflow/lite/experimental/micro/compatibility.h"
16 | #include "tensorflow/lite/experimental/micro/micro_mutable_op_resolver.h"
17 |
18 | namespace tflite {
19 | namespace ops {
20 | namespace micro {
21 |
22 | class AllOpsResolver : public MicroMutableOpResolver {
23 | public:
24 | AllOpsResolver();
25 |
26 | private:
27 | TF_LITE_REMOVE_VIRTUAL_DELETE
28 | };
29 |
30 | } // namespace micro
31 | } // namespace ops
32 | } // namespace tflite
33 |
34 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_KERNELS_ALL_OPS_RESOLVER_H_
35 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/micro_error_reporter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_ERROR_REPORTER_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_ERROR_REPORTER_H_
17 |
18 | #include "tensorflow/lite/core/api/error_reporter.h"
19 | #include "tensorflow/lite/experimental/micro/compatibility.h"
20 | #include "tensorflow/lite/experimental/micro/debug_log.h"
21 | #include "tensorflow/lite/experimental/micro/debug_log_numbers.h"
22 |
23 | namespace tflite {
24 |
25 | class MicroErrorReporter : public ErrorReporter {
26 | public:
27 | ~MicroErrorReporter() {}
28 | int Report(const char* format, va_list args) override;
29 |
30 | private:
31 | TF_LITE_REMOVE_VIRTUAL_DELETE
32 | };
33 |
34 | } // namespace tflite
35 |
36 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_ERROR_REPORTER_H_
37 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/micro_interpreter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_INTERPRETER_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_INTERPRETER_H_
17 |
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 | #include "tensorflow/lite/core/api/error_reporter.h"
20 | #include "tensorflow/lite/core/api/op_resolver.h"
21 | #include "tensorflow/lite/experimental/micro/simple_tensor_allocator.h"
22 | #include "tensorflow/lite/schema/schema_generated.h"
23 |
24 | namespace tflite {
25 |
26 | class MicroInterpreter {
27 | public:
28 | // The lifetime of the model, op resolver, allocator, and error reporter must
29 | // be at least as long as that of the interpreter object, since the
30 | // interpreter may need to access them at any time. This means that you should
31 | // usually create them with the same scope as each other, for example having
32 | // them all allocated on the stack as local variables through a top-level
33 | // function.
34 | // The interpreter doesn't do any deallocation of any of the pointed-to
35 | // objects, ownership remains with the caller.
36 | MicroInterpreter(const Model* model, const OpResolver& op_resolver,
37 | SimpleTensorAllocator* tensor_allocator,
38 | ErrorReporter* error_reporter);
39 |
40 | TfLiteStatus Invoke();
41 |
42 | size_t tensors_size() const { return context_.tensors_size; }
43 | TfLiteTensor* tensor(int tensor_index);
44 |
45 | TfLiteTensor* input(int index);
46 | size_t inputs_size() const { return subgraph_->inputs()->Length(); }
47 |
48 | TfLiteTensor* output(int index);
49 | size_t outputs_size() const { return subgraph_->outputs()->Length(); }
50 |
51 | TfLiteStatus initialization_status() const { return initialization_status_; }
52 |
53 | ErrorReporter* error_reporter() { return error_reporter_; }
54 |
55 | private:
56 | const Model* model_;
57 | const OpResolver& op_resolver_;
58 | SimpleTensorAllocator* tensor_allocator_;
59 | ErrorReporter* error_reporter_;
60 |
61 | TfLiteStatus initialization_status_;
62 |   const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors_;
63 |   const flatbuffers::Vector<flatbuffers::Offset<Operator>>* operators_;
64 | TfLiteContext context_;
65 |
66 | const SubGraph* subgraph_;
67 | };
68 |
69 | } // namespace tflite
70 |
71 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_INTERPRETER_H_
72 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/micro_mutable_op_resolver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
17 |
18 | #include "tensorflow/lite/core/api/op_resolver.h"
19 | #include "tensorflow/lite/experimental/micro/compatibility.h"
20 |
21 | #ifndef TFLITE_REGISTRATIONS_MAX
22 | #define TFLITE_REGISTRATIONS_MAX (128)
23 | #endif
24 |
25 | namespace tflite {
26 |
27 | class MicroMutableOpResolver : public OpResolver {
28 | public:
29 | const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
30 | int version) const override;
31 | const TfLiteRegistration* FindOp(const char* op, int version) const override;
32 | void AddBuiltin(tflite::BuiltinOperator op, TfLiteRegistration* registration,
33 | int min_version = 1, int max_version = 1);
34 | void AddCustom(const char* name, TfLiteRegistration* registration,
35 | int min_version = 1, int max_version = 1);
36 |
37 | private:
38 | TfLiteRegistration registrations_[TFLITE_REGISTRATIONS_MAX];
39 | int registrations_len_ = 0;
40 |
41 | TF_LITE_REMOVE_VIRTUAL_DELETE
42 | };
43 |
44 | } // namespace tflite
45 |
46 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
47 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/micro/simple_tensor_allocator.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_SIMPLE_TENSOR_ALLOCATOR_H_
17 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_SIMPLE_TENSOR_ALLOCATOR_H_
18 |
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/core/api/error_reporter.h"
21 | #include "tensorflow/lite/schema/schema_generated.h"
22 |
23 | namespace tflite {
24 |
25 | // TODO(petewarden): This allocator never frees up or reuses any memory, even
26 | // though we have enough information about lifetimes of the tensors to do so.
27 | // This makes it pretty wasteful, so we should use a more intelligent method.
28 | class SimpleTensorAllocator {
29 | public:
30 | SimpleTensorAllocator(uint8_t* buffer, int buffer_size)
31 | : data_size_(0), data_size_max_(buffer_size), data_(buffer) {}
32 |
33 | TfLiteStatus AllocateTensor(
34 | const tflite::Tensor& flatbuffer_tensor, int create_before,
35 | int destroy_after,
36 |       const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
37 | ErrorReporter* error_reporter, TfLiteTensor* result);
38 |
39 | uint8_t* AllocateMemory(size_t size, size_t alignment);
40 |
41 | int GetDataSize() const { return data_size_; }
42 |
43 | private:
44 | int data_size_;
45 | int data_size_max_;
46 | uint8_t* data_;
47 | };
48 |
49 | } // namespace tflite
50 |
51 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_SIMPLE_TENSOR_ALLOCATOR_H_
52 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/fft.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #ifdef __cplusplus
22 | extern "C" {
23 | #endif
24 |
25 | struct complex_int16_t {
26 | int16_t real;
27 | int16_t imag;
28 | };
29 |
30 | struct FftState {
31 | int16_t* input;
32 | struct complex_int16_t* output;
33 | size_t fft_size;
34 | size_t input_size;
35 | void* scratch;
36 | size_t scratch_size;
37 | };
38 |
39 | void FftCompute(struct FftState* state, const int16_t* input,
40 | int input_scale_shift);
41 |
42 | void FftInit(struct FftState* state);
43 |
44 | void FftReset(struct FftState* state);
45 |
46 | #ifdef __cplusplus
47 | } // extern "C"
48 | #endif
49 |
50 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
51 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/fft_io.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_IO_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_IO_H_
17 |
18 | #include <stdio.h>
19 |
20 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
21 |
22 | #ifdef __cplusplus
23 | extern "C" {
24 | #endif
25 |
26 | void FftWriteMemmapPreamble(FILE* fp, const struct FftState* state);
27 | void FftWriteMemmap(FILE* fp, const struct FftState* state,
28 | const char* variable);
29 |
30 | #ifdef __cplusplus
31 | } // extern "C"
32 | #endif
33 |
34 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_IO_H_
35 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/fft_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | // Prepares an FFT for the given input size.
25 | int FftPopulateState(struct FftState* state, size_t input_size);
26 |
27 | // Frees any allocated buffers.
28 | void FftFreeStateContents(struct FftState* state);
29 |
30 | #ifdef __cplusplus
31 | } // extern "C"
32 | #endif
33 |
34 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
35 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/filterbank.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
22 |
23 | #define kFilterbankBits 12
24 |
25 | #ifdef __cplusplus
26 | extern "C" {
27 | #endif
28 |
29 | struct FilterbankState {
30 | int num_channels;
31 | int start_index;
32 | int end_index;
33 | int16_t* channel_frequency_starts;
34 | int16_t* channel_weight_starts;
35 | int16_t* channel_widths;
36 | int16_t* weights;
37 | int16_t* unweights;
38 | uint64_t* work;
39 | };
40 |
41 | // Converts the relevant complex values of an FFT output into energy (the
42 | // square magnitude).
43 | void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
44 | struct complex_int16_t* fft_output,
45 | int32_t* energy);
46 |
47 | // Computes the mel-scale filterbank on the given energy array. Output is cached
48 | // internally - to fetch it, you need to call FilterbankSqrt.
49 | void FilterbankAccumulateChannels(struct FilterbankState* state,
50 | const int32_t* energy);
51 |
52 | // Applies an integer square root to the 64 bit intermediate values of the
53 | // filterbank, and returns a pointer to them. Memory will be invalidated the
54 | // next time FilterbankAccumulateChannels is called.
55 | uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift);
56 |
57 | void FilterbankReset(struct FilterbankState* state);
58 |
59 | #ifdef __cplusplus
60 | } // extern "C"
61 | #endif
62 |
63 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
64 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_IO_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_IO_H_
17 |
18 | #include <stdio.h>
19 |
20 | #include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
21 |
22 | #ifdef __cplusplus
23 | extern "C" {
24 | #endif
25 |
26 | void FilterbankWriteMemmapPreamble(FILE* fp,
27 | const struct FilterbankState* state);
28 | void FilterbankWriteMemmap(FILE* fp, const struct FilterbankState* state,
29 | const char* variable);
30 |
31 | #ifdef __cplusplus
32 | } // extern "C"
33 | #endif
34 |
35 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_IO_H_
36 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | struct FilterbankConfig {
25 | // number of frequency channel buckets for filterbank
26 | int num_channels;
27 | // maximum frequency to include
28 | float upper_band_limit;
29 | // minimum frequency to include
30 | float lower_band_limit;
31 | // unused
32 | int output_scale_shift;
33 | };
34 |
35 | // Fills the FilterbankConfig with "sane" defaults.
36 | void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config);
37 |
38 | // Allocates any buffers.
39 | int FilterbankPopulateState(const struct FilterbankConfig* config,
40 | struct FilterbankState* state, int sample_rate,
41 | int spectrum_size);
42 |
43 | // Frees any allocated buffers.
44 | void FilterbankFreeStateContents(struct FilterbankState* state);
45 |
46 | #ifdef __cplusplus
47 | } // extern "C"
48 | #endif
49 |
50 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
51 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/frontend.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
17 |
18 | #include <stdint.h>
19 | #include <stddef.h>
20 |
21 | #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
22 | #include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
23 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
24 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
25 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
26 | #include "tensorflow/lite/experimental/microfrontend/lib/window.h"
27 |
28 | #ifdef __cplusplus
29 | extern "C" {
30 | #endif
31 |
32 | struct FrontendState {
33 | struct WindowState window;
34 | struct FftState fft;
35 | struct FilterbankState filterbank;
36 | struct NoiseReductionState noise_reduction;
37 | struct PcanGainControlState pcan_gain_control;
38 | struct LogScaleState log_scale;
39 | };
40 |
41 | struct FrontendOutput {
42 | const uint16_t* values;
43 | size_t size;
44 | };
45 |
46 | // Main entry point to processing frontend samples. Updates num_samples_read to
47 | // contain the number of samples that have been consumed from the input array.
48 | // Returns a struct containing the generated output. If not enough samples were
49 | // added to generate a feature vector, the returned size will be 0 and the
50 | // values pointer will be NULL. Note that the output pointer will be invalidated
51 | // as soon as FrontendProcessSamples is called again, so copy the contents
52 | // elsewhere if you need to use them later.
53 | struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
54 | const int16_t* samples,
55 | size_t num_samples,
56 | size_t* num_samples_read);
57 |
58 | void FrontendReset(struct FrontendState* state);
59 |
60 | #ifdef __cplusplus
61 | } // extern "C"
62 | #endif
63 |
64 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
65 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_IO_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_IO_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | int WriteFrontendStateMemmap(const char* header, const char* source,
25 | const struct FrontendState* state);
26 |
27 | #ifdef __cplusplus
28 | } // extern "C"
29 | #endif
30 |
31 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_IO_H_
32 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
19 | #include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
20 | #include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
21 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
22 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
23 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
24 | #include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
25 |
26 | #ifdef __cplusplus
27 | extern "C" {
28 | #endif
29 |
30 | struct FrontendConfig {
31 | struct WindowConfig window;
32 | struct FilterbankConfig filterbank;
33 | struct NoiseReductionConfig noise_reduction;
34 | struct PcanGainControlConfig pcan_gain_control;
35 | struct LogScaleConfig log_scale;
36 | };
37 |
38 | // Fills the FrontendConfig with "sane" defaults.
39 | void FrontendFillConfigWithDefaults(struct FrontendConfig* config);
40 |
41 | // Allocates any buffers.
42 | int FrontendPopulateState(const struct FrontendConfig* config,
43 | struct FrontendState* state, int sample_rate);
44 |
45 | // Frees any allocated buffers.
46 | void FrontendFreeStateContents(struct FrontendState* state);
47 |
48 | #ifdef __cplusplus
49 | } // extern "C"
50 | #endif
51 |
52 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
53 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/log_lut.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
17 |
18 | #include <stdint.h>
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | // Number of segments in the log lookup table. The table will be kLogSegments+1
25 | // in length (with some padding).
26 | #define kLogSegments 128
27 | #define kLogSegmentsLog2 7
28 |
29 | // Scale used by lookup table.
30 | #define kLogScale 65536
31 | #define kLogScaleLog2 16
32 | #define kLogCoeff 45426
33 |
34 | extern const uint16_t kLogLut[];
35 |
36 | #ifdef __cplusplus
37 | } // extern "C"
38 | #endif
39 |
40 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
41 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/log_scale.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #ifdef __cplusplus
22 | extern "C" {
23 | #endif
24 |
25 | struct LogScaleState {
26 | int enable_log;
27 | int scale_shift;
28 | };
29 |
30 | // Applies a fixed point logarithm to the signal and converts it to 16 bit. Note
31 | // that the signal array will be modified.
32 | uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
33 | int signal_size, int correction_bits);
34 |
35 | #ifdef __cplusplus
36 | } // extern "C"
37 | #endif
38 |
39 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
40 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_IO_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_IO_H_
17 |
18 | #include <stdio.h>
19 |
20 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
21 |
22 | #ifdef __cplusplus
23 | extern "C" {
24 | #endif
25 |
26 | void LogScaleWriteMemmap(FILE* fp, const struct LogScaleState* state,
27 | const char* variable);
28 |
29 | #ifdef __cplusplus
30 | } // extern "C"
31 | #endif
32 |
33 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_IO_H_
34 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
22 |
23 | #ifdef __cplusplus
24 | extern "C" {
25 | #endif
26 |
27 | struct LogScaleConfig {
28 | // set to false (0) to disable this module
29 | int enable_log;
30 | // scale results by 2^(scale_shift)
31 | int scale_shift;
32 | };
33 |
34 | // Populates the LogScaleConfig with "sane" default values.
35 | void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config);
36 |
37 | // Allocates any buffers.
38 | int LogScalePopulateState(const struct LogScaleConfig* config,
39 | struct LogScaleState* state);
40 |
41 | #ifdef __cplusplus
42 | } // extern "C"
43 | #endif
44 |
45 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
46 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
17 |
18 | #define kNoiseReductionBits 14
19 |
20 | #include <stdint.h>
21 | #include <stdlib.h>
22 |
23 | #ifdef __cplusplus
24 | extern "C" {
25 | #endif
26 |
27 | struct NoiseReductionState {
28 | int smoothing_bits;
29 | uint16_t even_smoothing;
30 | uint16_t odd_smoothing;
31 | uint16_t min_signal_remaining;
32 | int num_channels;
33 | uint32_t* estimate;
34 | };
35 |
36 | // Removes stationary noise from each channel of the signal using a low pass
37 | // filter.
38 | void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal);
39 |
40 | void NoiseReductionReset(struct NoiseReductionState* state);
41 |
42 | #ifdef __cplusplus
43 | } // extern "C"
44 | #endif
45 |
46 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
47 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_IO_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_IO_H_
17 |
18 | #include <stdio.h>
19 |
20 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
21 |
22 | #ifdef __cplusplus
23 | extern "C" {
24 | #endif
25 |
26 | void NoiseReductionWriteMemmapPreamble(FILE* fp,
27 | const struct NoiseReductionState* state);
28 | void NoiseReductionWriteMemmap(FILE* fp,
29 | const struct NoiseReductionState* state,
30 | const char* variable);
31 |
32 | #ifdef __cplusplus
33 | } // extern "C"
34 | #endif
35 |
36 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_IO_H_
37 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | struct NoiseReductionConfig {
25 | // scale the signal up by 2^(smoothing_bits) before reduction
26 | int smoothing_bits;
27 | // smoothing coefficient for even-numbered channels
28 | float even_smoothing;
29 | // smoothing coefficient for odd-numbered channels
30 | float odd_smoothing;
31 | // fraction of signal to preserve (1.0 disables this module)
32 | float min_signal_remaining;
33 | };
34 |
35 | // Populates the NoiseReductionConfig with "sane" default values.
36 | void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config);
37 |
38 | // Allocates any buffers.
39 | int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
40 | struct NoiseReductionState* state,
41 | int num_channels);
42 |
43 | // Frees any allocated buffers.
44 | void NoiseReductionFreeStateContents(struct NoiseReductionState* state);
45 |
46 | #ifdef __cplusplus
47 | } // extern "C"
48 | #endif
49 |
50 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
51 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #define kPcanSnrBits 12
22 | #define kPcanOutputBits 6
23 |
24 | #ifdef __cplusplus
25 | extern "C" {
26 | #endif
27 |
28 | struct PcanGainControlState {
29 | int enable_pcan;
30 | uint32_t* noise_estimate;
31 | int num_channels;
32 | int16_t* gain_lut;
33 | int32_t snr_shift;
34 | };
35 |
36 | int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut);
37 |
38 | uint32_t PcanShrink(const uint32_t x);
39 |
40 | void PcanGainControlApply(struct PcanGainControlState* state, uint32_t* signal);
41 |
42 | #ifdef __cplusplus
43 | } // extern "C"
44 | #endif
45 |
46 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
47 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
19 |
20 | #define kWideDynamicFunctionBits 32
21 | #define kWideDynamicFunctionLUTSize (4 * kWideDynamicFunctionBits - 3)
22 |
23 | #ifdef __cplusplus
24 | extern "C" {
25 | #endif
26 |
27 | struct PcanGainControlConfig {
28 | // set to false (0) to disable this module
29 | int enable_pcan;
30 | // gain normalization exponent (0.0 disables, 1.0 full strength)
31 | float strength;
32 | // positive value added in the normalization denominator
33 | float offset;
34 | // number of fractional bits in the gain
35 | int gain_bits;
36 | };
37 |
38 | void PcanGainControlFillConfigWithDefaults(
39 | struct PcanGainControlConfig* config);
40 |
41 | int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
42 | int32_t input_bits, uint32_t x);
43 |
44 | int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
45 | struct PcanGainControlState* state,
46 | uint32_t* noise_estimate,
47 | const int num_channels,
48 | const uint16_t smoothing_bits,
49 | const int32_t input_correction_bits);
50 |
51 | void PcanGainControlFreeStateContents(struct PcanGainControlState* state);
52 |
53 | #ifdef __cplusplus
54 | } // extern "C"
55 | #endif
56 |
57 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
58 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/window.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
17 |
18 | #include <stdint.h>
19 | #include <stdlib.h>
20 |
21 | #define kFrontendWindowBits 12
22 |
23 | #ifdef __cplusplus
24 | extern "C" {
25 | #endif
26 |
27 | struct WindowState {
28 | size_t size;
29 | int16_t* coefficients;
30 | size_t step;
31 |
32 | int16_t* input;
33 | size_t input_used;
34 | int16_t* output;
35 | int16_t max_abs_output_value;
36 | };
37 |
38 | // Applies a window to the samples coming in, stepping forward at the given
39 | // rate.
40 | int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
41 | size_t num_samples, size_t* num_samples_read);
42 |
43 | void WindowReset(struct WindowState* state);
44 |
45 | #ifdef __cplusplus
46 | } // extern "C"
47 | #endif
48 |
49 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
50 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/window_io.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_IO_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_IO_H_
17 |
18 | #include <stdio.h>
19 |
20 | #include "tensorflow/lite/experimental/microfrontend/lib/window.h"
21 |
22 | #ifdef __cplusplus
23 | extern "C" {
24 | #endif
25 |
26 | void WindowWriteMemmapPreamble(FILE* fp, const struct WindowState* state);
27 | void WindowWriteMemmap(FILE* fp, const struct WindowState* state,
28 | const char* variable);
29 |
30 | #ifdef __cplusplus
31 | } // extern "C"
32 | #endif
33 |
34 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_IO_H_
35 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/experimental/microfrontend/lib/window_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
16 | #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
17 |
18 | #include "tensorflow/lite/experimental/microfrontend/lib/window.h"
19 |
20 | #ifdef __cplusplus
21 | extern "C" {
22 | #endif
23 |
24 | struct WindowConfig {
25 | // length of window frame in milliseconds
26 | size_t size_ms;
27 | // length of step for next frame in milliseconds
28 | size_t step_size_ms;
29 | };
30 |
31 | // Populates the WindowConfig with "sane" default values.
32 | void WindowFillConfigWithDefaults(struct WindowConfig* config);
33 |
34 | // Allocates any buffers.
35 | int WindowPopulateState(const struct WindowConfig* config,
36 | struct WindowState* state, int sample_rate);
37 |
38 | // Frees any allocated buffers.
39 | void WindowFreeStateContents(struct WindowState* state);
40 |
41 | #ifdef __cplusplus
42 | } // extern "C"
43 | #endif
44 |
45 | #endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
46 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/activation_functor.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_ACTIVATION_FUNCTOR_H_
16 | #define TENSORFLOW_LITE_KERNELS_ACTIVATION_FUNCTOR_H_
17 |
18 | #include <algorithm>
19 | #include <cmath>
20 | #include <cstdlib>
21 |
22 | #include "tensorflow/lite/c/builtin_op_data.h"
23 |
24 | namespace tflite {
25 |
26 | // Dynamic (non-fused) activation functor. perhaps it is worth having
27 | // template instantiation?
28 | // TODO(aselle): Make this more efficient by pulling the switch to conv_eval
29 | // using template inlining.
30 | class ActivationFunctor {
31 | public:
32 | explicit ActivationFunctor(TfLiteFusedActivation act) : act_(act) {}
33 |
34 | float operator()(float a) const {
35 | switch (act_) {
36 | case kTfLiteActNone:
37 | return a;
38 | case kTfLiteActRelu:
39 | return a < 0.f ? 0.f : a;
40 | case kTfLiteActRelu6:
41 | return std::max(0.f, std::min(a, 6.f));
42 | case kTfLiteActTanh:
43 | return std::tanh(a);
44 | case kTfLiteActSigmoid:
45 | return 1.0f / (1.0f + std::exp(-a));
46 | default:
47 | // TODO(aselle): More informative fatal error!
48 | exit(1);
49 | }
50 | }
51 |
52 | private:
53 | TfLiteFusedActivation act_;
54 | };
55 |
56 | } // namespace tflite
57 |
58 | #endif // TENSORFLOW_LITE_KERNELS_ACTIVATION_FUNCTOR_H_
59 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/eigen_support.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
16 | #define TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
17 |
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 |
20 | namespace EigenForTFLite {
21 | struct ThreadPoolDevice;
22 | }
23 |
24 | namespace tflite {
25 | namespace eigen_support {
26 |
27 | // Let the framework know that the op will be using Eigen. If necessary a set of
28 | // temporary Eigen objects might be created and placed in 'context'.
29 | void IncrementUsageCounter(TfLiteContext* context);
30 |
31 | // Let the framework know that the op stopped using Eigen. If there are no more
32 | // usages all temporary Eigen objects will be deleted.
33 | void DecrementUsageCounter(TfLiteContext* context);
34 |
35 | const EigenForTFLite::ThreadPoolDevice* GetThreadPoolDevice(
36 | TfLiteContext* context);
37 |
38 | } // namespace eigen_support
39 | } // namespace tflite
40 |
41 | #endif // TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
42 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/gemm_support.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_GEMM_SUPPORT_H_
16 | #define TENSORFLOW_LITE_KERNELS_GEMM_SUPPORT_H_
17 |
18 | #include "public/gemmlowp.h"
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 |
21 | namespace tflite {
22 | namespace gemm_support {
23 |
24 | // Returns the GemmContext stored in 'context', allowing multiple ops to
25 | // share a single object, as long as they share a TfLiteContext. The caller
26 | // must ensure that this is called between IncrementUsageCounter() and
27 | // DecrementUsageCounter(). For example, in the implementation of an op:
28 | // void* Init(TfLiteContext* context, const char*, size_t) {
29 | // gemm_support::IncrementUsageCounter(context);
30 | // return nullptr;
31 | // }
32 | // void Free(TfLiteContext* context, void*) {
33 | // gemm_support::DecrementUsageCounter(context);
34 | // }
35 | // TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
36 | // auto* gemm_context = gemm_support::GetFromContext(context);
37 | // }
38 | gemmlowp::GemmContext* GetFromContext(TfLiteContext* context);
39 |
40 | // Let the framework know that the GemmContext stored in 'context' will be used
41 | // by an op. If necessary a new GemmContext is created and placed in 'context'.
42 | void IncrementUsageCounter(TfLiteContext* context);
43 |
44 | // Let the framework know that the op stopped using the GemmContext stored in
45 | // 'context'. If there are no more usages the GemmContext will be deleted.
46 | void DecrementUsageCounter(TfLiteContext* context);
47 |
48 | } // namespace gemm_support
49 | } // namespace tflite
50 |
51 | #endif // TENSORFLOW_LITE_KERNELS_GEMM_SUPPORT_H_
52 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/internal/legacy_types.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_LEGACY_TYPES_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_LEGACY_TYPES_H_
17 |
18 | #include "tensorflow/lite/kernels/internal/types.h"
19 |
20 | namespace tflite {
21 |
22 | // TODO(b/116772710): Insert legacy Dims<> code in here.
23 |
24 | } // namespace tflite
25 |
26 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_LEGACY_TYPES_H_
27 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/internal/mfcc.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Basic class for computing MFCCs from spectrogram slices.
17 |
18 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_H_
19 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_H_
20 |
21 | #include <vector>
22 |
23 | #include "tensorflow/lite/kernels/internal/mfcc_dct.h"
24 | #include "tensorflow/lite/kernels/internal/mfcc_mel_filterbank.h"
25 |
26 | namespace tflite {
27 | namespace internal {
28 |
29 | class Mfcc {
30 | public:
31 | Mfcc();
32 | bool Initialize(int input_length, double input_sample_rate);
33 |
34 | // Input is a single squared-magnitude spectrogram frame. The input spectrum
35 | // is converted to linear magnitude and weighted into bands using a
36 | // triangular mel filterbank, and a discrete cosine transform (DCT) of the
37 | // values is taken. Output is populated with the lowest dct_coefficient_count
38 | // of these values.
39 | void Compute(const std::vector<double>& spectrogram_frame,
40 | std::vector<double>* output) const;
41 |
42 | void set_upper_frequency_limit(double upper_frequency_limit) {
43 | // CHECK(!initialized_) << "Set frequency limits before calling
44 | // Initialize.";
45 | upper_frequency_limit_ = upper_frequency_limit;
46 | }
47 |
48 | void set_lower_frequency_limit(double lower_frequency_limit) {
49 | // CHECK(!initialized_) << "Set frequency limits before calling
50 | // Initialize.";
51 | lower_frequency_limit_ = lower_frequency_limit;
52 | }
53 |
54 | void set_filterbank_channel_count(int filterbank_channel_count) {
55 | /// CHECK(!initialized_) << "Set channel count before calling Initialize.";
56 | filterbank_channel_count_ = filterbank_channel_count;
57 | }
58 |
59 | void set_dct_coefficient_count(int dct_coefficient_count) {
60 | // CHECK(!initialized_) << "Set coefficient count before calling
61 | // Initialize.";
62 | dct_coefficient_count_ = dct_coefficient_count;
63 | }
64 |
65 | private:
66 | MfccMelFilterbank mel_filterbank_;
67 | MfccDct dct_;
68 | bool initialized_;
69 | double lower_frequency_limit_;
70 | double upper_frequency_limit_;
71 | int filterbank_channel_count_;
72 | int dct_coefficient_count_;
73 | };
74 |
75 | } // namespace internal
76 | } // namespace tflite
77 |
78 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_H_
79 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/internal/mfcc_dct.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Basic minimal DCT class for MFCC speech processing.
17 |
18 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_DCT_H_
19 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_DCT_H_
20 |
21 | #include <vector>
22 |
23 | namespace tflite {
24 | namespace internal {
25 |
26 | class MfccDct {
27 | public:
28 | MfccDct();
29 | bool Initialize(int input_length, int coefficient_count);
30 | void Compute(const std::vector<double>& input,
31 | std::vector<double>* output) const;
32 |
33 | private:
34 | bool initialized_;
35 | int coefficient_count_;
36 | int input_length_;
37 | std::vector<std::vector<double> > cosines_;
38 | };
39 |
40 | } // namespace internal
41 | } // namespace tflite
42 |
43 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_DCT_H_
44 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/internal/mfcc_mel_filterbank.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | // Basic class for applying a mel-scale mapping to a power spectrum.
17 |
18 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_MEL_FILTERBANK_H_
19 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_MEL_FILTERBANK_H_
20 |
21 | #include <vector>
22 |
23 | namespace tflite {
24 | namespace internal {
25 |
26 | class MfccMelFilterbank {
27 | public:
28 | MfccMelFilterbank();
29 | bool Initialize(int input_length, // Number of unique FFT bins fftsize/2+1.
30 | double input_sample_rate, int output_channel_count,
31 | double lower_frequency_limit, double upper_frequency_limit);
32 |
33 | // Takes a squared-magnitude spectrogram slice as input, computes a
34 | // triangular-mel-weighted linear-magnitude filterbank, and places the result
35 | // in output.
36 | void Compute(const std::vector<double>& input,
37 | std::vector<double>* output) const;
38 |
39 | private:
40 | double FreqToMel(double freq) const;
41 | bool initialized_;
42 | int num_channels_;
43 | double sample_rate_;
44 | int input_length_;
45 | std::vector<double> center_frequencies_; // In mel, for each mel channel.
46 |
47 | // Each FFT bin b contributes to two triangular mel channels, with
48 | // proportion weights_[b] going into mel channel band_mapper_[b], and
49 | // proportion (1 - weights_[b]) going into channel band_mapper_[b] + 1.
50 | // Thus, weights_ contains the weighting applied to each FFT bin for the
51 | // upper-half of the triangular band.
52 | std::vector<double> weights_; // Right-side weight for this fft bin.
53 |
54 | // FFT bin i contributes to the upper side of mel channel band_mapper_[i]
55 | std::vector<int> band_mapper_;
56 | int start_index_; // Lowest FFT bin used to calculate mel spectrum.
57 | int end_index_; // Highest FFT bin used to calculate mel spectrum.
58 | };
59 |
60 | } // namespace internal
61 | } // namespace tflite
62 |
63 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_MEL_FILTERBANK_H_
64 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/internal/optimized/cpu_check.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_CPU_CHECK_H_
16 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_CPU_CHECK_H_
17 |
18 | namespace tflite {
19 |
20 | #ifdef __ANDROID__
21 | #include "ndk/sources/android/cpufeatures/cpu-features.h"
22 |
23 | // Runtime check for Neon support on Android.
24 | inline bool TestCPUFeatureNeon() {
25 | #ifdef __aarch64__
26 | // ARM-64 always has NEON support.
27 | return true;
28 | #else
29 | static bool kUseAndroidNeon =
30 | (android_getCpuFamily() == ANDROID_CPU_FAMILY_ARM &&
31 | android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_ARMv7 &&
32 | android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON);
33 | return kUseAndroidNeon;
34 | #endif // __aarch64__
35 | }
36 |
37 | #elif defined USE_NEON || defined __ARM_NEON
38 |
39 | inline bool TestCPUFeatureNeon() { return true; }
40 |
41 | #else
42 |
43 | inline bool TestCPUFeatureNeon() { return false; }
44 |
45 | #endif
46 |
47 | } // namespace tflite
48 |
49 | // NEON_OR_PORTABLE(SomeFunc, arcs) calls NeonSomeFunc(args) if Neon is both
50 | // enabled at build time and detected at runtime, or PortableSomeFunc(args)
51 | // otherwise.
52 | #ifdef __ARM_ARCH_5TE__
53 | // Neon isn't available at all on ARMv5.
54 | #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
55 | #else
56 | #define NEON_OR_PORTABLE(funcname, ...) \
57 | TestCPUFeatureNeon() ? Neon##funcname(__VA_ARGS__) \
58 | : Portable##funcname(__VA_ARGS__)
59 | #endif
60 |
61 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_CPU_CHECK_H_
62 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/internal/reference/integer_ops/dequantize.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
17 | #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
18 |
19 | #include "tensorflow/lite/kernels/internal/common.h"
20 | #include "tensorflow/lite/kernels/internal/types.h"
21 |
22 | namespace tflite {
23 | namespace reference_integer_ops {
24 |
25 | inline void Dequantize(const tflite::DequantizationParams& op_params,
26 | const RuntimeShape& input_shape, const int8* input_data,
27 | const RuntimeShape& output_shape, float* output_data) {
28 | const int32 zero_point = op_params.zero_point;
29 | const double scale = op_params.scale;
30 | const int flat_size = MatchingFlatSize(input_shape, output_shape);
31 |
32 | for (int i = 0; i < flat_size; i++) {
33 | const int32 val = input_data[i];
34 | const float result = static_cast<float>(scale * (val - zero_point));
35 | output_data[i] = result;
36 | }
37 | }
38 |
39 | } // namespace reference_integer_ops
40 | } // namespace tflite
41 |
42 | #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
43 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/internal/round.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_ROUND_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_ROUND_H_

#include <cmath>

namespace tflite {

// TfLiteRound rounds half away from zero, dispatching to the global C
// round() on platforms whose libc++ lacks std::round (older Android), and
// to std::round everywhere else.
// TODO(aselle): See if we can do this only on jdk. Also mikecase, check
// if you need this for java host build.
#if defined(TF_LITE_USE_GLOBAL_ROUND) || \
    (defined(__ANDROID__) && !defined(__NDK_MAJOR__))
template <class T>
inline float TfLiteRound(const float x) {
  return ::round(x);
}
inline double TfLiteRound(const double x) { return ::round(x); }
#else
template <class T>
inline T TfLiteRound(const T x) {
  return std::round(x);
}
#endif

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_ROUND_H_
41 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/op_macros.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
#define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_

// If we're on a platform without standard IO functions, fall back to a
// non-portable function.
#ifdef TF_LITE_MCU_DEBUG_LOG

#include "tensorflow/lite/experimental/micro/micro_error_reporter.h"

#define DEBUG_LOG(x) \
  do {               \
    DebugLog(x);     \
  } while (0)

// MCUs have no abort(); halt by spinning forever after logging.
inline void InfiniteLoop() {
  DEBUG_LOG("HALTED\n");
  while (1) {
  }
}
#define TFLITE_ASSERT_FALSE InfiniteLoop();
#define TFLITE_ABORT InfiniteLoop();

#else  // TF_LITE_MCU_DEBUG_LOG

#include <cassert>
#include <cstdio>
#include <cstdlib>

#define DEBUG_LOG(x)            \
  do {                          \
    fprintf(stderr, "%s", (x)); \
  } while (0)

#define TFLITE_ASSERT_FALSE assert(false)
#define TFLITE_ABORT abort()

#endif  // TF_LITE_MCU_DEBUG_LOG

// Logs msg followed by "FATAL", then aborts the process.
#define TF_LITE_FATAL(msg)  \
  do {                      \
    DEBUG_LOG(msg);         \
    DEBUG_LOG("\nFATAL\n"); \
    TFLITE_ABORT;           \
  } while (0)

// Aborts (with the stringified condition as the message) when x is false.
#define TF_LITE_ASSERT(x)        \
  do {                           \
    if (!(x)) TF_LITE_FATAL(#x); \
  } while (0)

// Aborts when x != y; both expressions are evaluated exactly once.
#define TF_LITE_ASSERT_EQ(x, y)                            \
  do {                                                     \
    if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \
  } while (0)

#endif  // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
71 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/padding.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_PADDING_H_
16 | #define TENSORFLOW_LITE_KERNELS_PADDING_H_
17 |
18 | #include "tensorflow/lite/c/builtin_op_data.h"
19 |
20 | namespace tflite {
21 |
// Returns the symmetric per-side padding required so that a filter of
// `filter_size` taps, dilated by `dilation_rate` and stepped by `stride`,
// yields `out_size` outputs from `in_size` inputs. Never negative.
inline int ComputePadding(int stride, int dilation_rate, int in_size,
                          int filter_size, int out_size) {
  const int dilated_filter = dilation_rate * (filter_size - 1) + 1;
  const int total_overrun = (out_size - 1) * stride + dilated_filter - in_size;
  const int per_side = total_overrun / 2;
  if (per_side < 0) {
    return 0;
  }
  return per_side;
}
28 |
29 | // Matching GetWindowedOutputSize in TensorFlow.
30 | inline int ComputeOutSize(TfLitePadding padding, int image_size,
31 | int filter_size, int stride) {
32 | switch (padding) {
33 | case kTfLitePaddingSame:
34 | return (image_size + stride - 1) / stride;
35 | case kTfLitePaddingValid:
36 | return (image_size + stride - filter_size) / stride;
37 | default:
38 | return 0;
39 | }
40 | }
41 |
42 | inline TfLitePaddingValues ComputePaddingHeightWidth(
43 | int stride_height, int stride_width, int dilation_rate, int in_height,
44 | int in_width, int filter_height, int filter_width, TfLitePadding padding) {
45 | int out_width = ComputeOutSize(padding, in_width, filter_width, stride_width);
46 | int out_height =
47 | ComputeOutSize(padding, in_height, filter_height, stride_height);
48 |
49 | TfLitePaddingValues padding_values;
50 | padding_values.height =
51 | ComputePadding(stride_height, 1, in_height, filter_height, out_height);
52 | padding_values.width =
53 | ComputePadding(stride_width, 1, in_width, filter_width, out_width);
54 | return padding_values;
55 | }
56 | } // namespace tflite
57 |
58 | #endif // TENSORFLOW_LITE_KERNELS_PADDING_H_
59 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/register.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_REGISTER_H_
16 | #define TENSORFLOW_LITE_KERNELS_REGISTER_H_
17 |
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 | #include "tensorflow/lite/model.h"
20 | #include "tensorflow/lite/mutable_op_resolver.h"
21 |
22 | namespace tflite {
23 | namespace ops {
24 | namespace builtin {
25 |
// MutableOpResolver specialization for the TF Lite builtin operator set;
// overrides both lookup keys supported by OpResolver.
class BuiltinOpResolver : public MutableOpResolver {
 public:
  // Populates the resolver (implementation not visible in this header).
  BuiltinOpResolver();

  // Looks up a builtin operator registration by enum and kernel version.
  const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
                                   int version) const override;
  // Looks up a custom operator registration by name and kernel version.
  const TfLiteRegistration* FindOp(const char* op, int version) const override;
};
34 |
35 | } // namespace builtin
36 | } // namespace ops
37 | } // namespace tflite
38 |
39 | #endif // TENSORFLOW_LITE_KERNELS_REGISTER_H_
40 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/kernels/register_ref.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_KERNELS_REGISTER_REF_H_
16 | #define TENSORFLOW_LITE_KERNELS_REGISTER_REF_H_
17 |
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 | #include "tensorflow/lite/model.h"
20 | #include "tensorflow/lite/mutable_op_resolver.h"
21 |
22 | namespace tflite {
23 | namespace ops {
24 | namespace builtin {
25 |
// MutableOpResolver specialization mirroring BuiltinOpResolver.
// NOTE(review): the `Ref` name suggests reference (non-optimized) kernels —
// confirm against the implementation.
class BuiltinRefOpResolver : public MutableOpResolver {
 public:
  // Populates the resolver (implementation not visible in this header).
  BuiltinRefOpResolver();

  // Looks up a builtin operator registration by enum and kernel version.
  const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
                                   int version) const override;
  // Looks up a custom operator registration by name and kernel version.
  const TfLiteRegistration* FindOp(const char* op, int version) const override;
};
34 |
35 | } // namespace builtin
36 | } // namespace ops
37 | } // namespace tflite
38 |
39 | #endif // TENSORFLOW_LITE_KERNELS_REGISTER_REF_H_
40 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/memory_planner.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_MEMORY_PLANNER_H_
16 | #define TENSORFLOW_LITE_MEMORY_PLANNER_H_
17 |
18 | #include "tensorflow/lite/c/c_api_internal.h"
19 |
20 | namespace tflite {
21 |
// A MemoryPlanner is responsible for planning and executing a number of
// memory-related operations that are necessary in TF Lite.
// Pure interface: every operation is a pure virtual returning TfLiteStatus.
class MemoryPlanner {
 public:
  virtual ~MemoryPlanner() {}

  // Plans the necessary memory allocations. This is the MemoryPlanner's
  // pre-processing step and is called when the graph structure is known but
  // actual size of the tensors is not.
  virtual TfLiteStatus PlanAllocations() = 0;

  // Allocates the necessary memory to execute all nodes in the interval
  // [first_node, last_node] (inclusive on both ends).
  virtual TfLiteStatus ExecuteAllocations(int first_node, int last_node) = 0;

  // Invalidates allocations made earlier. This is called when tensors sizes
  // have changed. All planned allocations remain, but can't be used until
  // ExecuteAllocations() is called.
  virtual TfLiteStatus ResetAllocations() = 0;
};
42 |
43 | } // namespace tflite
44 |
45 | #endif // TENSORFLOW_LITE_MEMORY_PLANNER_H_
46 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/models/smartreply/predictor.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_MODELS_SMARTREPLY_PREDICTOR_H_
17 | #define TENSORFLOW_LITE_MODELS_SMARTREPLY_PREDICTOR_H_
18 |
19 | #include
20 | #include
21 |
22 | #include "tensorflow/lite/model.h"
23 |
24 | namespace tflite {
25 | namespace custom {
26 | namespace smartreply {
27 |
28 | const int kDefaultNumResponse = 10;
29 | const float kDefaultBackoffConfidence = 1e-4;
30 |
31 | class PredictorResponse;
32 | struct SmartReplyConfig;
33 |
34 | // With a given string as input, predict the response with a Tflite model.
35 | // When config.backoff_response is not empty, predictor_responses will be filled
36 | // with messagees from backoff response.
37 | void GetSegmentPredictions(const std::vector& input,
38 | const ::tflite::FlatBufferModel& model,
39 | const SmartReplyConfig& config,
40 | std::vector* predictor_responses);
41 |
42 | // Data object used to hold a single predictor response.
43 | // It includes messages, and confidence.
44 | class PredictorResponse {
45 | public:
46 | PredictorResponse(const std::string& response_text, float score) {
47 | response_text_ = response_text;
48 | prediction_score_ = score;
49 | }
50 |
51 | // Accessor methods.
52 | const std::string& GetText() const { return response_text_; }
53 | float GetScore() const { return prediction_score_; }
54 |
55 | private:
56 | std::string response_text_ = "";
57 | float prediction_score_ = 0.0;
58 | };
59 |
60 | // Configurations for SmartReply.
61 | struct SmartReplyConfig {
62 | // Maximum responses to return.
63 | int num_response;
64 | // Default confidence for backoff responses.
65 | float backoff_confidence;
66 | // Backoff responses are used when predicted responses cannot fulfill the
67 | // list.
68 | std::vector backoff_responses;
69 |
70 | SmartReplyConfig(const std::vector& backoff_responses)
71 | : num_response(kDefaultNumResponse),
72 | backoff_confidence(kDefaultBackoffConfidence),
73 | backoff_responses(backoff_responses) {}
74 | };
75 |
76 | } // namespace smartreply
77 | } // namespace custom
78 | } // namespace tflite
79 |
80 | #endif // TENSORFLOW_LITE_MODELS_SMARTREPLY_PREDICTOR_H_
81 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/nnapi_delegate.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_NNAPI_DELEGATE_H_
16 | #define TENSORFLOW_LITE_NNAPI_DELEGATE_H_
17 |
18 | #include "tensorflow/lite/allocation.h"
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/core/api/error_reporter.h"
21 | #include "tensorflow/lite/core/subgraph.h"
22 | #include "tensorflow/lite/interpreter.h"
23 |
24 | class ANeuralNetworksModel;
25 | class ANeuralNetworksMemory;
26 | class ANeuralNetworksCompilation;
27 |
28 | namespace tflite {
29 |
30 | class NNAPIAllocation : public MMAPAllocation {
31 | public:
32 | NNAPIAllocation(const char* filename, ErrorReporter* error_reporter);
33 | ~NNAPIAllocation();
34 |
35 | size_t offset(const void* ptr) const {
36 | auto signed_offset = reinterpret_cast(ptr) -
37 | reinterpret_cast(mmapped_buffer_);
38 |
39 | return static_cast(signed_offset);
40 | }
41 |
42 | ANeuralNetworksMemory* memory() const { return handle_; }
43 | bool valid() const override { return handle_ != nullptr; }
44 |
45 | private:
46 | mutable ANeuralNetworksMemory* handle_ = nullptr;
47 | };
48 |
49 | class NNAPIDelegate {
50 | public:
51 | ~NNAPIDelegate();
52 |
53 | // Convert a tflite graph to NNAPI
54 | TfLiteStatus BuildGraph(Subgraph* subgraph);
55 |
56 | // Run
57 | TfLiteStatus Invoke(Subgraph* subgraph);
58 |
59 | // Whether the current platform supports NNAPI delegation.
60 | static bool IsSupported();
61 |
62 | private:
63 | // The NN API model handle
64 | ANeuralNetworksModel* nn_model_ = nullptr;
65 | // The NN API compilation handle
66 | ANeuralNetworksCompilation* nn_compiled_model_ = nullptr;
67 | // Model status
68 | TfLiteStatus model_status_ = kTfLiteOk;
69 |
70 | // List of state tensors for LSTM, RNN, SVDF.
71 | // NN API does not allow ops to maintain states across multiple
72 | // invocations. We need to manually create state input tensors from
73 | // corresponding state output tensors of TFLite operations, and map them
74 | // correctly.
75 | std::vector model_states_inputs_; // holds NNAPI operand ids
76 | std::vector model_states_outputs_; // holds TFLite tensor ids
77 | };
78 |
79 | } // namespace tflite
80 |
81 | #endif // TENSORFLOW_LITE_NNAPI_DELEGATE_H_
82 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/op_resolver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | // Compatibility shim for moved header location.
16 | #ifndef TENSORFLOW_LITE_OP_RESOLVER_H_
17 | #define TENSORFLOW_LITE_OP_RESOLVER_H_
18 |
19 | #include "tensorflow/lite/core/api/op_resolver.h"
20 | #include "tensorflow/lite/mutable_op_resolver.h"
21 |
22 | #endif // TENSORFLOW_LITE_OP_RESOLVER_H_
23 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/optional_debug_tools.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | // Optional debugging functionality. For small sized binaries, these are not
16 | // needed.
17 | #ifndef TENSORFLOW_LITE_OPTIONAL_DEBUG_TOOLS_H_
18 | #define TENSORFLOW_LITE_OPTIONAL_DEBUG_TOOLS_H_
19 |
20 | #include "tensorflow/lite/interpreter.h"
21 |
22 | namespace tflite {
23 |
// Prints a dump of what tensors and what nodes are in the interpreter.
// `interpreter` must be non-null. NOTE(review): output destination and
// read-only behavior are not visible from this header — confirm in the
// implementation.
void PrintInterpreterState(Interpreter* interpreter);
26 |
27 | } // namespace tflite
28 |
29 | #endif // TENSORFLOW_LITE_OPTIONAL_DEBUG_TOOLS_H_
30 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/profiling/profile_summarizer.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #ifndef TENSORFLOW_LITE_PROFILING_PROFILE_SUMMARIZER_H_
17 | #define TENSORFLOW_LITE_PROFILING_PROFILE_SUMMARIZER_H_
18 |
19 | #include
20 |
21 | #include "tensorflow/lite/interpreter.h"
22 | #include "tensorflow/lite/profiling/profiler.h"
23 | #include "tensorflow/core/util/stats_calculator.h"
24 |
25 | namespace tflite {
26 | namespace profiling {
27 |
28 | // Creates a summary of operator invocations in the interpreter.
29 | class ProfileSummarizer {
30 | public:
31 | ProfileSummarizer();
32 | virtual ~ProfileSummarizer() {}
33 |
34 | // Process profile events to update statistics for operator invocations.
35 | void ProcessProfiles(const std::vector& profile_stats,
36 | const tflite::Interpreter& interpreter);
37 |
38 | // Returns a string detailing the accumulated runtime stats in a tab-separated
39 | // format which can be pasted into a spreadsheet for further analysis.
40 | std::string GetOutputString() const {
41 | return stats_calculator_->GetOutputString();
42 | }
43 |
44 | std::string GetShortSummary() const {
45 | return stats_calculator_->GetShortSummary();
46 | }
47 |
48 | private:
49 | std::unique_ptr stats_calculator_;
50 | };
51 |
52 | } // namespace profiling
53 | } // namespace tflite
54 |
55 | #endif // TENSORFLOW_LITE_PROFILING_PROFILE_SUMMARIZER_H_
56 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/profiling/time.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
#ifndef TENSORFLOW_LITE_PROFILING_TIME_H_
#define TENSORFLOW_LITE_PROFILING_TIME_H_

#include <cstdint>

namespace tflite {
namespace profiling {
namespace time {

// Returns the current time in microseconds. NOTE(review): the clock source
// (monotonic vs wall) is not specified in this header — confirm in time.cc.
uint64_t NowMicros();

// Blocks the calling thread for approximately `micros` microseconds.
void SleepForMicros(uint64_t micros);

}  // namespace time
}  // namespace profiling
}  // namespace tflite
#endif  // TENSORFLOW_LITE_PROFILING_TIME_H_
29 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/schema/builtin_ops_header/generator.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
// An utility library to generate pure C header for builtin ops definition.
#ifndef TENSORFLOW_LITE_SCHEMA_BUILTIN_OPS_HEADER_GENERATOR_H_
#define TENSORFLOW_LITE_SCHEMA_BUILTIN_OPS_HEADER_GENERATOR_H_

#include <iostream>
#include <string>

namespace tflite {
namespace builtin_ops_header {

// Check if the input enum name (from the Flatbuffer definition) is valid.
bool IsValidInputEnumName(const std::string& name);

// Convert the enum name from Flatbuffer convention to C enum name convention.
// E.g. `L2_POOL_2D` becomes `kTfLiteBuiltinL2Pool2d`.
std::string ConstantizeVariableName(const std::string& name);

// The function generates a pure C header for builtin ops definition, and
// writes it to the output stream.
bool GenerateHeader(std::ostream& os);

}  // namespace builtin_ops_header
}  // namespace tflite

#endif  // TENSORFLOW_LITE_SCHEMA_BUILTIN_OPS_HEADER_GENERATOR_H_
39 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/stderr_reporter.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_STDERR_REPORTER_H_
16 | #define TENSORFLOW_LITE_STDERR_REPORTER_H_
17 |
18 | #include
19 | #include "tensorflow/lite/c/c_api_internal.h"
20 | #include "tensorflow/lite/core/api/error_reporter.h"
21 |
22 | namespace tflite {
23 |
24 | // An error reporter that simplify writes the message to stderr.
25 | struct StderrReporter : public ErrorReporter {
26 | int Report(const char* format, va_list args) override;
27 | };
28 |
29 | // Return the default error reporter (output to stderr).
30 | ErrorReporter* DefaultErrorReporter();
31 |
32 | } // namespace tflite
33 |
34 | #endif // TENSORFLOW_LITE_STDERR_REPORTER_H_
35 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/string.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
// Abstract string. We don't want even absl at this level.
#ifndef TENSORFLOW_LITE_STRING_H_
#define TENSORFLOW_LITE_STRING_H_

#include <string>

namespace tflite {

// Unless the build provides a global `string` type, alias std::string so the
// rest of TF Lite can use the unqualified name `tflite::string`.
#ifndef HAS_GLOBAL_STRING
using std::string;
#endif

}  // namespace tflite

#endif  // TENSORFLOW_LITE_STRING_H_
30 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/testing/generate_testspec.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_TESTING_GENERATE_TESTSPEC_H_
16 | #define TENSORFLOW_LITE_TESTING_GENERATE_TESTSPEC_H_
17 |
18 | #include
19 | #include
20 | #include
21 |
22 | #include "tensorflow/lite/string.h"
23 |
24 | namespace tflite {
25 | namespace testing {
26 |
27 | // Generate test spec by executing TensorFlow model on random inputs.
28 | // The test spec can be consumed by ParseAndRunTests.
29 | // See test spec format in parse_testdata.h
30 | //
31 | // Inputs:
32 | // stream: mutable iostream that contains the contents of test spec.
33 | // tensorflow_model_path: path to TensorFlow model.
34 | // tflite_model_path: path to tflite_model_path that the test spec runs
35 | // num_invocations: how many pairs of inputs and outputs will be generated.
36 | // against. input_layer: names of input tensors. Example: input1
37 | // input_layer_type: datatypes of input tensors. Example: float
38 | // input_layer_shape: shapes of input tensors, separated by comma. example:
39 | // 1,3,4 output_layer: names of output tensors. Example: output
40 | bool GenerateTestSpecFromTensorflowModel(
41 | std::iostream& stream, const string& tensorflow_model_path,
42 | const string& tflite_model_path, int num_invocations,
 43 |     const std::vector<string>& input_layer,
 44 |     const std::vector<string>& input_layer_type,
 45 |     const std::vector<string>& input_layer_shape,
 46 |     const std::vector<string>& output_layer);
47 |
48 | // Generates random values that are filled into the tensor.
49 | // random_func returns the generated random element at given index.
 50 | template <typename T>
 51 | std::vector<T> GenerateRandomTensor(const std::vector<int>& shape,
 52 |                                     const std::function<T(int)>& random_func) {
53 | int64_t num_elements = 1;
54 | for (const int dim : shape) {
55 | num_elements *= dim;
56 | }
57 |
 58 |   std::vector<T> result(num_elements);
59 | for (int i = 0; i < num_elements; i++) {
60 | result[i] = random_func(i);
61 | }
62 | return result;
63 | }
64 |
65 | } // namespace testing
66 | } // namespace tflite
67 |
68 | #endif // TENSORFLOW_LITE_TESTING_GENERATE_TESTSPEC_H_
69 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/testing/init_tensorflow.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_TESTING_INIT_TENSORFLOW_H_
16 | #define TENSORFLOW_LITE_TESTING_INIT_TENSORFLOW_H_
17 |
18 | namespace tflite {
19 |
20 | // Initializes tensorflow's libraries. Note that this simulates an empty
21 | // command line, so flags are not initialized.
22 | void InitTensorFlow();
23 |
24 | } // namespace tflite
25 |
26 | #endif // TENSORFLOW_LITE_TESTING_INIT_TENSORFLOW_H_
27 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/testing/join.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_TESTING_JOIN_H_
16 | #define TENSORFLOW_LITE_TESTING_JOIN_H_
17 |
 18 | #include <cstdlib>
 19 | #include <iomanip>
 20 | #include <sstream>
21 |
22 | #include "tensorflow/lite/string.h"
23 |
24 | namespace tflite {
25 | namespace testing {
26 |
27 | // Join a list of data separated by delimiter.
 28 | template <typename T>
29 | string Join(T* data, size_t len, const string& delimiter) {
30 | if (len == 0 || data == nullptr) {
31 | return "";
32 | }
33 | std::stringstream result;
34 | result << std::setprecision(9) << data[0];
35 | for (int i = 1; i < len; i++) {
36 | result << std::setprecision(9) << delimiter << data[i];
37 | }
38 | return result.str();
39 | }
40 |
41 | // Join a list of uint8 data separated by a delimiter. Cast data to int before
42 | // placing it in the string to prevent values from being treated like chars.
43 | template <>
 44 | inline string Join<uint8_t>(uint8_t* data, size_t len,
45 | const string& delimiter) {
46 | if (len == 0 || data == nullptr) {
47 | return "";
48 | }
49 | std::stringstream result;
 50 |   result << static_cast<int>(data[0]);
 51 |   for (int i = 1; i < len; i++) {
 52 |     result << delimiter << static_cast<int>(data[i]);
53 | }
54 | return result.str();
55 | }
56 |
57 | } // namespace testing
58 | } // namespace tflite
59 |
60 | #endif // TENSORFLOW_LITE_TESTING_JOIN_H_
61 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/testing/message.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_TESTING_MESSAGE_H_
16 | #define TENSORFLOW_LITE_TESTING_MESSAGE_H_
17 |
 18 | #include <memory>
 19 | #include <string>
 20 | #include <vector>
21 |
22 | namespace tflite {
23 | namespace testing {
24 |
25 | // A Message is a textual protobuf-like structure that looks like:
26 | // tag {
27 | // f : "values"
28 | // child {
29 | // a : 1
30 | // }
31 | // }
32 | // This class provides the framework for processing message but does not
33 | // associate any particular behavior to fields and submessage. In order
34 | // to properly parse a stream this class must be derived.
35 | class Message {
36 | public:
37 | // Reads a stream, tokenizes it and create a new message under the given
38 | // top-level message. Returns true if the parsing succeeded.
39 | static bool Read(std::istream* input, Message* message);
40 |
41 | Message() {}
42 | virtual ~Message() {}
43 |
44 | // Called when a new field is found. For example, when:
45 | // f : "values"
46 | // is found, it triggers:
47 | // SetField("f", "values");
48 | virtual void SetField(const std::string& name, const std::string& value) {}
49 |
50 | // Called when a submessage is started. For example, when:
51 | // child {
52 | // is found, it triggers
53 | // AddChild("child");
54 | // If nullptr is returned, the contents of the submessage will be ignored.
55 | // Otherwise, the returned Message will be used to handle new fields and new
56 | // submessages. The caller should not take ownership of the returned pointer.
57 | virtual Message* AddChild(const std::string& name) { return nullptr; }
58 |
59 | // Called when a submessage is completed, that is, whenever a '}' is found.
60 | virtual void Finish() {}
61 |
62 | protected:
63 | // Takes ownership of the given pointer. Subclasses can use this method if
64 | // they don't want to implement their own ownership semantics.
65 | Message* Store(Message* n) {
66 | children_.emplace_back(n);
67 | return n;
68 | }
69 |
70 | // Returns a list of all owned submessages.
 71 |   const std::vector<std::unique_ptr<Message>>& Children() const {
72 | return children_;
73 | }
74 |
75 | private:
 76 |   std::vector<std::unique_ptr<Message>> children_;
77 | };
78 |
79 | } // namespace testing
80 | } // namespace tflite
81 |
82 | #endif // TENSORFLOW_LITE_TESTING_MESSAGE_H_
83 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/testing/parse_testdata.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_TESTING_PARSE_TESTDATA_H_
16 | #define TENSORFLOW_LITE_TESTING_PARSE_TESTDATA_H_
17 |
 18 | #include <vector>
19 | #include "tensorflow/lite/interpreter.h"
20 | #include "tensorflow/lite/testing/test_runner.h"
21 |
22 | namespace tflite {
23 | namespace testing {
24 |
25 | // Shape and data for a float tensor
26 | struct FloatTensor {
 27 |   std::vector<int> shape;
 28 |   std::vector<float> flat_data;
29 | };
30 |
31 | // A prescribed input, output example
32 | struct Example {
 33 |   std::vector<FloatTensor> inputs;
 34 |   std::vector<FloatTensor> outputs;
35 | };
36 |
37 | // Parses an example input and output file (used for unit tests)
 38 | TfLiteStatus ParseExamples(const char* filename,
 39 |                            std::vector<Example>* examples);
40 |
41 | // Inputs Tensors into a TensorFlow lite interpreter. Note, this will run
42 | // interpreter.AllocateTensors();
43 | TfLiteStatus FeedExample(tflite::Interpreter* interpreter, const Example&);
44 |
45 | // Check outputs against (already) evaluated result.
46 | TfLiteStatus CheckOutputs(tflite::Interpreter* interpreter, const Example&);
47 |
48 | // Parses a test description and feeds the given test runner with data.
49 | // The input format is similar to an ASCII proto:
50 | // // Loads model 'add.bin' from the TestRunner's model directory.
51 | // load_model: "add.bin"
52 | // // Changes the shape of inputs, provided in the same order they appear
53 | // // in the model.
54 | // reshape {
55 | // input: "1,224,224,3"
56 | // input: "1,3,4,1"
57 | // }
58 | // // Fills the given persistent tensors with zeros.
59 | // init_state: 0,1,2,3
60 | // // Invokes the interpreter with the given input and checks that it
61 | // // produces the expected output. Inputs and outputs should be specified in
62 | // // the order they appear in the model.
63 | // invoke {
64 | // input: "1,2,3,4,56"
65 | // input: "0.1,0.2,0.3,4.3,56.4"
66 | // output: "12,3,4,545,3"
67 | // output: "0.01,0.02"
68 | // }
69 | bool ParseAndRunTests(std::istream* input, TestRunner* test_runner,
70 | int max_invocations = -1);
71 |
72 | } // namespace testing
73 | } // namespace tflite
74 |
75 | #endif // TENSORFLOW_LITE_TESTING_PARSE_TESTDATA_H_
76 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/testing/tflite_diff_util.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_TESTING_TFLITE_DIFF_UTIL_H_
16 | #define TENSORFLOW_LITE_TESTING_TFLITE_DIFF_UTIL_H_
17 |
 18 | #include <vector>
19 |
20 | #include "tensorflow/lite/string.h"
21 |
22 | namespace tflite {
23 | namespace testing {
24 |
25 | // Configurations to run Tflite diff test.
26 | struct DiffOptions {
27 | // Path of tensorflow model.
28 | string tensorflow_model;
29 | // Path of tensorflow lite model.
30 | string tflite_model;
31 | // Names of input tensors.
32 | // Example: input_1,input_2
 33 |   std::vector<string> input_layer;
 34 |   // Data types of input tensors.
 35 |   // Example: float,int
 36 |   std::vector<string> input_layer_type;
 37 |   // Shapes of input tensors, separated by comma.
 38 |   // Example: 1,3,4,1
 39 |   std::vector<string> input_layer_shape;
 40 |   // Names of output tensors.
 41 |   // Example output_1,output_2
 42 |   std::vector<string> output_layer;
43 | // Number of full runs (from building interpreter to checking outputs) in
44 | // each of the passes. The first pass has a single inference, while the
45 | // second pass does multiple inferences back to back.
46 | int num_runs_per_pass;
47 | // Path to the delegate library to be loaded in order to execute ops. Must be
48 | // `{"", FLEX}`.
49 | string delegate;
50 | };
51 |
52 | // Run a single TensorFLow Lite diff test with a given options.
53 | bool RunDiffTest(const DiffOptions& options, int num_invocations);
54 |
55 | } // namespace testing
56 | } // namespace tflite
57 |
58 | #endif // TENSORFLOW_LITE_TESTING_TFLITE_DIFF_UTIL_H_
59 |
--------------------------------------------------------------------------------
/Pods/TensorFlowLiteGpuExperimental/Frameworks/tensorflow_lite_gpu.framework/Headers/tensorflow/lite/testing/tflite_driver.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 | #ifndef TENSORFLOW_LITE_TESTING_TFLITE_DRIVER_H_
16 | #define TENSORFLOW_LITE_TESTING_TFLITE_DRIVER_H_
17 |
 18 | #include <map>