├── README.md
├── camera
│   ├── squarePNG.png
│   ├── camera_example.xcodeproj
│   │   ├── project.xcworkspace
│   │   │   └── contents.xcworkspacedata
│   │   └── project.pbxproj
│   ├── CameraExampleAppDelegate.h
│   ├── main.mm
│   ├── ios_image_load.h
│   ├── Info.plist
│   ├── CameraExampleAppDelegate.m
│   ├── data
│   │   ├── food101_labels.txt
│   │   └── LICENSE
│   ├── CameraExampleViewController.h
│   ├── tensorflow_utils.h
│   ├── ios_image_load.mm
│   ├── en.lproj
│   │   └── MainStoryboard_iPhone.storyboard
│   ├── tensorflow_utils.mm
│   └── CameraExampleViewController.mm
├── model_export
│   ├── sushi.png
│   ├── mobile.jpg
│   ├── requirements.txt
│   └── labels.txt
├── Creating a Deep Learning iOS App with Keras and Tensorflow_files
│   ├── Creating a Deep Learning iOS App with Keras and Tensorflow_1_0.jpeg
│   ├── Creating a Deep Learning iOS App with Keras and Tensorflow_1_0.jpg
│   ├── Creating a Deep Learning iOS App with Keras and Tensorflow_23_1.png
│   ├── Creating a Deep Learning iOS App with Keras and Tensorflow_24_1.png
│   ├── Creating a Deep Learning iOS App with Keras and Tensorflow_25_1.png
│   └── Creating a Deep Learning iOS App with Keras and Tensorflow_26_1.png
├── LICENSE
├── .gitignore
└── Creating a Deep Learning iOS App with Keras and Tensorflow.md
/README.md:
--------------------------------------------------------------------------------
1 | Creating a Deep Learning iOS App with Keras and Tensorflow.md
--------------------------------------------------------------------------------
/camera/squarePNG.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stratospark/food-101-mobile/HEAD/camera/squarePNG.png
--------------------------------------------------------------------------------
/model_export/sushi.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stratospark/food-101-mobile/HEAD/model_export/sushi.png
--------------------------------------------------------------------------------
/model_export/mobile.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stratospark/food-101-mobile/HEAD/model_export/mobile.jpg
--------------------------------------------------------------------------------
/camera/camera_example.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <Workspace
3 |    version = "1.0">
4 |    <FileRef
5 |       location = "self:camera_example.xcodeproj">
6 |    </FileRef>
7 | </Workspace>
8 | 
--------------------------------------------------------------------------------
/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_1_0.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stratospark/food-101-mobile/HEAD/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_1_0.jpeg
--------------------------------------------------------------------------------
/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_1_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stratospark/food-101-mobile/HEAD/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_1_0.jpg
--------------------------------------------------------------------------------
/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_23_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stratospark/food-101-mobile/HEAD/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_23_1.png
--------------------------------------------------------------------------------
/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_24_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stratospark/food-101-mobile/HEAD/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_24_1.png
--------------------------------------------------------------------------------
/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_25_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stratospark/food-101-mobile/HEAD/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_25_1.png
--------------------------------------------------------------------------------
/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_26_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stratospark/food-101-mobile/HEAD/Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_26_1.png
--------------------------------------------------------------------------------
/camera/CameraExampleAppDelegate.h:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google Inc. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #import <UIKit/UIKit.h>
16 |
17 | @interface CameraExampleAppDelegate : UIResponder <UIApplicationDelegate>
18 |
19 | @property(strong, nonatomic) UIWindow *window;
20 |
21 | @end
22 |
--------------------------------------------------------------------------------
/camera/main.mm:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google Inc. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #import <UIKit/UIKit.h>
16 |
17 | #import "CameraExampleAppDelegate.h"
18 |
19 | int main(int argc, char *argv[]) {
20 | int retVal = 0;
21 |
22 | @autoreleasepool {
23 | retVal = UIApplicationMain(
24 | argc, argv, nil, NSStringFromClass([CameraExampleAppDelegate class]));
25 | }
26 | return retVal;
27 | }
28 |
--------------------------------------------------------------------------------
/camera/ios_image_load.h:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google Inc. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #ifndef TENSORFLOW_CONTRIB_IOS_EXAMPLES_CAMERA_IMAGE_LOAD_H_
16 | #define TENSORFLOW_CONTRIB_IOS_EXAMPLES_CAMERA_IMAGE_LOAD_H_
17 |
18 | #include <vector>
19 |
20 | #include "tensorflow/core/framework/types.h"
21 |
22 | std::vector<tensorflow::uint8> LoadImageFromFile(const char* file_name,
23 | int* out_width,
24 | int* out_height,
25 | int* out_channels);
26 |
27 | #endif // TENSORFLOW_CONTRIB_IOS_EXAMPLES_CAMERA_IMAGE_LOAD_H_
28 |
--------------------------------------------------------------------------------
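
A minimal usage sketch for LoadImageFromFile (not part of the repo): it decodes a bundled PNG or JPEG into raw RGBA bytes. The squarePNG resource name matches the PNG shipped in /camera; the surrounding code is illustrative only.

    // Sketch: decode camera/squarePNG.png from the app bundle into RGBA bytes.
    NSString* image_path =
        [[NSBundle mainBundle] pathForResource:@"squarePNG" ofType:@"png"];
    int width, height, channels;
    std::vector<tensorflow::uint8> image_data = LoadImageFromFile(
        [image_path UTF8String], &width, &height, &channels);
    // On success the buffer holds width * height * 4 bytes (channels == 4);
    // for an unknown file suffix it is empty and all three dimensions are zero.
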
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Patrick Rodriguez
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/model_export/requirements.txt:
--------------------------------------------------------------------------------
1 | backports-abc==0.5
2 | backports.shutil-get-terminal-size==1.0.0
3 | backports.ssl-match-hostname==3.4.0.2
4 | bleach==1.5.0
5 | configparser==3.5.0
6 | cycler==0.10.0
7 | decorator==4.0.11
8 | entrypoints==0.2.2
9 | enum34==1.1.6
10 | funcsigs==1.0.2
11 | functools32==3.2.3.post2
12 | h5py==2.6.0
13 | html5lib==0.999
14 | ipykernel==4.5.2
15 | ipython==5.3.0
16 | ipython-genutils==0.1.0
17 | ipywidgets==5.2.2
18 | Jinja2==2.9.5
19 | jsonschema==2.5.1
20 | jupyter==1.0.0
21 | jupyter-client==5.0.0
22 | jupyter-console==5.1.0
23 | jupyter-core==4.3.0
24 | Keras==1.2.2
25 | MarkupSafe==0.23
26 | matplotlib==2.0.0
27 | mistune==0.7.3
28 | mock==2.0.0
29 | nbconvert==5.1.1
30 | nbformat==4.3.0
31 | notebook==4.3.1
32 | numpy==1.12.0
33 | pandocfilters==1.4.1
34 | pathlib2==2.2.0
35 | pbr==2.0.0
36 | pexpect==4.2.1
37 | pickleshare==0.7.4
38 | prompt-toolkit==1.0.9
39 | protobuf==3.2.0
40 | ptyprocess==0.5.1
41 | pycairo==1.10.0
42 | Pygments==2.2.0
43 | pyparsing==2.1.4
44 | python-dateutil==2.6.0
45 | pytz==2016.10
46 | PyYAML==3.12
47 | pyzmq==16.0.2
48 | qtconsole==4.2.1
49 | scandir==1.5
50 | scipy==0.18.1
51 | simplegeneric==0.8.1
52 | singledispatch==3.4.0.3
53 | six==1.10.0
54 | subprocess32==3.2.7
55 | tensorflow-gpu==1.0.1
56 | terminado==0.6
57 | testpath==0.3
58 | Theano==0.8.2
59 | tornado==4.4.2
60 | traitlets==4.3.2
61 | wcwidth==0.1.7
62 | widgetsnbextension==1.2.6
63 |
--------------------------------------------------------------------------------
/camera/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 |   <key>CFBundleDevelopmentRegion</key>
6 |   <string>en</string>
7 |   <key>CFBundleDisplayName</key>
8 |   <string>Food-101</string>
9 |   <key>CFBundleExecutable</key>
10 |   <string>${EXECUTABLE_NAME}</string>
11 |   <key>CFBundleIdentifier</key>
12 |   <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
13 |   <key>CFBundleInfoDictionaryVersion</key>
14 |   <string>6.0</string>
15 |   <key>CFBundleName</key>
16 |   <string>${PRODUCT_NAME}</string>
17 |   <key>CFBundlePackageType</key>
18 |   <string>APPL</string>
19 |   <key>CFBundleShortVersionString</key>
20 |   <string>1.0</string>
21 |   <key>CFBundleSignature</key>
22 |   <string>????</string>
23 |   <key>CFBundleVersion</key>
24 |   <string>1.0</string>
25 |   <key>LSRequiresIPhoneOS</key>
26 |   <true/>
27 |   <key>NSCameraUsageDescription</key>
28 |   <string>Capture images to detect object</string>
29 |   <key>UIMainStoryboardFile</key>
30 |   <string>MainStoryboard_iPhone</string>
31 |   <key>UIRequiresFullScreen</key>
32 |   <true/>
33 |   <key>UIStatusBarHidden</key>
34 |   <true/>
35 |   <key>UISupportedInterfaceOrientations</key>
36 |   <array>
37 |     <string>UIInterfaceOrientationPortrait</string>
38 |   </array>
39 |   <key>UISupportedInterfaceOrientations~ipad</key>
40 |   <array>
41 |     <string>UIInterfaceOrientationPortrait</string>
42 |   </array>
43 | </dict>
44 | </plist>
45 | 
--------------------------------------------------------------------------------
/camera/CameraExampleAppDelegate.m:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google Inc. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #import "CameraExampleAppDelegate.h"
16 |
17 | @implementation CameraExampleAppDelegate
18 |
19 | @synthesize window = _window;
20 |
21 | - (BOOL)application:(UIApplication *)application
22 | didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
23 | [self.window makeKeyAndVisible];
24 | return YES;
25 | }
26 |
27 | - (void)applicationWillResignActive:(UIApplication *)application {
28 | [[UIApplication sharedApplication] setIdleTimerDisabled:NO];
29 | }
30 |
31 | - (void)applicationDidEnterBackground:(UIApplication *)application {
32 | }
33 |
34 | - (void)applicationWillEnterForeground:(UIApplication *)application {
35 | }
36 |
37 | - (void)applicationDidBecomeActive:(UIApplication *)application {
38 | [[UIApplication sharedApplication] setIdleTimerDisabled:YES];
39 | }
40 |
41 | - (void)applicationWillTerminate:(UIApplication *)application {
42 | }
43 |
44 | @end
45 |
--------------------------------------------------------------------------------
/camera/data/food101_labels.txt:
--------------------------------------------------------------------------------
1 | apple_pie
2 | baby_back_ribs
3 | baklava
4 | beef_carpaccio
5 | beef_tartare
6 | beet_salad
7 | beignets
8 | bibimbap
9 | bread_pudding
10 | breakfast_burrito
11 | bruschetta
12 | caesar_salad
13 | cannoli
14 | caprese_salad
15 | carrot_cake
16 | ceviche
17 | cheese_plate
18 | cheesecake
19 | chicken_curry
20 | chicken_quesadilla
21 | chicken_wings
22 | chocolate_cake
23 | chocolate_mousse
24 | churros
25 | clam_chowder
26 | club_sandwich
27 | crab_cakes
28 | creme_brulee
29 | croque_madame
30 | cup_cakes
31 | deviled_eggs
32 | donuts
33 | dumplings
34 | edamame
35 | eggs_benedict
36 | escargots
37 | falafel
38 | filet_mignon
39 | fish_and_chips
40 | foie_gras
41 | french_fries
42 | french_onion_soup
43 | french_toast
44 | fried_calamari
45 | fried_rice
46 | frozen_yogurt
47 | garlic_bread
48 | gnocchi
49 | greek_salad
50 | grilled_cheese_sandwich
51 | grilled_salmon
52 | guacamole
53 | gyoza
54 | hamburger
55 | hot_and_sour_soup
56 | hot_dog
57 | huevos_rancheros
58 | hummus
59 | ice_cream
60 | lasagna
61 | lobster_bisque
62 | lobster_roll_sandwich
63 | macaroni_and_cheese
64 | macarons
65 | miso_soup
66 | mussels
67 | nachos
68 | omelette
69 | onion_rings
70 | oysters
71 | pad_thai
72 | paella
73 | pancakes
74 | panna_cotta
75 | peking_duck
76 | pho
77 | pizza
78 | pork_chop
79 | poutine
80 | prime_rib
81 | pulled_pork_sandwich
82 | ramen
83 | ravioli
84 | red_velvet_cake
85 | risotto
86 | samosa
87 | sashimi
88 | scallops
89 | seaweed_salad
90 | shrimp_and_grits
91 | spaghetti_bolognese
92 | spaghetti_carbonara
93 | spring_rolls
94 | steak
95 | strawberry_shortcake
96 | sushi
97 | tacos
98 | takoyaki
99 | tiramisu
100 | tuna_tartare
101 | waffles
102 |
--------------------------------------------------------------------------------
/model_export/labels.txt:
--------------------------------------------------------------------------------
1 | apple_pie
2 | baby_back_ribs
3 | baklava
4 | beef_carpaccio
5 | beef_tartare
6 | beet_salad
7 | beignets
8 | bibimbap
9 | bread_pudding
10 | breakfast_burrito
11 | bruschetta
12 | caesar_salad
13 | cannoli
14 | caprese_salad
15 | carrot_cake
16 | ceviche
17 | cheese_plate
18 | cheesecake
19 | chicken_curry
20 | chicken_quesadilla
21 | chicken_wings
22 | chocolate_cake
23 | chocolate_mousse
24 | churros
25 | clam_chowder
26 | club_sandwich
27 | crab_cakes
28 | creme_brulee
29 | croque_madame
30 | cup_cakes
31 | deviled_eggs
32 | donuts
33 | dumplings
34 | edamame
35 | eggs_benedict
36 | escargots
37 | falafel
38 | filet_mignon
39 | fish_and_chips
40 | foie_gras
41 | french_fries
42 | french_onion_soup
43 | french_toast
44 | fried_calamari
45 | fried_rice
46 | frozen_yogurt
47 | garlic_bread
48 | gnocchi
49 | greek_salad
50 | grilled_cheese_sandwich
51 | grilled_salmon
52 | guacamole
53 | gyoza
54 | hamburger
55 | hot_and_sour_soup
56 | hot_dog
57 | huevos_rancheros
58 | hummus
59 | ice_cream
60 | lasagna
61 | lobster_bisque
62 | lobster_roll_sandwich
63 | macaroni_and_cheese
64 | macarons
65 | miso_soup
66 | mussels
67 | nachos
68 | omelette
69 | onion_rings
70 | oysters
71 | pad_thai
72 | paella
73 | pancakes
74 | panna_cotta
75 | peking_duck
76 | pho
77 | pizza
78 | pork_chop
79 | poutine
80 | prime_rib
81 | pulled_pork_sandwich
82 | ramen
83 | ravioli
84 | red_velvet_cake
85 | risotto
86 | samosa
87 | sashimi
88 | scallops
89 | seaweed_salad
90 | shrimp_and_grits
91 | spaghetti_bolognese
92 | spaghetti_carbonara
93 | spring_rolls
94 | steak
95 | strawberry_shortcake
96 | sushi
97 | tacos
98 | takoyaki
99 | tiramisu
100 | tuna_tartare
101 | waffles
102 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .ipynb_checkpoints
2 | *.pb
3 | *.hdf5
4 |
5 |
6 | # Created by https://www.gitignore.io/api/objective-c
7 |
8 | ### Objective-C ###
9 | # Xcode
10 | #
11 | # gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
12 |
13 | ## Build generated
14 | build/
15 | DerivedData/
16 |
17 | ## Various settings
18 | *.pbxuser
19 | !default.pbxuser
20 | *.mode1v3
21 | !default.mode1v3
22 | *.mode2v3
23 | !default.mode2v3
24 | *.perspectivev3
25 | !default.perspectivev3
26 | xcuserdata/
27 |
28 | ## Other
29 | *.moved-aside
30 | *.xccheckout
31 | *.xcscmblueprint
32 |
33 | ## Obj-C/Swift specific
34 | *.hmap
35 | *.ipa
36 | *.dSYM.zip
37 | *.dSYM
38 |
39 | # CocoaPods
40 | #
41 | # We recommend against adding the Pods directory to your .gitignore. However
42 | # you should judge for yourself, the pros and cons are mentioned at:
43 | # https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control
44 | #
45 | # Pods/
46 |
47 | # Carthage
48 | #
49 | # Add this line if you want to avoid checking in source code from Carthage dependencies.
50 | # Carthage/Checkouts
51 |
52 | Carthage/Build
53 |
54 | # fastlane
55 | #
56 | # It is recommended to not store the screenshots in the git repo. Instead, use fastlane to re-generate the
57 | # screenshots whenever they are needed.
58 | # For more information about the recommended setup visit:
59 | # https://docs.fastlane.tools/best-practices/source-control/#source-control
60 |
61 | fastlane/report.xml
62 | fastlane/Preview.html
63 | fastlane/screenshots
64 | fastlane/test_output
65 |
66 | # Code Injection
67 | #
68 | # After new code Injection tools there's a generated folder /iOSInjectionProject
69 | # https://github.com/johnno1962/injectionforxcode
70 |
71 | iOSInjectionProject/
72 |
73 | ### Objective-C Patch ###
74 |
75 | # End of https://www.gitignore.io/api/objective-c
76 |
--------------------------------------------------------------------------------
/camera/CameraExampleViewController.h:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google Inc. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #import <AVFoundation/AVFoundation.h>
16 | #import <UIKit/UIKit.h>
17 |
18 | #include <memory>
19 | #include "tensorflow/core/public/session.h"
20 | #include "tensorflow/core/util/memmapped_file_system.h"
21 |
22 | @interface CameraExampleViewController
23 |     : UIViewController<UIGestureRecognizerDelegate,
24 |                        AVCaptureVideoDataOutputSampleBufferDelegate> {
25 | IBOutlet UIView *previewView;
26 | IBOutlet UISegmentedControl *camerasControl;
27 | AVCaptureVideoPreviewLayer *previewLayer;
28 | AVCaptureVideoDataOutput *videoDataOutput;
29 | dispatch_queue_t videoDataOutputQueue;
30 | AVCaptureStillImageOutput *stillImageOutput;
31 | UIView *flashView;
32 | UIImage *square;
33 | BOOL isUsingFrontFacingCamera;
34 | AVSpeechSynthesizer *synth;
35 | NSMutableDictionary *oldPredictionValues;
36 | NSMutableArray *labelLayers;
37 | AVCaptureSession *session;
38 | std::unique_ptr<tensorflow::Session> tf_session;
39 | std::unique_ptr<tensorflow::MemmappedEnv> tf_memmapped_env;
40 | std::vector<std::string> labels;
41 | }
42 | @property(strong, nonatomic) CATextLayer *predictionTextLayer;
43 |
44 | - (IBAction)takePicture:(id)sender;
45 | - (IBAction)switchCameras:(id)sender;
46 |
47 | @end
48 |
--------------------------------------------------------------------------------
/camera/tensorflow_utils.h:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google Inc. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #ifndef TENSORFLOW_CONTRIB_IOS_EXAMPLES_CAMERA_TENSORFLOW_UTILS_H_
16 | #define TENSORFLOW_CONTRIB_IOS_EXAMPLES_CAMERA_TENSORFLOW_UTILS_H_
17 |
18 | #include <memory>
19 | #include <vector>
20 |
21 | #include "tensorflow/core/public/session.h"
22 | #include "tensorflow/core/util/memmapped_file_system.h"
23 | #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
24 |
25 | // Reads a serialized GraphDef protobuf file from the bundle, typically
26 | // created with the freeze_graph script. Populates the session argument with a
27 | // Session object that has the model loaded.
28 | tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
29 |                              std::unique_ptr<tensorflow::Session>* session);
30 |
31 | // Loads a model from a file that has been created using the
32 | // convert_graphdef_memmapped_format tool. This bundles together a GraphDef
33 | // proto together with a file that can be memory-mapped, containing the weight
34 | // parameters for the model. This is useful because it reduces the overall
35 | // memory pressure, since the read-only parameter regions can be easily paged
36 | // out and don't count toward memory limits on iOS.
37 | tensorflow::Status LoadMemoryMappedModel(
38 | NSString* file_name, NSString* file_type,
39 |     std::unique_ptr<tensorflow::Session>* session,
40 |     std::unique_ptr<tensorflow::MemmappedEnv>* memmapped_env);
41 |
42 | // Takes a text file with a single label on each line, and returns a list.
43 | tensorflow::Status LoadLabels(NSString* file_name, NSString* file_type,
44 |                               std::vector<std::string>* label_strings);
45 |
46 | // Sorts the results from a model execution, and returns the highest scoring.
47 | void GetTopN(const Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>,
48 |              Eigen::Aligned>& prediction,
49 |              const int num_results, const float threshold,
50 |              std::vector<std::pair<float, int> >* top_results);
51 |
52 | #endif // TENSORFLOW_CONTRIB_IOS_EXAMPLES_CAMERA_TENSORFLOW_UTILS_H_
53 |
--------------------------------------------------------------------------------
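
Taken together, these helpers form the model-loading path used by the view controller. A hedged sketch of how they compose: the mem_graph and food101_labels resource names come from the Xcode project's resource list below, but the "input"/"output" tensor names and the 1x299x299x3 shape are assumptions for illustration, not taken from the repo.

    std::unique_ptr<tensorflow::Session> tf_session;
    std::unique_ptr<tensorflow::MemmappedEnv> tf_memmapped_env;
    std::vector<std::string> labels;

    // Load the memory-mapped graph and the label list from the app bundle.
    tensorflow::Status load_status = LoadMemoryMappedModel(
        @"mem_graph", @"pb", &tf_session, &tf_memmapped_env);
    if (!load_status.ok()) LOG(FATAL) << "Couldn't load model: " << load_status;
    LoadLabels(@"food101_labels", @"txt", &labels);

    // Run one inference; tensor names and shape are assumed, not from the repo.
    tensorflow::Tensor image_tensor(
        tensorflow::DT_FLOAT, tensorflow::TensorShape({1, 299, 299, 3}));
    // ... fill image_tensor.flat<float>() with preprocessed pixel values ...
    std::vector<tensorflow::Tensor> outputs;
    tensorflow::Status run_status =
        tf_session->Run({{"input", image_tensor}}, {"output"}, {}, &outputs);

    // Keep the five best classes scoring above 0.1 and map indices to labels.
    if (run_status.ok()) {
      std::vector<std::pair<float, int> > top_results;
      GetTopN(outputs[0].flat<float>(), 5, 0.1f, &top_results);
      for (const auto& result : top_results)
        LOG(INFO) << labels[result.second] << ": " << result.first;
    }
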
/camera/ios_image_load.mm:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google Inc. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #include "ios_image_load.h"
16 |
17 | #include <stdlib.h>
18 | #include <string.h>
19 | #include <assert.h>
20 | #include <stdio.h>
21 |
22 | #import <CoreImage/CoreImage.h>
23 | #import <ImageIO/ImageIO.h>
24 |
25 | using tensorflow::uint8;
26 |
27 | std::vector<uint8> LoadImageFromFile(const char* file_name,
28 | int* out_width, int* out_height,
29 | int* out_channels) {
30 | FILE* file_handle = fopen(file_name, "rb");
31 | fseek(file_handle, 0, SEEK_END);
32 | const size_t bytes_in_file = ftell(file_handle);
33 | fseek(file_handle, 0, SEEK_SET);
34 | std::vector<uint8> file_data(bytes_in_file);
35 | fread(file_data.data(), 1, bytes_in_file, file_handle);
36 | fclose(file_handle);
37 | CFDataRef file_data_ref = CFDataCreateWithBytesNoCopy(NULL, file_data.data(),
38 | bytes_in_file,
39 | kCFAllocatorNull);
40 | CGDataProviderRef image_provider =
41 | CGDataProviderCreateWithCFData(file_data_ref);
42 |
43 | const char* suffix = strrchr(file_name, '.');
44 | if (!suffix || suffix == file_name) {
45 | suffix = "";
46 | }
47 | CGImageRef image;
48 | if (strcasecmp(suffix, ".png") == 0) {
49 | image = CGImageCreateWithPNGDataProvider(image_provider, NULL, true,
50 | kCGRenderingIntentDefault);
51 | } else if ((strcasecmp(suffix, ".jpg") == 0) ||
52 | (strcasecmp(suffix, ".jpeg") == 0)) {
53 | image = CGImageCreateWithJPEGDataProvider(image_provider, NULL, true,
54 | kCGRenderingIntentDefault);
55 | } else {
56 | CFRelease(image_provider);
57 | CFRelease(file_data_ref);
58 | fprintf(stderr, "Unknown suffix for file '%s'\n", file_name);
59 | *out_width = 0;
60 | *out_height = 0;
61 | *out_channels = 0;
62 | return std::vector<uint8>();
63 | }
64 |
65 | const int width = (int)CGImageGetWidth(image);
66 | const int height = (int)CGImageGetHeight(image);
67 | const int channels = 4;
68 | CGColorSpaceRef color_space = CGColorSpaceCreateDeviceRGB();
69 | const int bytes_per_row = (width * channels);
70 | const int bytes_in_image = (bytes_per_row * height);
71 | std::vector result(bytes_in_image);
72 | const int bits_per_component = 8;
73 | CGContextRef context = CGBitmapContextCreate(result.data(), width, height,
74 | bits_per_component, bytes_per_row, color_space,
75 | kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
76 | CGColorSpaceRelease(color_space);
77 | CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
78 | CGContextRelease(context);
79 | CFRelease(image);
80 | CFRelease(image_provider);
81 | CFRelease(file_data_ref);
82 |
83 | *out_width = width;
84 | *out_height = height;
85 | *out_channels = channels;
86 | return result;
87 | }
88 |
--------------------------------------------------------------------------------
/camera/en.lproj/MainStoryboard_iPhone.storyboard:
--------------------------------------------------------------------------------
[MainStoryboard_iPhone.storyboard: Interface Builder XML stripped during extraction; the scene hosts the previewView outlet and the takePicture:/switchCameras: actions declared in CameraExampleViewController.h.]
--------------------------------------------------------------------------------
/camera/tensorflow_utils.mm:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google Inc. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #import <Foundation/Foundation.h>
16 |
17 | #include "tensorflow_utils.h"
18 |
19 | #include <pthread.h>
20 | #include <unistd.h>
21 | #include <fstream>
22 | #include <queue>
23 | #include <sstream>
24 | #include <string>
25 |
26 | #include "google/protobuf/io/coded_stream.h"
27 | #include "google/protobuf/io/zero_copy_stream_impl.h"
28 | #include "google/protobuf/io/zero_copy_stream_impl_lite.h"
29 | #include "google/protobuf/message_lite.h"
30 | #include "tensorflow/core/framework/tensor.h"
31 | #include "tensorflow/core/framework/types.pb.h"
32 | #include "tensorflow/core/platform/env.h"
33 | #include "tensorflow/core/platform/logging.h"
34 | #include "tensorflow/core/platform/mutex.h"
35 | #include "tensorflow/core/platform/types.h"
36 | #include "tensorflow/core/public/session.h"
37 |
38 | namespace {
39 |
40 | // Helper class used to load protobufs efficiently.
41 | class IfstreamInputStream : public ::google::protobuf::io::CopyingInputStream {
42 | public:
43 | explicit IfstreamInputStream(const std::string& file_name)
44 | : ifs_(file_name.c_str(), std::ios::in | std::ios::binary) {}
45 | ~IfstreamInputStream() { ifs_.close(); }
46 |
47 | int Read(void* buffer, int size) {
48 | if (!ifs_) {
49 | return -1;
50 | }
51 | ifs_.read(static_cast<char*>(buffer), size);
52 | return ifs_.gcount();
53 | }
54 |
55 | private:
56 | std::ifstream ifs_;
57 | };
58 | } // namespace
59 |
60 | // Returns the top N confidence values over threshold in the provided vector,
61 | // sorted by confidence in descending order.
62 | void GetTopN(const Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>,
63 |              Eigen::Aligned>& prediction,
64 |              const int num_results, const float threshold,
65 |              std::vector<std::pair<float, int> >* top_results) {
66 | // Will contain top N results in ascending order.
67 | std::priority_queue<std::pair<float, int>,
68 |                     std::vector<std::pair<float, int> >,
69 |                     std::greater<std::pair<float, int> > >
70 |     top_result_pq;
71 |
72 | const int count = prediction.size();
73 | for (int i = 0; i < count; ++i) {
74 | const float value = prediction(i);
75 |
76 | // Only add it if it beats the threshold and has a chance at being in
77 | // the top N.
78 | if (value < threshold) {
79 | continue;
80 | }
81 |
82 | top_result_pq.push(std::pair<float, int>(value, i));
83 |
84 | // If at capacity, kick the smallest value out.
85 | if (top_result_pq.size() > num_results) {
86 | top_result_pq.pop();
87 | }
88 | }
89 |
90 | // Copy to output vector and reverse into descending order.
91 | while (!top_result_pq.empty()) {
92 | top_results->push_back(top_result_pq.top());
93 | top_result_pq.pop();
94 | }
95 | std::reverse(top_results->begin(), top_results->end());
96 | }
97 |
98 | bool PortableReadFileToProto(const std::string& file_name,
99 | ::google::protobuf::MessageLite* proto) {
100 | ::google::protobuf::io::CopyingInputStreamAdaptor stream(
101 | new IfstreamInputStream(file_name));
102 | stream.SetOwnsCopyingStream(true);
103 | ::google::protobuf::io::CodedInputStream coded_stream(&stream);
104 | // Total bytes hard limit / warning limit are set to 1GB and 512MB
105 | // respectively.
106 | coded_stream.SetTotalBytesLimit(1024LL << 20, 512LL << 20);
107 | return proto->ParseFromCodedStream(&coded_stream);
108 | }
109 |
110 | NSString* FilePathForResourceName(NSString* name, NSString* extension) {
111 | NSString* file_path =
112 | [[NSBundle mainBundle] pathForResource:name ofType:extension];
113 | if (file_path == NULL) {
114 | LOG(FATAL) << "Couldn't find '" << [name UTF8String] << "."
115 | << [extension UTF8String] << "' in bundle.";
116 | return nullptr;
117 | }
118 | return file_path;
119 | }
120 |
121 | tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
122 |                              std::unique_ptr<tensorflow::Session>* session) {
123 | tensorflow::SessionOptions options;
124 |
125 | tensorflow::Session* session_pointer = nullptr;
126 | tensorflow::Status session_status =
127 | tensorflow::NewSession(options, &session_pointer);
128 | if (!session_status.ok()) {
129 | LOG(ERROR) << "Could not create TensorFlow Session: " << session_status;
130 | return session_status;
131 | }
132 | session->reset(session_pointer);
133 |
134 | tensorflow::GraphDef tensorflow_graph;
135 |
136 | NSString* model_path = FilePathForResourceName(file_name, file_type);
137 | if (!model_path) {
138 | LOG(ERROR) << "Failed to find model proto at " << [file_name UTF8String]
139 | << [file_type UTF8String];
140 | return tensorflow::errors::NotFound([file_name UTF8String],
141 | [file_type UTF8String]);
142 | }
143 | const bool read_proto_succeeded =
144 | PortableReadFileToProto([model_path UTF8String], &tensorflow_graph);
145 | if (!read_proto_succeeded) {
146 | LOG(ERROR) << "Failed to load model proto from " << [model_path UTF8String];
147 | return tensorflow::errors::NotFound([model_path UTF8String]);
148 | }
149 |
150 | tensorflow::Status create_status = (*session)->Create(tensorflow_graph);
151 | if (!create_status.ok()) {
152 | LOG(ERROR) << "Could not create TensorFlow Graph: " << create_status;
153 | return create_status;
154 | }
155 |
156 | return tensorflow::Status::OK();
157 | }
158 |
159 | tensorflow::Status LoadMemoryMappedModel(
160 | NSString* file_name, NSString* file_type,
161 |     std::unique_ptr<tensorflow::Session>* session,
162 |     std::unique_ptr<tensorflow::MemmappedEnv>* memmapped_env) {
163 | NSString* network_path = FilePathForResourceName(file_name, file_type);
164 | memmapped_env->reset(
165 | new tensorflow::MemmappedEnv(tensorflow::Env::Default()));
166 | tensorflow::Status mmap_status =
167 | (memmapped_env->get())->InitializeFromFile([network_path UTF8String]);
168 | if (!mmap_status.ok()) {
169 | LOG(ERROR) << "MMap failed with " << mmap_status.error_message();
170 | return mmap_status;
171 | }
172 |
173 | tensorflow::GraphDef tensorflow_graph;
174 | tensorflow::Status load_graph_status = ReadBinaryProto(
175 | memmapped_env->get(),
176 | tensorflow::MemmappedFileSystem::kMemmappedPackageDefaultGraphDef,
177 | &tensorflow_graph);
178 | if (!load_graph_status.ok()) {
179 | LOG(ERROR) << "MMap load graph failed with "
180 | << load_graph_status.error_message();
181 | return load_graph_status;
182 | }
183 |
184 | tensorflow::SessionOptions options;
185 | // Disable optimizations on this graph so that constant folding doesn't
186 | // increase the memory footprint by creating new constant copies of the weight
187 | // parameters.
188 | options.config.mutable_graph_options()
189 | ->mutable_optimizer_options()
190 | ->set_opt_level(::tensorflow::OptimizerOptions::L0);
191 | options.env = memmapped_env->get();
192 |
193 | tensorflow::Session* session_pointer = nullptr;
194 | tensorflow::Status session_status =
195 | tensorflow::NewSession(options, &session_pointer);
196 | if (!session_status.ok()) {
197 | LOG(ERROR) << "Could not create TensorFlow Session: " << session_status;
198 | return session_status;
199 | }
200 |
201 | tensorflow::Status create_status = session_pointer->Create(tensorflow_graph);
202 | if (!create_status.ok()) {
203 | LOG(ERROR) << "Could not create TensorFlow Graph: " << create_status;
204 | return create_status;
205 | }
206 |
207 | session->reset(session_pointer);
208 |
209 | return tensorflow::Status::OK();
210 | }
211 |
212 | tensorflow::Status LoadLabels(NSString* file_name, NSString* file_type,
213 |                               std::vector<std::string>* label_strings) {
214 | // Read the label list
215 | NSString* labels_path = FilePathForResourceName(file_name, file_type);
216 | if (!labels_path) {
217 | LOG(ERROR) << "Failed to find labels file at " << [file_name UTF8String]
218 | << [file_type UTF8String];
219 | return tensorflow::errors::NotFound([file_name UTF8String],
220 | [file_type UTF8String]);
221 | }
222 | std::ifstream t;
223 | t.open([labels_path UTF8String]);
224 | std::string line;
225 | while (std::getline(t, line)) {
226 |   // Looping on getline itself avoids pushing an empty trailing label at EOF.
227 |   label_strings->push_back(line);
228 | }
229 | t.close();
230 | return tensorflow::Status::OK();
231 | }
232 |
--------------------------------------------------------------------------------
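
PortableReadFileToProto accepts any protobuf MessageLite, so it can also load a frozen graph from an arbitrary filesystem path. A small sketch under that assumption; the /tmp/graph.pb path is hypothetical.

    // Sketch: parse a frozen GraphDef from disk; the path is hypothetical.
    tensorflow::GraphDef graph_def;
    if (!PortableReadFileToProto("/tmp/graph.pb", &graph_def)) {
      LOG(ERROR) << "Could not parse GraphDef";
    } else {
      LOG(INFO) << "Loaded a graph with " << graph_def.node_size() << " nodes";
    }
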
/camera/data/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2015 The TensorFlow Authors. All rights reserved.
2 |
3 | Apache License
4 | Version 2.0, January 2004
5 | http://www.apache.org/licenses/
6 |
7 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8 |
9 | 1. Definitions.
10 |
11 | "License" shall mean the terms and conditions for use, reproduction,
12 | and distribution as defined by Sections 1 through 9 of this document.
13 |
14 | "Licensor" shall mean the copyright owner or entity authorized by
15 | the copyright owner that is granting the License.
16 |
17 | "Legal Entity" shall mean the union of the acting entity and all
18 | other entities that control, are controlled by, or are under common
19 | control with that entity. For the purposes of this definition,
20 | "control" means (i) the power, direct or indirect, to cause the
21 | direction or management of such entity, whether by contract or
22 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
23 | outstanding shares, or (iii) beneficial ownership of such entity.
24 |
25 | "You" (or "Your") shall mean an individual or Legal Entity
26 | exercising permissions granted by this License.
27 |
28 | "Source" form shall mean the preferred form for making modifications,
29 | including but not limited to software source code, documentation
30 | source, and configuration files.
31 |
32 | "Object" form shall mean any form resulting from mechanical
33 | transformation or translation of a Source form, including but
34 | not limited to compiled object code, generated documentation,
35 | and conversions to other media types.
36 |
37 | "Work" shall mean the work of authorship, whether in Source or
38 | Object form, made available under the License, as indicated by a
39 | copyright notice that is included in or attached to the work
40 | (an example is provided in the Appendix below).
41 |
42 | "Derivative Works" shall mean any work, whether in Source or Object
43 | form, that is based on (or derived from) the Work and for which the
44 | editorial revisions, annotations, elaborations, or other modifications
45 | represent, as a whole, an original work of authorship. For the purposes
46 | of this License, Derivative Works shall not include works that remain
47 | separable from, or merely link (or bind by name) to the interfaces of,
48 | the Work and Derivative Works thereof.
49 |
50 | "Contribution" shall mean any work of authorship, including
51 | the original version of the Work and any modifications or additions
52 | to that Work or Derivative Works thereof, that is intentionally
53 | submitted to Licensor for inclusion in the Work by the copyright owner
54 | or by an individual or Legal Entity authorized to submit on behalf of
55 | the copyright owner. For the purposes of this definition, "submitted"
56 | means any form of electronic, verbal, or written communication sent
57 | to the Licensor or its representatives, including but not limited to
58 | communication on electronic mailing lists, source code control systems,
59 | and issue tracking systems that are managed by, or on behalf of, the
60 | Licensor for the purpose of discussing and improving the Work, but
61 | excluding communication that is conspicuously marked or otherwise
62 | designated in writing by the copyright owner as "Not a Contribution."
63 |
64 | "Contributor" shall mean Licensor and any individual or Legal Entity
65 | on behalf of whom a Contribution has been received by Licensor and
66 | subsequently incorporated within the Work.
67 |
68 | 2. Grant of Copyright License. Subject to the terms and conditions of
69 | this License, each Contributor hereby grants to You a perpetual,
70 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71 | copyright license to reproduce, prepare Derivative Works of,
72 | publicly display, publicly perform, sublicense, and distribute the
73 | Work and such Derivative Works in Source or Object form.
74 |
75 | 3. Grant of Patent License. Subject to the terms and conditions of
76 | this License, each Contributor hereby grants to You a perpetual,
77 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78 | (except as stated in this section) patent license to make, have made,
79 | use, offer to sell, sell, import, and otherwise transfer the Work,
80 | where such license applies only to those patent claims licensable
81 | by such Contributor that are necessarily infringed by their
82 | Contribution(s) alone or by combination of their Contribution(s)
83 | with the Work to which such Contribution(s) was submitted. If You
84 | institute patent litigation against any entity (including a
85 | cross-claim or counterclaim in a lawsuit) alleging that the Work
86 | or a Contribution incorporated within the Work constitutes direct
87 | or contributory patent infringement, then any patent licenses
88 | granted to You under this License for that Work shall terminate
89 | as of the date such litigation is filed.
90 |
91 | 4. Redistribution. You may reproduce and distribute copies of the
92 | Work or Derivative Works thereof in any medium, with or without
93 | modifications, and in Source or Object form, provided that You
94 | meet the following conditions:
95 |
96 | (a) You must give any other recipients of the Work or
97 | Derivative Works a copy of this License; and
98 |
99 | (b) You must cause any modified files to carry prominent notices
100 | stating that You changed the files; and
101 |
102 | (c) You must retain, in the Source form of any Derivative Works
103 | that You distribute, all copyright, patent, trademark, and
104 | attribution notices from the Source form of the Work,
105 | excluding those notices that do not pertain to any part of
106 | the Derivative Works; and
107 |
108 | (d) If the Work includes a "NOTICE" text file as part of its
109 | distribution, then any Derivative Works that You distribute must
110 | include a readable copy of the attribution notices contained
111 | within such NOTICE file, excluding those notices that do not
112 | pertain to any part of the Derivative Works, in at least one
113 | of the following places: within a NOTICE text file distributed
114 | as part of the Derivative Works; within the Source form or
115 | documentation, if provided along with the Derivative Works; or,
116 | within a display generated by the Derivative Works, if and
117 | wherever such third-party notices normally appear. The contents
118 | of the NOTICE file are for informational purposes only and
119 | do not modify the License. You may add Your own attribution
120 | notices within Derivative Works that You distribute, alongside
121 | or as an addendum to the NOTICE text from the Work, provided
122 | that such additional attribution notices cannot be construed
123 | as modifying the License.
124 |
125 | You may add Your own copyright statement to Your modifications and
126 | may provide additional or different license terms and conditions
127 | for use, reproduction, or distribution of Your modifications, or
128 | for any such Derivative Works as a whole, provided Your use,
129 | reproduction, and distribution of the Work otherwise complies with
130 | the conditions stated in this License.
131 |
132 | 5. Submission of Contributions. Unless You explicitly state otherwise,
133 | any Contribution intentionally submitted for inclusion in the Work
134 | by You to the Licensor shall be under the terms and conditions of
135 | this License, without any additional terms or conditions.
136 | Notwithstanding the above, nothing herein shall supersede or modify
137 | the terms of any separate license agreement you may have executed
138 | with Licensor regarding such Contributions.
139 |
140 | 6. Trademarks. This License does not grant permission to use the trade
141 | names, trademarks, service marks, or product names of the Licensor,
142 | except as required for reasonable and customary use in describing the
143 | origin of the Work and reproducing the content of the NOTICE file.
144 |
145 | 7. Disclaimer of Warranty. Unless required by applicable law or
146 | agreed to in writing, Licensor provides the Work (and each
147 | Contributor provides its Contributions) on an "AS IS" BASIS,
148 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149 | implied, including, without limitation, any warranties or conditions
150 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151 | PARTICULAR PURPOSE. You are solely responsible for determining the
152 | appropriateness of using or redistributing the Work and assume any
153 | risks associated with Your exercise of permissions under this License.
154 |
155 | 8. Limitation of Liability. In no event and under no legal theory,
156 | whether in tort (including negligence), contract, or otherwise,
157 | unless required by applicable law (such as deliberate and grossly
158 | negligent acts) or agreed to in writing, shall any Contributor be
159 | liable to You for damages, including any direct, indirect, special,
160 | incidental, or consequential damages of any character arising as a
161 | result of this License or out of the use or inability to use the
162 | Work (including but not limited to damages for loss of goodwill,
163 | work stoppage, computer failure or malfunction, or any and all
164 | other commercial damages or losses), even if such Contributor
165 | has been advised of the possibility of such damages.
166 |
167 | 9. Accepting Warranty or Additional Liability. While redistributing
168 | the Work or Derivative Works thereof, You may choose to offer,
169 | and charge a fee for, acceptance of support, warranty, indemnity,
170 | or other liability obligations and/or rights consistent with this
171 | License. However, in accepting such obligations, You may act only
172 | on Your own behalf and on Your sole responsibility, not on behalf
173 | of any other Contributor, and only if You agree to indemnify,
174 | defend, and hold each Contributor harmless for any liability
175 | incurred by, or claims asserted against, such Contributor by reason
176 | of your accepting any such warranty or additional liability.
177 |
178 | END OF TERMS AND CONDITIONS
179 |
180 | APPENDIX: How to apply the Apache License to your work.
181 |
182 | To apply the Apache License to your work, attach the following
183 | boilerplate notice, with the fields enclosed by brackets "[]"
184 | replaced with your own identifying information. (Don't include
185 | the brackets!) The text should be enclosed in the appropriate
186 | comment syntax for the file format. We also recommend that a
187 | file or class name and description of purpose be included on the
188 | same "printed page" as the copyright notice for easier
189 | identification within third-party archives.
190 |
191 | Copyright 2015, The TensorFlow Authors.
192 |
193 | Licensed under the Apache License, Version 2.0 (the "License");
194 | you may not use this file except in compliance with the License.
195 | You may obtain a copy of the License at
196 |
197 | http://www.apache.org/licenses/LICENSE-2.0
198 |
199 | Unless required by applicable law or agreed to in writing, software
200 | distributed under the License is distributed on an "AS IS" BASIS,
201 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202 | See the License for the specific language governing permissions and
203 | limitations under the License.
204 |
--------------------------------------------------------------------------------
/camera/camera_example.xcodeproj/project.pbxproj:
--------------------------------------------------------------------------------
1 | // !$*UTF8*$!
2 | {
3 | archiveVersion = 1;
4 | classes = {
5 | };
6 | objectVersion = 46;
7 | objects = {
8 |
9 | /* Begin PBXBuildFile section */
10 | 591D3EC51CFF7F130059011C /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 591D3EC41CFF7F120059011C /* AVFoundation.framework */; };
11 | 591D3ECB1CFF7F5F0059011C /* CoreMedia.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 591D3ECA1CFF7F5F0059011C /* CoreMedia.framework */; };
12 | 591D3ECD1CFF7F9F0059011C /* AssetsLibrary.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 591D3ECC1CFF7F9F0059011C /* AssetsLibrary.framework */; };
13 | 591D3ECF1CFF7FCE0059011C /* ImageIO.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 591D3ECE1CFF7FCE0059011C /* ImageIO.framework */; };
14 | 591D3ED21CFF85C30059011C /* ios_image_load.mm in Sources */ = {isa = PBXBuildFile; fileRef = 591D3ED11CFF85C30059011C /* ios_image_load.mm */; };
15 | 591D3ED51CFF85FD0059011C /* tensorflow_utils.mm in Sources */ = {isa = PBXBuildFile; fileRef = 591D3ED31CFF85FD0059011C /* tensorflow_utils.mm */; };
16 | 591D3EDF1CFFAD230059011C /* libprotobuf-lite.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 591D3EDD1CFFAD230059011C /* libprotobuf-lite.a */; };
17 | 591D3EE01CFFAD230059011C /* libprotobuf.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 591D3EDE1CFFAD230059011C /* libprotobuf.a */; };
18 | 592FF8B918ECBD7600C164F8 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 592FF8B818ECBD7600C164F8 /* Foundation.framework */; };
19 | 592FF8BB18ECBD7600C164F8 /* CoreGraphics.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 592FF8BA18ECBD7600C164F8 /* CoreGraphics.framework */; };
20 | 592FF90218ECC66200C164F8 /* main.mm in Sources */ = {isa = PBXBuildFile; fileRef = 592FF90118ECC66200C164F8 /* main.mm */; };
21 | 592FF90D18EDD0DA00C164F8 /* MainStoryboard_iPhone.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 592FF90A18EDD0DA00C164F8 /* MainStoryboard_iPhone.storyboard */; };
22 | 592FF92518EE240200C164F8 /* CameraExampleAppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 592FF92218EE240200C164F8 /* CameraExampleAppDelegate.m */; };
23 | 592FF92618EE240200C164F8 /* CameraExampleViewController.mm in Sources */ = {isa = PBXBuildFile; fileRef = 592FF92418EE240200C164F8 /* CameraExampleViewController.mm */; };
24 | 5993C7721D5D4E980048CE6A /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 5993C7711D5D4E980048CE6A /* Accelerate.framework */; };
25 | CB123EFB1E72718800AED158 /* food101_labels.txt in Resources */ = {isa = PBXBuildFile; fileRef = CB123EF91E72718800AED158 /* food101_labels.txt */; };
26 | CBB0B4501E8263FC00C7F7F2 /* mem_graph.pb in Resources */ = {isa = PBXBuildFile; fileRef = CBB0B44F1E8263FC00C7F7F2 /* mem_graph.pb */; };
27 | /* End PBXBuildFile section */
28 |
29 | /* Begin PBXFileReference section */
30 | 591D3EC41CFF7F120059011C /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AVFoundation.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.2.sdk/System/Library/Frameworks/AVFoundation.framework; sourceTree = DEVELOPER_DIR; };
31 | 591D3EC61CFF7F370059011C /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.2.sdk/System/Library/Frameworks/CoreFoundation.framework; sourceTree = DEVELOPER_DIR; };
32 | 591D3EC81CFF7F500059011C /* CoreImage.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreImage.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.2.sdk/System/Library/Frameworks/CoreImage.framework; sourceTree = DEVELOPER_DIR; };
33 | 591D3ECA1CFF7F5F0059011C /* CoreMedia.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreMedia.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.2.sdk/System/Library/Frameworks/CoreMedia.framework; sourceTree = DEVELOPER_DIR; };
34 | 591D3ECC1CFF7F9F0059011C /* AssetsLibrary.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AssetsLibrary.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.2.sdk/System/Library/Frameworks/AssetsLibrary.framework; sourceTree = DEVELOPER_DIR; };
35 | 591D3ECE1CFF7FCE0059011C /* ImageIO.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = ImageIO.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.2.sdk/System/Library/Frameworks/ImageIO.framework; sourceTree = DEVELOPER_DIR; };
36 | 591D3ED01CFF85C30059011C /* ios_image_load.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ios_image_load.h; sourceTree = SOURCE_ROOT; };
37 | 591D3ED11CFF85C30059011C /* ios_image_load.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = ios_image_load.mm; sourceTree = SOURCE_ROOT; };
38 | 591D3ED31CFF85FD0059011C /* tensorflow_utils.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = tensorflow_utils.mm; sourceTree = SOURCE_ROOT; };
39 | 591D3ED41CFF85FD0059011C /* tensorflow_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensorflow_utils.h; sourceTree = SOURCE_ROOT; };
40 | 591D3EDD1CFFAD230059011C /* libprotobuf-lite.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = "libprotobuf-lite.a"; path = "../../makefile/gen/protobuf_ios/lib/libprotobuf-lite.a"; sourceTree = "<group>"; };
41 | 591D3EDE1CFFAD230059011C /* libprotobuf.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libprotobuf.a; path = ../../makefile/gen/protobuf_ios/lib/libprotobuf.a; sourceTree = "<group>"; };
42 | 592FF8B518ECBD7600C164F8 /* CameraExample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = CameraExample.app; sourceTree = BUILT_PRODUCTS_DIR; };
43 | 592FF8B818ECBD7600C164F8 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; };
44 | 592FF8BA18ECBD7600C164F8 /* CoreGraphics.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreGraphics.framework; path = System/Library/Frameworks/CoreGraphics.framework; sourceTree = SDKROOT; };
45 | 592FF90118ECC66200C164F8 /* main.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = main.mm; sourceTree = SOURCE_ROOT; };
46 | 592FF90318ECCB8300C164F8 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = SOURCE_ROOT; };
47 | 592FF90B18EDD0DA00C164F8 /* en */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = en; path = MainStoryboard_iPhone.storyboard; sourceTree = "<group>"; };
48 | 592FF92118EE240200C164F8 /* CameraExampleAppDelegate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CameraExampleAppDelegate.h; sourceTree = SOURCE_ROOT; };
49 | 592FF92218EE240200C164F8 /* CameraExampleAppDelegate.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CameraExampleAppDelegate.m; sourceTree = SOURCE_ROOT; };
50 | 592FF92318EE240200C164F8 /* CameraExampleViewController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CameraExampleViewController.h; sourceTree = SOURCE_ROOT; };
51 | 592FF92418EE240200C164F8 /* CameraExampleViewController.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = CameraExampleViewController.mm; sourceTree = SOURCE_ROOT; };
52 | 5993C7711D5D4E980048CE6A /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.3.sdk/System/Library/Frameworks/Accelerate.framework; sourceTree = DEVELOPER_DIR; };
53 | CB123EF91E72718800AED158 /* food101_labels.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = food101_labels.txt; sourceTree = "<group>"; };
54 | CBB0B44F1E8263FC00C7F7F2 /* mem_graph.pb */ = {isa = PBXFileReference; lastKnownFileType = file; path = mem_graph.pb; sourceTree = "<group>"; };
55 | /* End PBXFileReference section */
56 |
57 | /* Begin PBXFrameworksBuildPhase section */
58 | 592FF8B218ECBD7600C164F8 /* Frameworks */ = {
59 | isa = PBXFrameworksBuildPhase;
60 | buildActionMask = 2147483647;
61 | files = (
62 | 5993C7721D5D4E980048CE6A /* Accelerate.framework in Frameworks */,
63 | 591D3EDF1CFFAD230059011C /* libprotobuf-lite.a in Frameworks */,
64 | 591D3EE01CFFAD230059011C /* libprotobuf.a in Frameworks */,
65 | 591D3ECF1CFF7FCE0059011C /* ImageIO.framework in Frameworks */,
66 | 591D3ECD1CFF7F9F0059011C /* AssetsLibrary.framework in Frameworks */,
67 | 591D3ECB1CFF7F5F0059011C /* CoreMedia.framework in Frameworks */,
68 | 591D3EC51CFF7F130059011C /* AVFoundation.framework in Frameworks */,
69 | 592FF8BB18ECBD7600C164F8 /* CoreGraphics.framework in Frameworks */,
70 | 592FF8B918ECBD7600C164F8 /* Foundation.framework in Frameworks */,
71 | );
72 | runOnlyForDeploymentPostprocessing = 0;
73 | };
74 | /* End PBXFrameworksBuildPhase section */
75 |
76 | /* Begin PBXGroup section */
77 | 591D3ED61CFFA83A0059011C /* data */ = {
78 | isa = PBXGroup;
79 | children = (
80 | CBB0B44F1E8263FC00C7F7F2 /* mem_graph.pb */,
81 | CB123EF91E72718800AED158 /* food101_labels.txt */,
82 | );
83 | path = data;
84 | sourceTree = SOURCE_ROOT;
85 | };
86 | 592FF8AA18ECBD3600C164F8 = {
87 | isa = PBXGroup;
88 | children = (
89 | 592FF8BE18ECBD7600C164F8 /* CameraExample */,
90 | 592FF8B718ECBD7600C164F8 /* Frameworks */,
91 | 592FF8B618ECBD7600C164F8 /* Products */,
92 | );
93 | sourceTree = "<group>";
94 | };
95 | 592FF8B618ECBD7600C164F8 /* Products */ = {
96 | isa = PBXGroup;
97 | children = (
98 | 592FF8B518ECBD7600C164F8 /* CameraExample.app */,
99 | );
100 | name = Products;
101 | sourceTree = "<group>";
102 | };
103 | 592FF8B718ECBD7600C164F8 /* Frameworks */ = {
104 | isa = PBXGroup;
105 | children = (
106 | 5993C7711D5D4E980048CE6A /* Accelerate.framework */,
107 | 591D3EDD1CFFAD230059011C /* libprotobuf-lite.a */,
108 | 591D3EDE1CFFAD230059011C /* libprotobuf.a */,
109 | 591D3ECE1CFF7FCE0059011C /* ImageIO.framework */,
110 | 591D3ECC1CFF7F9F0059011C /* AssetsLibrary.framework */,
111 | 591D3ECA1CFF7F5F0059011C /* CoreMedia.framework */,
112 | 591D3EC81CFF7F500059011C /* CoreImage.framework */,
113 | 591D3EC61CFF7F370059011C /* CoreFoundation.framework */,
114 | 591D3EC41CFF7F120059011C /* AVFoundation.framework */,
115 | 592FF8B818ECBD7600C164F8 /* Foundation.framework */,
116 | 592FF8BA18ECBD7600C164F8 /* CoreGraphics.framework */,
117 | );
118 | name = Frameworks;
119 | sourceTree = "<group>";
120 | };
121 | 592FF8BE18ECBD7600C164F8 /* CameraExample */ = {
122 | isa = PBXGroup;
123 | children = (
124 | 591D3ED61CFFA83A0059011C /* data */,
125 | 592FF90718EDD0DA00C164F8 /* en.lproj */,
126 | 592FF92118EE240200C164F8 /* CameraExampleAppDelegate.h */,
127 | 592FF92218EE240200C164F8 /* CameraExampleAppDelegate.m */,
128 | 592FF92318EE240200C164F8 /* CameraExampleViewController.h */,
129 | 592FF92418EE240200C164F8 /* CameraExampleViewController.mm */,
130 | 592FF90318ECCB8300C164F8 /* Info.plist */,
131 | 591D3ED01CFF85C30059011C /* ios_image_load.h */,
132 | 591D3ED11CFF85C30059011C /* ios_image_load.mm */,
133 | 592FF90118ECC66200C164F8 /* main.mm */,
134 | 591D3ED31CFF85FD0059011C /* tensorflow_utils.mm */,
135 | 591D3ED41CFF85FD0059011C /* tensorflow_utils.h */,
136 | );
137 | name = CameraExample;
138 | path = SimpleExample;
139 | sourceTree = "<group>";
140 | };
141 | 592FF90718EDD0DA00C164F8 /* en.lproj */ = {
142 | isa = PBXGroup;
143 | children = (
144 | 592FF90A18EDD0DA00C164F8 /* MainStoryboard_iPhone.storyboard */,
145 | );
146 | path = en.lproj;
147 | sourceTree = SOURCE_ROOT;
148 | };
149 | /* End PBXGroup section */
150 |
151 | /* Begin PBXNativeTarget section */
152 | 592FF8B418ECBD7600C164F8 /* CameraExample */ = {
153 | isa = PBXNativeTarget;
154 | buildConfigurationList = 592FF8E318ECBD7600C164F8 /* Build configuration list for PBXNativeTarget "CameraExample" */;
155 | buildPhases = (
156 | 592FF8B118ECBD7600C164F8 /* Sources */,
157 | 592FF8B218ECBD7600C164F8 /* Frameworks */,
158 | 592FF8B318ECBD7600C164F8 /* Resources */,
159 | );
160 | buildRules = (
161 | );
162 | dependencies = (
163 | );
164 | name = CameraExample;
165 | productName = SimpleExample;
166 | productReference = 592FF8B518ECBD7600C164F8 /* CameraExample.app */;
167 | productType = "com.apple.product-type.application";
168 | };
169 | /* End PBXNativeTarget section */
170 |
171 | /* Begin PBXProject section */
172 | 592FF8AB18ECBD3600C164F8 /* Project object */ = {
173 | isa = PBXProject;
174 | attributes = {
175 | LastUpgradeCheck = 0720;
176 | TargetAttributes = {
177 | 592FF8B418ECBD7600C164F8 = {
178 | DevelopmentTeam = 6F3F52S49T;
179 | };
180 | };
181 | };
182 | buildConfigurationList = 592FF8AE18ECBD3600C164F8 /* Build configuration list for PBXProject "camera_example" */;
183 | compatibilityVersion = "Xcode 3.2";
184 | developmentRegion = English;
185 | hasScannedForEncodings = 0;
186 | knownRegions = (
187 | en,
188 | );
189 | mainGroup = 592FF8AA18ECBD3600C164F8;
190 | productRefGroup = 592FF8B618ECBD7600C164F8 /* Products */;
191 | projectDirPath = "";
192 | projectRoot = "";
193 | targets = (
194 | 592FF8B418ECBD7600C164F8 /* CameraExample */,
195 | );
196 | };
197 | /* End PBXProject section */
198 |
199 | /* Begin PBXResourcesBuildPhase section */
200 | 592FF8B318ECBD7600C164F8 /* Resources */ = {
201 | isa = PBXResourcesBuildPhase;
202 | buildActionMask = 2147483647;
203 | files = (
204 | CBB0B4501E8263FC00C7F7F2 /* mem_graph.pb in Resources */,
205 | CB123EFB1E72718800AED158 /* food101_labels.txt in Resources */,
206 | 592FF90D18EDD0DA00C164F8 /* MainStoryboard_iPhone.storyboard in Resources */,
207 | );
208 | runOnlyForDeploymentPostprocessing = 0;
209 | };
210 | /* End PBXResourcesBuildPhase section */
211 |
212 | /* Begin PBXSourcesBuildPhase section */
213 | 592FF8B118ECBD7600C164F8 /* Sources */ = {
214 | isa = PBXSourcesBuildPhase;
215 | buildActionMask = 2147483647;
216 | files = (
217 | 592FF90218ECC66200C164F8 /* main.mm in Sources */,
218 | 591D3ED21CFF85C30059011C /* ios_image_load.mm in Sources */,
219 | 592FF92618EE240200C164F8 /* CameraExampleViewController.mm in Sources */,
220 | 592FF92518EE240200C164F8 /* CameraExampleAppDelegate.m in Sources */,
221 | 591D3ED51CFF85FD0059011C /* tensorflow_utils.mm in Sources */,
222 | );
223 | runOnlyForDeploymentPostprocessing = 0;
224 | };
225 | /* End PBXSourcesBuildPhase section */
226 |
227 | /* Begin PBXVariantGroup section */
228 | 592FF90A18EDD0DA00C164F8 /* MainStoryboard_iPhone.storyboard */ = {
229 | isa = PBXVariantGroup;
230 | children = (
231 | 592FF90B18EDD0DA00C164F8 /* en */,
232 | );
233 | name = MainStoryboard_iPhone.storyboard;
234 | sourceTree = "<group>";
235 | };
236 | /* End PBXVariantGroup section */
237 |
238 | /* Begin XCBuildConfiguration section */
239 | 592FF8AF18ECBD3600C164F8 /* Debug */ = {
240 | isa = XCBuildConfiguration;
241 | buildSettings = {
242 | CLANG_WARN_BOOL_CONVERSION = YES;
243 | CLANG_WARN_CONSTANT_CONVERSION = YES;
244 | CLANG_WARN_EMPTY_BODY = YES;
245 | CLANG_WARN_ENUM_CONVERSION = YES;
246 | CLANG_WARN_INFINITE_RECURSION = YES;
247 | CLANG_WARN_INT_CONVERSION = YES;
248 | CLANG_WARN_SUSPICIOUS_MOVE = YES;
249 | CLANG_WARN_UNREACHABLE_CODE = YES;
250 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
251 | ENABLE_STRICT_OBJC_MSGSEND = YES;
252 | ENABLE_TESTABILITY = YES;
253 | GCC_NO_COMMON_BLOCKS = YES;
254 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
255 | GCC_WARN_ABOUT_RETURN_TYPE = YES;
256 | GCC_WARN_UNDECLARED_SELECTOR = YES;
257 | GCC_WARN_UNINITIALIZED_AUTOS = YES;
258 | GCC_WARN_UNUSED_FUNCTION = YES;
259 | GCC_WARN_UNUSED_VARIABLE = YES;
260 | ONLY_ACTIVE_ARCH = YES;
261 | };
262 | name = Debug;
263 | };
264 | 592FF8B018ECBD3600C164F8 /* Release */ = {
265 | isa = XCBuildConfiguration;
266 | buildSettings = {
267 | CLANG_WARN_BOOL_CONVERSION = YES;
268 | CLANG_WARN_CONSTANT_CONVERSION = YES;
269 | CLANG_WARN_EMPTY_BODY = YES;
270 | CLANG_WARN_ENUM_CONVERSION = YES;
271 | CLANG_WARN_INFINITE_RECURSION = YES;
272 | CLANG_WARN_INT_CONVERSION = YES;
273 | CLANG_WARN_SUSPICIOUS_MOVE = YES;
274 | CLANG_WARN_UNREACHABLE_CODE = YES;
275 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
276 | ENABLE_STRICT_OBJC_MSGSEND = YES;
277 | GCC_NO_COMMON_BLOCKS = YES;
278 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
279 | GCC_WARN_ABOUT_RETURN_TYPE = YES;
280 | GCC_WARN_UNDECLARED_SELECTOR = YES;
281 | GCC_WARN_UNINITIALIZED_AUTOS = YES;
282 | GCC_WARN_UNUSED_FUNCTION = YES;
283 | GCC_WARN_UNUSED_VARIABLE = YES;
284 | };
285 | name = Release;
286 | };
287 | 592FF8DF18ECBD7600C164F8 /* Debug */ = {
288 | isa = XCBuildConfiguration;
289 | buildSettings = {
290 | ALWAYS_SEARCH_USER_PATHS = NO;
291 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
292 | ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage;
293 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
294 | CLANG_CXX_LIBRARY = "compiler-default";
295 | CLANG_ENABLE_MODULES = YES;
296 | CLANG_ENABLE_OBJC_ARC = YES;
297 | CLANG_WARN_BOOL_CONVERSION = YES;
298 | CLANG_WARN_CONSTANT_CONVERSION = YES;
299 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
300 | CLANG_WARN_EMPTY_BODY = YES;
301 | CLANG_WARN_ENUM_CONVERSION = YES;
302 | CLANG_WARN_INT_CONVERSION = YES;
303 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
304 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
305 | "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
306 | COPY_PHASE_STRIP = NO;
307 | DEVELOPMENT_TEAM = 6F3F52S49T;
308 | ENABLE_BITCODE = NO;
309 | FRAMEWORK_SEARCH_PATHS = "$(inherited)";
310 | GCC_C_LANGUAGE_STANDARD = gnu99;
311 | GCC_DYNAMIC_NO_PIC = NO;
312 | GCC_OPTIMIZATION_LEVEL = 0;
313 | GCC_PRECOMPILE_PREFIX_HEADER = YES;
314 | GCC_PREFIX_HEADER = "";
315 | GCC_PREPROCESSOR_DEFINITIONS = (
316 | "DEBUG=1",
317 | "$(inherited)",
318 | );
319 | GCC_SYMBOLS_PRIVATE_EXTERN = NO;
320 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
321 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
322 | GCC_WARN_UNDECLARED_SELECTOR = YES;
323 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
324 | GCC_WARN_UNUSED_FUNCTION = YES;
325 | GCC_WARN_UNUSED_VARIABLE = YES;
326 | HEADER_SEARCH_PATHS = (
327 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/gen/proto",
328 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/downloads/eigen",
329 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/downloads",
330 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/downloads/protobuf/src/",
331 | "$(SRCROOT)/../../tensorflow/",
332 | );
333 | INFOPLIST_FILE = "$(SRCROOT)/Info.plist";
334 | IPHONEOS_DEPLOYMENT_TARGET = 9.2;
335 | LIBRARY_SEARCH_PATHS = (
336 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/gen/lib",
337 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/gen/protobuf_ios/lib",
338 | );
339 | ONLY_ACTIVE_ARCH = NO;
340 | OTHER_LDFLAGS = (
341 | "-force_load",
342 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/gen/lib/libtensorflow-core.a",
343 | );
344 | PRODUCT_BUNDLE_IDENTIFIER = com.stratospark.CameraExample;
345 | PRODUCT_NAME = "$(TARGET_NAME)";
346 | SDKROOT = iphoneos;
347 | TARGETED_DEVICE_FAMILY = "1,2";
348 | VALID_ARCHS = "arm64 armv7 armv7s";
349 | WRAPPER_EXTENSION = app;
350 | };
351 | name = Debug;
352 | };
353 | 592FF8E018ECBD7600C164F8 /* Release */ = {
354 | isa = XCBuildConfiguration;
355 | buildSettings = {
356 | ALWAYS_SEARCH_USER_PATHS = NO;
357 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
358 | ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage;
359 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
360 | CLANG_CXX_LIBRARY = "compiler-default";
361 | CLANG_ENABLE_MODULES = YES;
362 | CLANG_ENABLE_OBJC_ARC = YES;
363 | CLANG_WARN_BOOL_CONVERSION = YES;
364 | CLANG_WARN_CONSTANT_CONVERSION = YES;
365 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
366 | CLANG_WARN_EMPTY_BODY = YES;
367 | CLANG_WARN_ENUM_CONVERSION = YES;
368 | CLANG_WARN_INT_CONVERSION = YES;
369 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
370 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
371 | "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
372 | COPY_PHASE_STRIP = YES;
373 | DEVELOPMENT_TEAM = 6F3F52S49T;
374 | ENABLE_BITCODE = NO;
375 | ENABLE_NS_ASSERTIONS = NO;
376 | FRAMEWORK_SEARCH_PATHS = "$(inherited)";
377 | GCC_C_LANGUAGE_STANDARD = gnu99;
378 | GCC_PRECOMPILE_PREFIX_HEADER = YES;
379 | GCC_PREFIX_HEADER = "";
380 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
381 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
382 | GCC_WARN_UNDECLARED_SELECTOR = YES;
383 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
384 | GCC_WARN_UNUSED_FUNCTION = YES;
385 | GCC_WARN_UNUSED_VARIABLE = YES;
386 | HEADER_SEARCH_PATHS = (
387 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/gen/proto",
388 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/downloads/eigen",
389 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/downloads",
390 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/downloads/protobuf/src/",
391 | "$(SRCROOT)/../../tensorflow/",
392 | );
393 | INFOPLIST_FILE = "$(SRCROOT)/Info.plist";
394 | IPHONEOS_DEPLOYMENT_TARGET = 9.2;
395 | LIBRARY_SEARCH_PATHS = (
396 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/gen/lib",
397 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/gen/protobuf_ios/lib",
398 | );
399 | ONLY_ACTIVE_ARCH = NO;
400 | OTHER_LDFLAGS = (
401 | "-force_load",
402 | "$(SRCROOT)/../../tensorflow/tensorflow/contrib/makefile/gen/lib/libtensorflow-core.a",
403 | );
404 | PRODUCT_BUNDLE_IDENTIFIER = com.stratospark.CameraExample;
405 | PRODUCT_NAME = "$(TARGET_NAME)";
406 | SDKROOT = iphoneos;
407 | TARGETED_DEVICE_FAMILY = "1,2";
408 | VALIDATE_PRODUCT = YES;
409 | VALID_ARCHS = "arm64 armv7 armv7s";
410 | WRAPPER_EXTENSION = app;
411 | };
412 | name = Release;
413 | };
414 | /* End XCBuildConfiguration section */
415 |
416 | /* Begin XCConfigurationList section */
417 | 592FF8AE18ECBD3600C164F8 /* Build configuration list for PBXProject "camera_example" */ = {
418 | isa = XCConfigurationList;
419 | buildConfigurations = (
420 | 592FF8AF18ECBD3600C164F8 /* Debug */,
421 | 592FF8B018ECBD3600C164F8 /* Release */,
422 | );
423 | defaultConfigurationIsVisible = 0;
424 | defaultConfigurationName = Release;
425 | };
426 | 592FF8E318ECBD7600C164F8 /* Build configuration list for PBXNativeTarget "CameraExample" */ = {
427 | isa = XCConfigurationList;
428 | buildConfigurations = (
429 | 592FF8DF18ECBD7600C164F8 /* Debug */,
430 | 592FF8E018ECBD7600C164F8 /* Release */,
431 | );
432 | defaultConfigurationIsVisible = 0;
433 | defaultConfigurationName = Release;
434 | };
435 | /* End XCConfigurationList section */
436 | };
437 | rootObject = 592FF8AB18ECBD3600C164F8 /* Project object */;
438 | }
439 |
--------------------------------------------------------------------------------
/camera/CameraExampleViewController.mm:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google Inc. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #import <AVFoundation/AVFoundation.h>
16 | #import <AssetsLibrary/AssetsLibrary.h>
17 | #import <CoreImage/CoreImage.h>
18 | #import <ImageIO/ImageIO.h>
19 | #import "CameraExampleViewController.h"
20 |
21 | #include <sys/time.h>
22 |
23 | #include "tensorflow_utils.h"
24 | #include <string>
25 |
26 | // If you have your own model, modify this to the file name, and make sure
27 | // you've added the file to your app resources too.
28 | static NSString* model_file_name = @"mem_graph";
29 | static NSString* model_file_type = @"pb";
30 | // This controls whether we'll be loading a plain GraphDef proto, or a
31 | // file created by the convert_graphdef_memmapped_format utility that wraps a
32 | // GraphDef and parameter file that can be mapped into memory from file to
33 | // reduce overall memory usage.
34 | const bool model_uses_memory_mapping = true;
35 | // If you have your own model, point this to the labels file.
36 | static NSString* labels_file_name = @"food101_labels";
37 | static NSString* labels_file_type = @"txt";
38 | // These dimensions need to match those the model was trained with.
39 | const int wanted_input_width = 299;
40 | const int wanted_input_height = 299;
41 | const int wanted_input_channels = 3;
42 | const float input_mean = 128.0f;
43 | const float input_std = 128.0f;
44 | const std::string input_layer_name = "input_1";
45 | const std::string output_layer_name = "Softmax";
46 |
47 | static void *AVCaptureStillImageIsCapturingStillImageContext =
48 | &AVCaptureStillImageIsCapturingStillImageContext;
49 |
50 | @interface CameraExampleViewController (InternalMethods)
51 | - (void)setupAVCapture;
52 | - (void)teardownAVCapture;
53 | @end
54 |
55 | @implementation CameraExampleViewController
56 |
57 | - (void)setupAVCapture {
58 | NSError *error = nil;
59 |
60 | session = [AVCaptureSession new];
61 | if ([[UIDevice currentDevice] userInterfaceIdiom] ==
62 | UIUserInterfaceIdiomPhone)
63 | [session setSessionPreset:AVCaptureSessionPreset640x480];
64 | else
65 | [session setSessionPreset:AVCaptureSessionPresetPhoto];
66 |
67 | AVCaptureDevice *device =
68 | [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
69 | AVCaptureDeviceInput *deviceInput =
70 | [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
71 | assert(error == nil);
72 |
73 | isUsingFrontFacingCamera = NO;
74 | if ([session canAddInput:deviceInput]) [session addInput:deviceInput];
75 |
76 | stillImageOutput = [AVCaptureStillImageOutput new];
77 | [stillImageOutput
78 | addObserver:self
79 | forKeyPath:@"capturingStillImage"
80 | options:NSKeyValueObservingOptionNew
81 | context:(void *)(AVCaptureStillImageIsCapturingStillImageContext)];
82 | if ([session canAddOutput:stillImageOutput])
83 | [session addOutput:stillImageOutput];
84 |
85 | videoDataOutput = [AVCaptureVideoDataOutput new];
86 |
87 | NSDictionary *rgbOutputSettings = [NSDictionary
88 | dictionaryWithObject:[NSNumber numberWithInt:kCMPixelFormat_32BGRA]
89 | forKey:(id)kCVPixelBufferPixelFormatTypeKey];
90 | [videoDataOutput setVideoSettings:rgbOutputSettings];
91 | [videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
92 | videoDataOutputQueue =
93 | dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
94 | [videoDataOutput setSampleBufferDelegate:self queue:videoDataOutputQueue];
95 |
96 | if ([session canAddOutput:videoDataOutput])
97 | [session addOutput:videoDataOutput];
98 | [[videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];
99 |
100 | previewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:session];
101 | [previewLayer setBackgroundColor:[[UIColor blackColor] CGColor]];
102 | [previewLayer setVideoGravity:AVLayerVideoGravityResizeAspect];
103 | CALayer *rootLayer = [previewView layer];
104 | [rootLayer setMasksToBounds:YES];
105 | [previewLayer setFrame:[rootLayer bounds]];
106 | [rootLayer addSublayer:previewLayer];
107 | [session startRunning];
108 |
109 | if (error) {
110 | NSString *title = \
111 | [NSString stringWithFormat:@"Failed with error %d", (int)[error code]];
112 | UIAlertController *alertController = \
113 | [UIAlertController alertControllerWithTitle:title
114 | message:[error localizedDescription]
115 | preferredStyle:UIAlertControllerStyleAlert];
116 | UIAlertAction *dismiss = \
117 | [UIAlertAction actionWithTitle:@"Dismiss"
118 | style:UIAlertActionStyleDefault
119 | handler:nil];
120 | [alertController addAction:dismiss];
121 | [self presentViewController:alertController
122 | animated:YES
123 | completion:nil];
124 | [self teardownAVCapture];
125 | }
126 | }
127 |
128 | - (void)teardownAVCapture {
129 | [stillImageOutput removeObserver:self forKeyPath:@"capturingStillImage"];
130 | [previewLayer removeFromSuperlayer];
131 | }
132 |
133 | - (void)observeValueForKeyPath:(NSString *)keyPath
134 | ofObject:(id)object
135 | change:(NSDictionary *)change
136 | context:(void *)context {
137 | if (context == AVCaptureStillImageIsCapturingStillImageContext) {
138 | BOOL isCapturingStillImage =
139 | [[change objectForKey:NSKeyValueChangeNewKey] boolValue];
140 |
141 | if (isCapturingStillImage) {
142 | // do flash bulb like animation
143 | flashView = [[UIView alloc] initWithFrame:[previewView frame]];
144 | [flashView setBackgroundColor:[UIColor whiteColor]];
145 | [flashView setAlpha:0.f];
146 | [[[self view] window] addSubview:flashView];
147 |
148 | [UIView animateWithDuration:.4f
149 | animations:^{
150 | [flashView setAlpha:1.f];
151 | }];
152 | } else {
153 | [UIView animateWithDuration:.4f
154 | animations:^{
155 | [flashView setAlpha:0.f];
156 | }
157 | completion:^(BOOL finished) {
158 | [flashView removeFromSuperview];
159 | flashView = nil;
160 | }];
161 | }
162 | }
163 | }
164 |
165 | - (AVCaptureVideoOrientation)avOrientationForDeviceOrientation:
166 | (UIDeviceOrientation)deviceOrientation {
167 | AVCaptureVideoOrientation result =
168 | (AVCaptureVideoOrientation)(deviceOrientation);
169 | if (deviceOrientation == UIDeviceOrientationLandscapeLeft)
170 | result = AVCaptureVideoOrientationLandscapeRight;
171 | else if (deviceOrientation == UIDeviceOrientationLandscapeRight)
172 | result = AVCaptureVideoOrientationLandscapeLeft;
173 | return result;
174 | }
175 |
176 | - (IBAction)takePicture:(id)sender {
177 | if ([session isRunning]) {
178 | [session stopRunning];
179 | [sender setTitle:@"Continue" forState:UIControlStateNormal];
180 |
181 | flashView = [[UIView alloc] initWithFrame:[previewView frame]];
182 | [flashView setBackgroundColor:[UIColor whiteColor]];
183 | [flashView setAlpha:0.f];
184 | [[[self view] window] addSubview:flashView];
185 |
186 | [UIView animateWithDuration:.2f
187 | animations:^{
188 | [flashView setAlpha:1.f];
189 | }
190 | completion:^(BOOL finished) {
191 | [UIView animateWithDuration:.2f
192 | animations:^{
193 | [flashView setAlpha:0.f];
194 | }
195 | completion:^(BOOL finished) {
196 | [flashView removeFromSuperview];
197 | flashView = nil;
198 | }];
199 | }];
200 |
201 | } else {
202 | [session startRunning];
203 | [sender setTitle:@"Freeze Frame" forState:UIControlStateNormal];
204 | }
205 | }
206 |
207 | + (CGRect)videoPreviewBoxForGravity:(NSString *)gravity
208 | frameSize:(CGSize)frameSize
209 | apertureSize:(CGSize)apertureSize {
210 | CGFloat apertureRatio = apertureSize.height / apertureSize.width;
211 | CGFloat viewRatio = frameSize.width / frameSize.height;
212 |
213 | CGSize size = CGSizeZero;
214 | if ([gravity isEqualToString:AVLayerVideoGravityResizeAspectFill]) {
215 | if (viewRatio > apertureRatio) {
216 | size.width = frameSize.width;
217 | size.height =
218 | apertureSize.width * (frameSize.width / apertureSize.height);
219 | } else {
220 | size.width =
221 | apertureSize.height * (frameSize.height / apertureSize.width);
222 | size.height = frameSize.height;
223 | }
224 | } else if ([gravity isEqualToString:AVLayerVideoGravityResizeAspect]) {
225 | if (viewRatio > apertureRatio) {
226 | size.width =
227 | apertureSize.height * (frameSize.height / apertureSize.width);
228 | size.height = frameSize.height;
229 | } else {
230 | size.width = frameSize.width;
231 | size.height =
232 | apertureSize.width * (frameSize.width / apertureSize.height);
233 | }
234 | } else if ([gravity isEqualToString:AVLayerVideoGravityResize]) {
235 | size.width = frameSize.width;
236 | size.height = frameSize.height;
237 | }
238 |
239 | CGRect videoBox;
240 | videoBox.size = size;
241 | if (size.width < frameSize.width)
242 | videoBox.origin.x = (frameSize.width - size.width) / 2;
243 | else
244 | videoBox.origin.x = (size.width - frameSize.width) / 2;
245 |
246 | if (size.height < frameSize.height)
247 | videoBox.origin.y = (frameSize.height - size.height) / 2;
248 | else
249 | videoBox.origin.y = (size.height - frameSize.height) / 2;
250 |
251 | return videoBox;
252 | }
253 |
254 | - (void)captureOutput:(AVCaptureOutput *)captureOutput
255 | didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
256 | fromConnection:(AVCaptureConnection *)connection {
257 | CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
258 | CFRetain(pixelBuffer);
259 | [self runCNNOnFrame:pixelBuffer];
260 | CFRelease(pixelBuffer);
261 | }
262 |
263 | - (void)runCNNOnFrame:(CVPixelBufferRef)pixelBuffer {
264 | assert(pixelBuffer != NULL);
265 |
266 | OSType sourcePixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer);
267 | int doReverseChannels;
268 | if (kCVPixelFormatType_32ARGB == sourcePixelFormat) {
269 | doReverseChannels = 1;
270 | } else if (kCVPixelFormatType_32BGRA == sourcePixelFormat) {
271 | doReverseChannels = 0;
272 | } else {
273 | assert(false); // Unknown source format
274 | }
275 |
276 | const int sourceRowBytes = (int)CVPixelBufferGetBytesPerRow(pixelBuffer);
277 | const int image_width = (int)CVPixelBufferGetWidth(pixelBuffer);
278 | const int fullHeight = (int)CVPixelBufferGetHeight(pixelBuffer);
279 |
280 | CVPixelBufferLockFlags unlockFlags = kNilOptions;
281 | CVPixelBufferLockBaseAddress(pixelBuffer, unlockFlags);
282 |
283 | unsigned char *sourceBaseAddr =
284 | (unsigned char *)(CVPixelBufferGetBaseAddress(pixelBuffer));
285 | int image_height;
286 | unsigned char *sourceStartAddr;
287 | if (fullHeight <= image_width) {
288 | image_height = fullHeight;
289 | sourceStartAddr = sourceBaseAddr;
290 | } else {
291 | image_height = image_width;
292 | const int marginY = ((fullHeight - image_width) / 2);
293 | sourceStartAddr = (sourceBaseAddr + (marginY * sourceRowBytes));
294 | }
295 | const int image_channels = 4;
296 |
297 | assert(image_channels >= wanted_input_channels);
298 | tensorflow::Tensor image_tensor(
299 | tensorflow::DT_FLOAT,
300 | tensorflow::TensorShape(
301 | {1, wanted_input_height, wanted_input_width, wanted_input_channels}));
302 | auto image_tensor_mapped = image_tensor.tensor<float, 4>();
303 | tensorflow::uint8 *in = sourceStartAddr;
304 | float *out = image_tensor_mapped.data();
305 | for (int y = 0; y < wanted_input_height; ++y) {
306 | float *out_row = out + (y * wanted_input_width * wanted_input_channels);
307 | for (int x = 0; x < wanted_input_width; ++x) {
308 | const int in_x = (y * image_width) / wanted_input_width;
309 | const int in_y = (x * image_height) / wanted_input_height;
310 | tensorflow::uint8 *in_pixel =
311 | in + (in_y * image_width * image_channels) + (in_x * image_channels);
312 | float *out_pixel = out_row + (x * wanted_input_channels);
313 | for (int c = 0; c < wanted_input_channels; ++c) {
314 | //out_pixel[c] = (in_pixel[c] - input_mean) / input_std;
315 | // InceptionV3 preprocessing
316 | out_pixel[c] = ((in_pixel[c] / 255.0) - 0.5) * 2.0;
317 | }
318 | }
319 | }
320 |
321 | CVPixelBufferUnlockBaseAddress(pixelBuffer, unlockFlags);
322 |
323 | LOG(ERROR) << "OUT";
324 | if (tf_session.get()) {
325 | std::vector<tensorflow::Tensor> outputs;
326 | LOG(ERROR) << "***";
327 |
328 | tensorflow::Status run_status = tf_session->Run(
329 | {{input_layer_name, image_tensor}}, {output_layer_name}, {}, &outputs);
330 | if (!run_status.ok()) {
331 | LOG(ERROR) << "Running model failed:" << run_status;
332 | } else {
333 | tensorflow::Tensor *output = &outputs[0];
334 | auto predictions = output->flat<float>();
335 |
336 | NSMutableDictionary *newValues = [NSMutableDictionary dictionary];
337 | for (int index = 0; index < predictions.size(); index += 1) {
338 | const float predictionValue = predictions(index);
339 | if (predictionValue > 0.05f) {
340 | std::string label = labels[index % predictions.size()];
341 | NSString *labelObject = [NSString stringWithUTF8String:label.c_str()];
342 | NSNumber *valueObject = [NSNumber numberWithFloat:predictionValue];
343 | LOG(ERROR) << label << ": " << predictionValue;
344 | [newValues setObject:valueObject forKey:labelObject];
345 | }
346 | }
347 | dispatch_async(dispatch_get_main_queue(), ^(void) {
348 | [self setPredictionValues:newValues];
349 | });
350 | }
351 | }
353 | }
354 |
355 | - (void)dealloc {
356 | [self teardownAVCapture];
357 | }
358 |
359 | // use front/back camera
360 | - (IBAction)switchCameras:(id)sender {
361 | AVCaptureDevicePosition desiredPosition;
362 | if (isUsingFrontFacingCamera)
363 | desiredPosition = AVCaptureDevicePositionBack;
364 | else
365 | desiredPosition = AVCaptureDevicePositionFront;
366 |
367 | for (AVCaptureDevice *d in
368 | [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
369 | if ([d position] == desiredPosition) {
370 | [[previewLayer session] beginConfiguration];
371 | AVCaptureDeviceInput *input =
372 | [AVCaptureDeviceInput deviceInputWithDevice:d error:nil];
373 | for (AVCaptureInput *oldInput in [[previewLayer session] inputs]) {
374 | [[previewLayer session] removeInput:oldInput];
375 | }
376 | [[previewLayer session] addInput:input];
377 | [[previewLayer session] commitConfiguration];
378 | break;
379 | }
380 | }
381 | isUsingFrontFacingCamera = !isUsingFrontFacingCamera;
382 | }
383 |
384 | - (void)didReceiveMemoryWarning {
385 | [super didReceiveMemoryWarning];
386 | }
387 |
388 | - (void)viewDidLoad {
389 | [super viewDidLoad];
390 | square = [UIImage imageNamed:@"squarePNG"];
391 | synth = [[AVSpeechSynthesizer alloc] init];
392 | labelLayers = [[NSMutableArray alloc] init];
393 | oldPredictionValues = [[NSMutableDictionary alloc] init];
394 |
395 | tensorflow::Status load_status;
396 | if (model_uses_memory_mapping) {
397 | load_status = LoadMemoryMappedModel(
398 | model_file_name, model_file_type, &tf_session, &tf_memmapped_env);
399 | } else {
400 | load_status = LoadModel(model_file_name, model_file_type, &tf_session);
401 | }
402 | if (!load_status.ok()) {
403 | LOG(FATAL) << "Couldn't load model: " << load_status;
404 | }
405 |
406 | tensorflow::Status labels_status =
407 | LoadLabels(labels_file_name, labels_file_type, &labels);
408 | if (!labels_status.ok()) {
409 | LOG(FATAL) << "Couldn't load labels: " << labels_status;
410 | }
411 | [self setupAVCapture];
412 | }
413 |
414 | - (void)viewDidUnload {
415 | [super viewDidUnload];
416 | }
417 |
418 | - (void)viewWillAppear:(BOOL)animated {
419 | [super viewWillAppear:animated];
420 | }
421 |
422 | - (void)viewDidAppear:(BOOL)animated {
423 | [super viewDidAppear:animated];
424 | }
425 |
426 | - (void)viewWillDisappear:(BOOL)animated {
427 | [super viewWillDisappear:animated];
428 | }
429 |
430 | - (void)viewDidDisappear:(BOOL)animated {
431 | [super viewDidDisappear:animated];
432 | }
433 |
434 | - (BOOL)shouldAutorotateToInterfaceOrientation:
435 | (UIInterfaceOrientation)interfaceOrientation {
436 | return (interfaceOrientation == UIInterfaceOrientationPortrait);
437 | }
438 |
439 | - (BOOL)prefersStatusBarHidden {
440 | return YES;
441 | }
442 |
443 | - (void)setPredictionValues:(NSDictionary *)newValues {
444 | const float decayValue = 0.0f; //0.75f;
445 | const float updateValue = 1.0f; //0.25f;
446 | const float minimumThreshold = 0.2f; //0.01f;
447 |
448 | NSMutableDictionary *decayedPredictionValues =
449 | [[NSMutableDictionary alloc] init];
450 | for (NSString *label in oldPredictionValues) {
451 | NSNumber *oldPredictionValueObject =
452 | [oldPredictionValues objectForKey:label];
453 | const float oldPredictionValue = [oldPredictionValueObject floatValue];
454 | const float decayedPredictionValue = (oldPredictionValue * decayValue);
455 | if (decayedPredictionValue > minimumThreshold) {
456 | NSNumber *decayedPredictionValueObject =
457 | [NSNumber numberWithFloat:decayedPredictionValue];
458 | [decayedPredictionValues setObject:decayedPredictionValueObject
459 | forKey:label];
460 | }
461 | }
462 | oldPredictionValues = decayedPredictionValues;
463 |
464 | for (NSString *label in newValues) {
465 | NSNumber *newPredictionValueObject = [newValues objectForKey:label];
466 | NSNumber *oldPredictionValueObject =
467 | [oldPredictionValues objectForKey:label];
468 | if (!oldPredictionValueObject) {
469 | oldPredictionValueObject = [NSNumber numberWithFloat:0.0f];
470 | }
471 | const float newPredictionValue = [newPredictionValueObject floatValue];
472 | const float oldPredictionValue = [oldPredictionValueObject floatValue];
473 | const float updatedPredictionValue =
474 | (oldPredictionValue + (newPredictionValue * updateValue));
475 | NSNumber *updatedPredictionValueObject =
476 | [NSNumber numberWithFloat:updatedPredictionValue];
477 | [oldPredictionValues setObject:updatedPredictionValueObject forKey:label];
478 | }
479 | NSArray *candidateLabels = [NSMutableArray array];
480 | for (NSString *label in oldPredictionValues) {
481 | NSNumber *oldPredictionValueObject =
482 | [oldPredictionValues objectForKey:label];
483 | const float oldPredictionValue = [oldPredictionValueObject floatValue];
484 | if (oldPredictionValue > 0.05f) {
485 | NSDictionary *entry = @{
486 | @"label" : label,
487 | @"value" : oldPredictionValueObject
488 | };
489 | candidateLabels = [candidateLabels arrayByAddingObject:entry];
490 | }
491 | }
492 | NSSortDescriptor *sort =
493 | [NSSortDescriptor sortDescriptorWithKey:@"value" ascending:NO];
494 | NSArray *sortedLabels = [candidateLabels
495 | sortedArrayUsingDescriptors:[NSArray arrayWithObject:sort]];
496 |
497 | const float leftMargin = 10.0f;
498 | const float topMargin = 10.0f;
499 |
500 | const float valueWidth = 48.0f;
501 | const float valueHeight = 26.0f;
502 |
503 | const float labelWidth = 246.0f;
504 | const float labelHeight = 26.0f;
505 |
506 | const float labelMarginX = 5.0f;
507 | const float labelMarginY = 5.0f;
508 |
509 | [self removeAllLabelLayers];
510 |
511 | int labelCount = 0;
512 | for (NSDictionary *entry in sortedLabels) {
513 | NSString *label = [entry objectForKey:@"label"];
514 | NSNumber *valueObject = [entry objectForKey:@"value"];
515 | const float value = [valueObject floatValue];
516 |
517 | const float originY =
518 | (topMargin + ((labelHeight + labelMarginY) * labelCount));
519 |
520 | const int valuePercentage = (int)roundf(value * 100.0f);
521 |
522 | const float valueOriginX = leftMargin;
523 | NSString *valueText = [NSString stringWithFormat:@"%d%%", valuePercentage];
524 |
525 | [self addLabelLayerWithText:valueText
526 | originX:valueOriginX
527 | originY:originY
528 | width:valueWidth
529 | height:valueHeight
530 | alignment:kCAAlignmentRight];
531 |
532 | const float labelOriginX = (leftMargin + valueWidth + labelMarginX);
533 |
534 | [self addLabelLayerWithText:[label capitalizedString]
535 | originX:labelOriginX
536 | originY:originY
537 | width:labelWidth
538 | height:labelHeight
539 | alignment:kCAAlignmentLeft];
540 |
541 | if ((labelCount == 0) && (value > 0.5f)) {
542 | [self speak:[label capitalizedString]];
543 | }
544 |
545 | labelCount += 1;
546 | if (labelCount > 4) {
547 | break;
548 | }
549 | }
550 | }
551 |
552 | - (void)removeAllLabelLayers {
553 | for (CATextLayer *layer in labelLayers) {
554 | [layer removeFromSuperlayer];
555 | }
556 | [labelLayers removeAllObjects];
557 | }
558 |
559 | - (void)addLabelLayerWithText:(NSString *)text
560 | originX:(float)originX
561 | originY:(float)originY
562 | width:(float)width
563 | height:(float)height
564 | alignment:(NSString *)alignment {
565 | CFTypeRef font = (CFTypeRef) @"Menlo-Regular";
566 | const float fontSize = 20.0f;
567 |
568 | const float marginSizeX = 5.0f;
569 | const float marginSizeY = 2.0f;
570 |
571 | const CGRect backgroundBounds = CGRectMake(originX, originY, width, height);
572 |
573 | const CGRect textBounds =
574 | CGRectMake((originX + marginSizeX), (originY + marginSizeY),
575 | (width - (marginSizeX * 2)), (height - (marginSizeY * 2)));
576 |
577 | CATextLayer *background = [CATextLayer layer];
578 | [background setBackgroundColor:[UIColor blackColor].CGColor];
579 | [background setOpacity:0.5f];
580 | [background setFrame:backgroundBounds];
581 | background.cornerRadius = 5.0f;
582 |
583 | [[self.view layer] addSublayer:background];
584 | [labelLayers addObject:background];
585 |
586 | CATextLayer *layer = [CATextLayer layer];
587 | [layer setForegroundColor:[UIColor whiteColor].CGColor];
588 | [layer setFrame:textBounds];
589 | [layer setAlignmentMode:alignment];
590 | [layer setWrapped:YES];
591 | [layer setFont:font];
592 | [layer setFontSize:fontSize];
593 | layer.contentsScale = [[UIScreen mainScreen] scale];
594 | [layer setString:text];
595 |
596 | [[self.view layer] addSublayer:layer];
597 | [labelLayers addObject:layer];
598 | }
599 |
600 | - (void)setPredictionText:(NSString *)text withDuration:(float)duration {
601 | if (duration > 0.0) {
602 | CABasicAnimation *colorAnimation =
603 | [CABasicAnimation animationWithKeyPath:@"foregroundColor"];
604 | colorAnimation.duration = duration;
605 | colorAnimation.fillMode = kCAFillModeForwards;
606 | colorAnimation.removedOnCompletion = NO;
607 | colorAnimation.fromValue = (id)[UIColor darkGrayColor].CGColor;
608 | colorAnimation.toValue = (id)[UIColor whiteColor].CGColor;
609 | colorAnimation.timingFunction =
610 | [CAMediaTimingFunction functionWithName:kCAMediaTimingFunctionLinear];
611 | [self.predictionTextLayer addAnimation:colorAnimation
612 | forKey:@"colorAnimation"];
613 | } else {
614 | self.predictionTextLayer.foregroundColor = [UIColor whiteColor].CGColor;
615 | }
616 |
617 | [self.predictionTextLayer removeFromSuperlayer];
618 | [[self.view layer] addSublayer:self.predictionTextLayer];
619 | [self.predictionTextLayer setString:text];
620 | }
621 |
622 | - (void)speak:(NSString *)words {
623 | if ([synth isSpeaking]) {
624 | return;
625 | }
626 | AVSpeechUtterance *utterance =
627 | [AVSpeechUtterance speechUtteranceWithString:words];
628 | utterance.voice = [AVSpeechSynthesisVoice voiceWithLanguage:@"en-US"];
629 | utterance.rate = 0.75 * AVSpeechUtteranceDefaultSpeechRate;
630 | [synth speakUtterance:utterance];
631 | }
632 |
633 | @end
634 |
--------------------------------------------------------------------------------
/Creating a Deep Learning iOS App with Keras and Tensorflow.md:
--------------------------------------------------------------------------------
1 |
2 | # Creating a Deep Learning iOS App with Keras and Tensorflow
3 |
4 |
5 | ```python
6 | from IPython.display import display, Image
7 | display(Image('./mobile.jpg'))
8 | ```
9 |
10 |
11 | ![jpeg](Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_1_0.jpeg)
12 |
13 |
14 | ## Introduction
15 |
16 | **CODE AVAILABLE @ https://github.com/stratospark/food-101-mobile**
17 |
18 | In a previous project, I showed how to train a Convolutional Neural Network to [classify food images using Keras/Tensorflow](http://blog.stratospark.com/deep-learning-applied-food-classification-deep-learning-keras.html). We also saw how to export the model to [Keras.js](https://github.com/transcranial/keras-js) for use in a HTML5/Javascript browser application.
19 |
20 | For this next writeup, I'll show how to take the same model and prepare it for use in a mobile app. I only have experience with iOS devices and only have an iPhone for testing, but the process of extracting, modifying, and serializing the computation graphs should apply for Android deployments as well.
21 |
22 | Here is a video capture of the app running on my development device, an iPhone 5s. BTW, all the food in the screenshots and video is *vegan*! ;)
23 |
24 |
25 |
26 | I originally trained the model using **Tensorflow 0.11.0 and Keras 1.1.2**.
27 |
28 | For this project, I am using the newer **Tensorflow 1.0.1 and Keras 1.2.2**.
29 |
30 | I am not aware of any incompatibilities between taking a model trained with an older version of Tensorflow and using it for inference in a newer version. However, I could be wrong. Of course, Keras 2 has come out since I started this project, and I have not had time to test with it.
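
For reference, both libraries expose standard `__version__` attributes, so the versions in play can be recorded directly in the notebook:

```python
import tensorflow as tf
import keras

# Expecting 1.0.1 and 1.2.2 for this writeup.
print(tf.__version__, keras.__version__)
```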
31 |
32 | ### Caveats (Major!)
33 |
34 | I consider the code here to be very hacky! There is not much documentation online about preparing Keras models for Mobile Tensorflow apps. I am also not an experienced iOS developer. I mainly wanted to prove to myself that this can work, then refine my approach in the future. I would appreciate feedback on any of these issues:
35 |
36 | 1. When running the app on the device, the inference randomly stops working. The video feed still updates, but no more predictions are made. I can't seem to find a way to reproduce this issue reliably, but it is very common. I noticed a recent open issue that may be related: [Tensorflow freezes on iOS during Session::Run](https://github.com/tensorflow/tensorflow/issues/7108)
37 | 1. I do not know if I am getting full performance from Tensorflow on iOS. I am doing a standard build of Tensorflow from my MacBook Pro. There are apparently some [undocumented flags that can turn on optimization](http://stackoverflow.com/questions/37971426/tensorflow-running-slow-on-ios), but I don't know if they apply to the current version. In any case, I do seem to achieve 1-1.5 sec per inference on my iPhone 5s.
38 | 1. The level of accuracy that I achieved in my previous writeup depended on 10-crops at particular positions in an image. I am sticking with whatever came with the example app sample code to handle resizing of a single crop. I don't know how the portrait orientation of the camera affects accuracy when resizing the image to the 299x299 size needed by the InceptionV3 network.
39 | 1. I don't know if I'm dealing with dropout properly; as a result, the differences between the original model's predictions and the modified model's predictions are slightly larger than I would have expected. In a production system, I would **definitely** want to run through my test images on the device in order to compare test set accuracy with the original model.
40 | 1. I wasn't able to get weight quantization to work properly. I may need to manually find the min/max ranges.
41 | 1. I am getting non-deterministic results when evaluating the optimized models from disk to compare predictions.
42 |
43 | ### Helpful Resources
44 |
45 | Before trying to replicate what I have done here, know that there are probably better ways of doing this!
46 |
47 | Here are some resources that can help you explore other paths, some that achieve much better performance than what I have here.
48 |
49 | * [Squeezing Deep Learning Into Mobile Phones](https://www.slideshare.net/anirudhkoul/squeezing-deep-learning-into-mobile-phones), excellent slides by Anirudh Koul that summarize various options available for mobile Deep Learning apps. I like how he breaks it down depending on how much time you want to invest (1 day, 1 week, 1 month, 6 months, etc.)
50 | * Matthijs Hollemans's [Machinethink Blog](http://machinethink.net/blog/). Great, in-depth blog posts about Deep Learning on iOS. Includes coverage of BNNS, Metal Performance Shaders, etc.
51 | * Matt Rajca's articles: [Getting Started with Deep MNIST and TensorFlow on iOS](http://www.mattrajca.com/2016/11/25/getting-started-with-deep-mnist-and-tensorflow-on-ios.html) and [Speeding Up TensorFlow with Metal Performance Shaders](http://www.mattrajca.com/2016/11/26/speeding-up-tensorflow-with-metal-performance-shaders.html)
52 | * Apple Developer example, [MetalImageRecognition: Performing Image Recognition with Inception_v3 Network using Metal Performance Shaders Convolutional Neural Network routines](https://developer.apple.com/library/content/samplecode/MetalImageRecognition/Introduction/Intro.html). Code that implements the same InceptionV3 network that I am utilizing here, but probably much, much faster!
53 |
54 | And though I don't believe it supports iOS at the moment, keep an eye out for the [Tensorflow XLA compiler](https://www.tensorflow.org/versions/master/experimental/xla/). In the future, we might be able to do mobile-specific builds that execute our computation graphs on a device without having to bundle the entire Tensorflow inference library. This could allow for dramatic size reductions and possibly speedups!
55 |
56 | All in all, Deep Learning on mobile is looking bright! Hopefully it becomes easier and more straightforward to get your trained models running efficiently on a device.
57 |
58 | ## Step by Step
59 |
60 | First of all, download this trained Keras model and move it into the `model_export` folder if you want to follow along:
61 |
62 | `wget https://s3.amazonaws.com/stratospark/food-101/model4b.10-0.68.hdf5`
63 |
64 | At a high level, we will need to:
65 |
66 | * Reload the Keras model from disk into the Tensorflow session.
67 | * Extract the computation graph from the session object.
68 | * Change all `Switch` nodes to pass the value of the `True` branch as an `Identity` op instead.
69 | * Bypass the `Dropout` nodes.
70 | * Update the `keras_learning_phase` `Placeholder` node to be a `Const` node always outputting Test mode.
71 | * Add the neuron weights to the graph as constants.
72 | * Serialize the graph to a .pb file.
73 | * Build the optimization tools with Bazel.
74 | * Run the graph optimization transformations.
75 | * Add the model to the Tensorflow iOS Camera sample project.
76 |
77 | ### Load Keras model and extract the `GraphDef`
78 |
79 |
80 | ```python
81 | import tensorflow as tf
82 | from keras.models import load_model, Model
83 | from keras import backend as K
84 |
85 | sess = tf.Session()
86 | K.set_session(sess)
87 | ```
88 |
89 | Using TensorFlow backend.
90 |
91 |
92 |
93 | ```python
94 | model = load_model('./model4b.10-0.68.hdf5')
95 | ```
96 |
97 |
98 | ```python
99 | gd = sess.graph.as_graph_def()
100 | print(len(gd.node), 'Nodes')
101 | gd.node[:2]
102 | ```
103 |
104 | (40271, 'Nodes')
105 |
106 |
107 |
108 |
109 |
110 | [name: "input_1"
111 | op: "Placeholder"
112 | attr {
113 | key: "dtype"
114 | value {
115 | type: DT_FLOAT
116 | }
117 | }
118 | attr {
119 | key: "shape"
120 | value {
121 | shape {
122 | }
123 | }
124 | }, name: "random_uniform/shape"
125 | op: "Const"
126 | attr {
127 | key: "dtype"
128 | value {
129 | type: DT_INT32
130 | }
131 | }
132 | attr {
133 | key: "value"
134 | value {
135 | tensor {
136 | dtype: DT_INT32
137 | tensor_shape {
138 | dim {
139 | size: 4
140 | }
141 | }
142 | tensor_content: "\003\000\000\000\003\000\000\000\003\000\000\000 \000\000\000"
143 | }
144 | }
145 | }]
146 |
147 |
148 |
149 | ### Testing Model
150 |
151 | Let's sanity check our Keras model and save the predicted values. We will use this later on to compare against the optimized mobile model.
152 |
153 |
154 | ```python
155 | x = tf.placeholder(tf.float32, shape=model.get_input_shape_at(0))
156 | ```
157 |
158 |
159 | ```python
160 | y = model(x)
161 | ```
162 |
163 |
164 | ```python
165 | import numpy as np
166 | import matplotlib.pyplot as plt
167 |
168 | %matplotlib inline
169 | ```
170 |
171 |
172 | ```python
173 | img = plt.imread('sushi.png')
174 | ```
175 |
176 |
177 | ```python
178 | plt.imshow(img)
179 | ```
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 | ![png](Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_23_1.png)
190 |
191 |
192 |
193 | ```python
194 | def preprocess_input(x):
195 | x_copy = np.copy(x)
196 | x_copy -= 0.5
197 | x_copy *= 2.
198 | return x_copy
199 | ```
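
As a quick check, this maps pixel values from [0, 1] into [-1, 1], matching the per-pixel preprocessing in the iOS code (`((in_pixel[c] / 255.0) - 0.5) * 2.0`):

```python
# The endpoints and midpoint of [0, 1] map to -1, 0, and 1.
assert preprocess_input(np.array([0.0, 0.5, 1.0])).tolist() == [-1.0, 0.0, 1.0]
```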
200 |
201 |
202 | ```python
203 | img_processed = preprocess_input(img)
204 | plt.imshow(img_processed)
205 | ```
206 |
207 |
208 |
209 |
210 |
211 |
212 |
213 |
214 |
215 | ![png](Creating a Deep Learning iOS App with Keras and Tensorflow_files/Creating a Deep Learning iOS App with Keras and Tensorflow_24_1.png)
216 |
217 |
218 |
219 | ```python
220 | imgs = np.expand_dims(img_processed, 0)
221 | ```
222 |
223 |
224 | ```python
225 | orig_scores = sess.run(y, feed_dict={x: imgs, K.learning_phase(): False})
226 | ```
227 |
228 |
229 | ```python
230 | def find_top_pred(scores):
231 | top_label_ix = np.argmax(scores) # label 95 is Sushi
232 | confidence = scores[0][top_label_ix]
233 | print('Label: {}, Confidence: {}'.format(top_label_ix, confidence))
234 |
235 | find_top_pred(orig_scores)
236 | ```
237 |
238 | Label: 95, Confidence: 0.998208284378
239 |
240 |
241 | ### Replace `Switch` Nodes with `Identity`
242 |
243 | There are many `Switch` nodes, which are introduced by Keras to route data through different paths in the computation graph depending on whether the model is training or testing.
244 |
245 | We are only going to use inference within the mobile app, and besides, the `Switch` op is not included in the default Tensorflow mobile builds.
246 |
247 | Therefore, we want to find all the `Switch` nodes and turn them into `Identity` nodes that simply pass their data input straight through.
248 |
249 |
250 | ```python
251 | nodes_with_switch_op = [x for x in gd.node if x.op.lower().find('switch') != -1]
252 | nodes_with_switch_op[0]
253 | ```
254 |
255 |
256 |
257 |
258 | name: "cond/Switch"
259 | op: "Switch"
260 | input: "keras_learning_phase"
261 | input: "keras_learning_phase"
262 | attr {
263 | key: "T"
264 | value {
265 | type: DT_BOOL
266 | }
267 | }
268 |
269 |
270 |
271 | Let's create a dict so we can look up the nodes by name:
272 |
273 |
274 | ```python
275 | nodes = {}
276 | for node in gd.node:
277 | nodes[node.name] = node
278 | ```
279 |
280 |
281 | ```python
282 | print(len(nodes_with_switch_op))
283 | [(n.name, [i for i in n.input]) for n in nodes_with_switch_op[:10]]
284 | ```
285 |
286 | 570
287 |
288 |
289 |
290 |
291 |
292 | [(u'cond/Switch', [u'keras_learning_phase', u'keras_learning_phase']),
293 | (u'cond/Switch_1', [u'batchnorm/add_1', u'cond/pred_id']),
294 | (u'cond/Switch_2', [u'batchnorm_1/add_1', u'cond/pred_id']),
295 | (u'cond_1/Switch', [u'keras_learning_phase', u'keras_learning_phase']),
296 | (u'cond_1/Switch_1', [u'batchnorm_2/add_1', u'cond_1/pred_id']),
297 | (u'cond_1/Switch_2', [u'batchnorm_3/add_1', u'cond_1/pred_id']),
298 | (u'cond_2/Switch', [u'keras_learning_phase', u'keras_learning_phase']),
299 | (u'cond_2/Switch_1', [u'batchnorm_4/add_1', u'cond_2/pred_id']),
300 | (u'cond_2/Switch_2', [u'batchnorm_5/add_1', u'cond_2/pred_id']),
301 | (u'cond_3/Switch', [u'keras_learning_phase', u'keras_learning_phase'])]
302 |
303 |
304 |
305 | Let's look up one of the `Switch` nodes, as well as one of its input nodes.
306 |
307 |
308 | ```python
309 | nodes['cond/Switch_1']
310 | ```
311 |
312 |
313 |
314 |
315 | name: "cond/Switch_1"
316 | op: "Switch"
317 | input: "batchnorm/add_1"
318 | input: "cond/pred_id"
319 | attr {
320 | key: "T"
321 | value {
322 | type: DT_FLOAT
323 | }
324 | }
325 | attr {
326 | key: "_class"
327 | value {
328 | list {
329 | s: "loc:@batchnorm/add_1"
330 | }
331 | }
332 | }
333 |
334 |
335 |
336 |
337 | ```python
338 | nodes['cond/pred_id']
339 | ```
340 |
341 |
342 |
343 |
344 | name: "cond/pred_id"
345 | op: "Identity"
346 | input: "keras_learning_phase"
347 | attr {
348 | key: "T"
349 | value {
350 | type: DT_BOOL
351 | }
352 | }
353 |
354 |
355 |
356 | We also stumble upon this `Merge` node, which takes the `Switch` node as an input:
357 |
358 |
359 | ```python
360 | for n in gd.node:
361 | if 'cond/Switch_2' in [i for i in n.input]:
362 | print n
363 | ```
364 |
365 | name: "cond/Merge"
366 | op: "Merge"
367 | input: "cond/Switch_2"
368 | input: "cond/Switch_1:1"
369 | attr {
370 | key: "N"
371 | value {
372 | i: 2
373 | }
374 | }
375 | attr {
376 | key: "T"
377 | value {
378 | type: DT_FLOAT
379 | }
380 | }
381 |
382 |
383 |
384 | Looking through the different nodes, we can see that some take a `Switch` node as an input. In some cases, there is a `:1` appended to the actual name of the node. The `:1` suffix refers to the node's second output: a `Switch` op has two outputs, with output 0 carrying the value when the predicate is false and output 1 carrying it when the predicate is true. Once the `Switch` nodes become `Identity` nodes, which have only a single output, inputs like `cond/Switch_1:1` no longer resolve, so we route those references to the plain `Switch` node name instead.
385 |
386 | Below, we change the inputs of all nodes that take in a `Switch:1` input to remove the last 2 characters:
387 |
388 |
389 | ```python
390 | nodes['cond/Merge']
391 | ```
392 |
393 |
394 |
395 |
396 | name: "cond/Merge"
397 | op: "Merge"
398 | input: "cond/Switch_2"
399 | input: "cond/Switch_1:1"
400 | attr {
401 | key: "N"
402 | value {
403 | i: 2
404 | }
405 | }
406 | attr {
407 | key: "T"
408 | value {
409 | type: DT_FLOAT
410 | }
411 | }
412 |
413 |
414 |
415 |
416 | ```python
417 | # nodes to reroute switch input
418 | for n in [x for x in gd.node]:
419 | ints = [i for i in n.input]
420 | endswith1 = [ii for ii in ints if ii.endswith(':1')] #and 'Switch' in ii]
421 | if len(endswith1) > 0:
422 | for index, inn in enumerate(n.input):
423 | if inn in endswith1:
424 | new_input = inn[:-2]
425 | n.input.remove(inn)
426 | n.input.insert(index, new_input)
427 | ```
428 |
429 | Finally, let's change those `Switch` nodes to `Identity` nodes, and also remove the input value that will not be used.
430 |
431 |
432 | ```python
433 | for n in nodes_with_switch_op:
434 | n.op = 'Identity'
435 | n.input.pop()
436 | ```
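
As a sanity check (reusing `gd` from above), both of these lists should now be empty: no `Switch` ops left, and no inputs still referencing a `:1` output:

```python
# Any leftover Switch ops or dangling `:1` references would mean the
# rewrites above missed a node.
print([n.name for n in gd.node if n.op == 'Switch'])
print([i for n in gd.node for i in n.input if i.endswith(':1')])
```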
437 |
438 | ### Bypass `Dropout` Nodes
439 |
440 | From various reports online, we need to remove `Dropout` nodes when running inference on mobile devices. I found these two places offering code guidance:
441 |
442 | * https://dato.ml/drop-dropout-from-frozen-model/
443 | * http://stackoverflow.com/questions/40358892/wipe-out-dropout-operations-from-tensorflow-graph
444 |
445 | In addition, there is a ticket opened for Tensorflow to automatically do this when running an optimize script: https://github.com/tensorflow/tensorflow/issues/5867
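
The common idea in those links is to reroute every node that consumes a dropout output so that it reads the dropout's input directly. A minimal sketch of that rerouting (the `bypass_node` helper is hypothetical; below I instead perform the equivalent surgery by hand on the `cond_94` subgraph):

```python
def bypass_node(graph_def, old_name, new_name):
    # Point every consumer of `old_name` at `new_name`, cutting the old
    # node out of the dataflow without deleting it from the GraphDef.
    for n in graph_def.node:
        for i, inp in enumerate(n.input):
            if inp == old_name:
                n.input[i] = new_name
```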
446 |
447 |
448 | ```python
449 | [(n.name, [i for i in n.input]) for n in gd.node if n.name.lower().find('dropout') != -1]
450 | ```
451 |
452 |
453 |
454 |
455 | [(u'cond_94/dropout/keep_prob', [u'^cond_94/switch_t']),
456 | (u'cond_94/dropout/Shape', [u'cond_94/mul']),
457 | (u'cond_94/dropout/random_uniform/min', [u'^cond_94/switch_t']),
458 | (u'cond_94/dropout/random_uniform/max', [u'^cond_94/switch_t']),
459 | (u'cond_94/dropout/random_uniform/RandomUniform', [u'cond_94/dropout/Shape']),
460 | (u'cond_94/dropout/random_uniform/sub',
461 | [u'cond_94/dropout/random_uniform/max',
462 | u'cond_94/dropout/random_uniform/min']),
463 | (u'cond_94/dropout/random_uniform/mul',
464 | [u'cond_94/dropout/random_uniform/RandomUniform',
465 | u'cond_94/dropout/random_uniform/sub']),
466 | (u'cond_94/dropout/random_uniform',
467 | [u'cond_94/dropout/random_uniform/mul',
468 | u'cond_94/dropout/random_uniform/min']),
469 | (u'cond_94/dropout/add',
470 | [u'cond_94/dropout/keep_prob', u'cond_94/dropout/random_uniform']),
471 | (u'cond_94/dropout/Floor', [u'cond_94/dropout/add']),
472 | (u'cond_94/dropout/div', [u'cond_94/mul', u'cond_94/dropout/keep_prob']),
473 | (u'cond_94/dropout/mul', [u'cond_94/dropout/div', u'cond_94/dropout/Floor']),
474 | (u'gradients/cond_94/dropout/mul_grad/Shape', [u'cond_94/dropout/div']),
475 | (u'gradients/cond_94/dropout/mul_grad/Shape_1', [u'cond_94/dropout/Floor']),
476 | (u'gradients/cond_94/dropout/mul_grad/BroadcastGradientArgs',
477 | [u'gradients/cond_94/dropout/mul_grad/Shape',
478 | u'gradients/cond_94/dropout/mul_grad/Shape_1']),
479 | (u'gradients/cond_94/dropout/mul_grad/mul',
480 | [u'gradients/cond_94/Merge_grad/cond_grad', u'cond_94/dropout/Floor']),
481 | (u'gradients/cond_94/dropout/mul_grad/Sum',
482 | [u'gradients/cond_94/dropout/mul_grad/mul',
483 | u'gradients/cond_94/dropout/mul_grad/BroadcastGradientArgs']),
484 | (u'gradients/cond_94/dropout/mul_grad/Reshape',
485 | [u'gradients/cond_94/dropout/mul_grad/Sum',
486 | u'gradients/cond_94/dropout/mul_grad/Shape']),
487 | (u'gradients/cond_94/dropout/mul_grad/mul_1',
488 | [u'cond_94/dropout/div', u'gradients/cond_94/Merge_grad/cond_grad']),
489 | (u'gradients/cond_94/dropout/mul_grad/Sum_1',
490 | [u'gradients/cond_94/dropout/mul_grad/mul_1',
491 | u'gradients/cond_94/dropout/mul_grad/BroadcastGradientArgs']),
492 | (u'gradients/cond_94/dropout/mul_grad/Reshape_1',
493 | [u'gradients/cond_94/dropout/mul_grad/Sum_1',
494 | u'gradients/cond_94/dropout/mul_grad/Shape_1']),
495 | (u'gradients/cond_94/dropout/div_grad/Shape', [u'cond_94/mul']),
496 | (u'gradients/cond_94/dropout/div_grad/Shape_1', []),
497 | (u'gradients/cond_94/dropout/div_grad/BroadcastGradientArgs',
498 | [u'gradients/cond_94/dropout/div_grad/Shape',
499 | u'gradients/cond_94/dropout/div_grad/Shape_1']),
500 | (u'gradients/cond_94/dropout/div_grad/RealDiv',
501 | [u'gradients/cond_94/dropout/mul_grad/Reshape',
502 | u'cond_94/dropout/keep_prob']),
503 | (u'gradients/cond_94/dropout/div_grad/Sum',
504 | [u'gradients/cond_94/dropout/div_grad/RealDiv',
505 | u'gradients/cond_94/dropout/div_grad/BroadcastGradientArgs']),
506 | (u'gradients/cond_94/dropout/div_grad/Reshape',
507 | [u'gradients/cond_94/dropout/div_grad/Sum',
508 | u'gradients/cond_94/dropout/div_grad/Shape']),
509 | (u'gradients/cond_94/dropout/div_grad/Neg', [u'cond_94/mul']),
510 | (u'gradients/cond_94/dropout/div_grad/RealDiv_1',
511 | [u'gradients/cond_94/dropout/div_grad/Neg', u'cond_94/dropout/keep_prob']),
512 | (u'gradients/cond_94/dropout/div_grad/RealDiv_2',
513 | [u'gradients/cond_94/dropout/div_grad/RealDiv_1',
514 | u'cond_94/dropout/keep_prob']),
515 | (u'gradients/cond_94/dropout/div_grad/mul',
516 | [u'gradients/cond_94/dropout/mul_grad/Reshape',
517 | u'gradients/cond_94/dropout/div_grad/RealDiv_2']),
518 | (u'gradients/cond_94/dropout/div_grad/Sum_1',
519 | [u'gradients/cond_94/dropout/div_grad/mul',
520 | u'gradients/cond_94/dropout/div_grad/BroadcastGradientArgs']),
521 | (u'gradients/cond_94/dropout/div_grad/Reshape_1',
522 | [u'gradients/cond_94/dropout/div_grad/Sum_1',
523 | u'gradients/cond_94/dropout/div_grad/Shape_1'])]
524 |
525 |
526 |
527 |
528 | ```python
# Find which nodes consume the dropout output tensor.
for n in gd.node:
    if 'cond_94/dropout/mul' in n.input:
        print(n)
532 | ```
533 |
534 | name: "cond_94/Merge"
535 | op: "Merge"
536 | input: "cond_94/Switch_1"
537 | input: "cond_94/dropout/mul"
538 | attr {
539 | key: "N"
540 | value {
541 | i: 2
542 | }
543 | }
544 | attr {
545 | key: "T"
546 | value {
547 | type: DT_FLOAT
548 | }
549 | }
550 |
551 |
552 |
553 |
554 | ```python
555 | nodes['cond_94/mul']
556 | ```
557 |
558 |
559 |
560 |
561 | name: "cond_94/mul"
562 | op: "Mul"
563 | input: "cond_94/mul/Switch"
564 | input: "cond_94/mul/y"
565 | attr {
566 | key: "T"
567 | value {
568 | type: DT_FLOAT
569 | }
570 | }
571 |
572 |
573 |
574 |
575 | ```python
576 | nodes['cond_94/dropout/mul']
577 | ```
578 |
579 |
580 |
581 |
582 | name: "cond_94/dropout/mul"
583 | op: "Mul"
584 | input: "cond_94/dropout/div"
585 | input: "cond_94/dropout/Floor"
586 | attr {
587 | key: "T"
588 | value {
589 | type: DT_FLOAT
590 | }
591 | }
592 |
593 |
594 |
595 |
596 | ```python
# Rewire the dropout Mul to read the pre-dropout tensor directly,
# bypassing the random mask coming from 'cond_94/dropout/Floor'.
nodes['cond_94/dropout/mul'].input.pop()
nodes['cond_94/dropout/mul'].input.append('cond_94/mul')
599 | ```
600 |
601 |
602 | ```python
603 | nodes['cond_94/dropout/mul']
604 | ```
605 |
606 |
607 |
608 |
609 | name: "cond_94/dropout/mul"
610 | op: "Mul"
611 | input: "cond_94/dropout/div"
612 | input: "cond_94/mul"
613 | attr {
614 | key: "T"
615 | value {
616 | type: DT_FLOAT
617 | }
618 | }
619 |
620 |
621 |
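This notebook has only the single `cond_94` dropout block, so the edit above is done by hand. As a hedged sketch (not from the original notebook), the same rewiring could be applied to every dropout block in a larger model, assuming they all follow the `cond_N/dropout/mul` naming pattern seen here:

```python
import re

# Sketch only: mirror the manual cond_94 edit for every matching block.
dropout_mul_re = re.compile(r'^(cond_\d+)/dropout/mul$')
for n in gd.node:
    match = dropout_mul_re.match(n.name)
    if match:
        n.input.pop()                            # drop the random 'Floor' mask
        n.input.append(match.group(1) + '/mul')  # feed the pre-dropout tensor
```
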
622 | ### Replace `keras_learning_phase` `Placeholder` with `Const` value
623 |
We don't need `keras_learning_phase` to be a `Placeholder`, since it should be fixed at `False` for test mode. We can change it to a `Const` op, then set its value to a 1-dimensional tensor containing `False`. The `shape` attribute is not valid on a `Const` op, so we simply delete it:
625 |
626 |
627 | ```python
628 | nodes['keras_learning_phase']
629 | ```
630 |
631 |
632 |
633 |
634 | name: "keras_learning_phase"
635 | op: "Placeholder"
636 | attr {
637 | key: "dtype"
638 | value {
639 | type: DT_BOOL
640 | }
641 | }
642 | attr {
643 | key: "shape"
644 | value {
645 | shape {
646 | }
647 | }
648 | }
649 |
650 |
651 |
652 |
653 | ```python
654 | nodes['keras_learning_phase'].op = 'Const'
655 | ```
656 |
657 |
658 | ```python
659 | from tensorflow.core.framework import attr_value_pb2
660 | from tensorflow.core.framework import graph_pb2
661 | from tensorflow.python.framework import dtypes
662 | from tensorflow.python.framework import tensor_util
663 |
# Attach a 1-D bool tensor holding a single False as the constant value.
nodes['keras_learning_phase'].attr.get_or_create('value').CopyFrom(
    attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
        [False], dtypes.bool, [1])))
666 | ```
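
As a quick check (not in the original notebook), we can decode the tensor we just attached back into a numpy array:

```python
# Decode the TensorProto attached above; it should hold a single False.
print(tensor_util.MakeNdarray(
    nodes['keras_learning_phase'].attr['value'].tensor))
```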
667 |
668 |
669 | ```python
670 | del nodes['keras_learning_phase'].attr['shape']
671 | ```
672 |
673 |
674 | ```python
675 | nodes['keras_learning_phase']
676 | ```
677 |
678 |
679 |
680 |
681 | name: "keras_learning_phase"
682 | op: "Const"
683 | attr {
684 | key: "dtype"
685 | value {
686 | type: DT_BOOL
687 | }
688 | }
689 | attr {
690 | key: "value"
691 | value {
692 | tensor {
693 | dtype: DT_BOOL
694 | tensor_shape {
695 | dim {
696 | size: 1
697 | }
698 | }
699 | bool_val: false
700 | }
701 | }
702 | }
703 |
704 |
705 |
706 | ### Baking Weight Constants into GraphDef and Writing to Disk
707 |
We want our output .pb file to be self-contained, with both the computation graph and all the trained weights. To do this, we simply call `convert_variables_to_constants`, specifying that we want to run the computation all the way up to the `Softmax` output, which gives us the 101 class probabilities.
709 |
710 |
711 | ```python
712 | from tensorflow.python.framework import graph_util
713 |
714 | output_graph_def = graph_util.convert_variables_to_constants(sess, gd, ['Softmax'])
715 | ```
716 |
717 | INFO:tensorflow:Froze 566 variables.
718 | Converted 566 variables to const ops.
719 |
720 |
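A quick way to confirm the freeze worked (a sketch, not in the original notebook) is to verify that no `Variable` ops survive in the frozen `GraphDef`:

```python
# Expect an empty list: every Variable/VariableV2 became a Const.
print([n.name for n in output_graph_def.node if n.op.startswith('Variable')])
```
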
721 |
722 | ```python
723 | with tf.gfile.GFile('graph.pb', "wb") as f:
724 | f.write(output_graph_def.SerializeToString())
725 | ```
726 |
727 | ### Testing the Modified Model
728 |
729 |
730 | ```python
731 | K.clear_session()
732 | sess = tf.Session()
733 | K.set_session(sess)
734 | ```
735 |
736 |
737 | ```python
738 | from tensorflow.core.framework import graph_pb2
739 | with open('graph.pb', "rb") as f:
740 | output_graph_def = graph_pb2.GraphDef()
741 | output_graph_def.ParseFromString(f.read())
742 | _ = tf.import_graph_def(output_graph_def, name="")
743 | ```
744 |
745 |
746 | ```python
747 | x = sess.graph.get_tensor_by_name('input_1:0')
748 | ```
749 |
750 |
751 | ```python
752 | y = sess.graph.get_tensor_by_name('Softmax:0')
753 | ```
754 |
755 |
756 | ```python
757 | new_scores = sess.run(y, feed_dict={x: imgs})
758 | ```
759 |
760 |
761 | ```python
# Element-wise check: how close are the new scores to the originals?
abs(orig_scores - new_scores) < 1e-6
763 | ```
764 |
765 |
766 |
767 |
768 | array([[ True, True, True, True, True, True, True, True, True,
769 | True, True, True, True, True, True, True, True, True,
770 | True, True, True, True, True, True, True, True, True,
771 | True, True, False, True, True, True, True, True, True,
772 | True, True, True, True, True, True, True, True, True,
773 | True, True, True, True, True, True, True, True, True,
774 | True, True, True, True, True, True, True, True, True,
775 | True, True, True, True, True, True, True, True, True,
776 | True, True, True, True, True, True, True, True, True,
777 | True, True, True, True, True, False, True, True, True,
778 | True, True, True, True, True, False, True, True, True,
779 | True, True]], dtype=bool)
780 |
781 |
782 |
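Before interpreting the result, here is a quick sketch (not in the original notebook) counting how many of the 101 class scores moved beyond the tolerance:

```python
import numpy as np

# Count mismatches against the 1e-6 tolerance; 3 of 101 in this run.
print(np.sum(abs(orig_scores - new_scores) >= 1e-6))
```
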
783 | The scores have changed due to our modifications to the dropout node, but not by much. We also get the same predicted label with a similar confidence level:
784 |
785 |
786 | ```python
787 | find_top_pred(orig_scores)
788 | find_top_pred(new_scores)
789 | ```
790 |
791 | Label: 95, Confidence: 0.998208284378
792 | Label: 95, Confidence: 0.999539613724
793 |
794 |
795 | ### Optimize the Model for Inference
796 |
I originally intended to follow Pete Warden's tutorial, [Tensorflow for Mobile Poets](https://petewarden.com/2016/09/27/tensorflow-for-mobile-poets/). In it, he takes an InceptionV3 network and runs it through optimizations that reduce the number of operations, decrease the resolution of the weights, and overall make the network smaller and faster.
798 |
799 | Then, I discovered the Tensorflow [Graph Transform Tool](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/README.md), which Pete Warden recommends in a [more recent post](https://petewarden.com/2016/12/30/rewriting-tensorflow-graphs-with-the-gtt/).
800 |
First you need to:
* Do a local build of Tensorflow (https://www.tensorflow.org/install/install_sources), making sure Bazel is installed correctly and that you have run `./configure`
* Build the following tools from within your tensorflow folder:
804 |
805 | ```
806 | bazel build tensorflow/tools/graph_transforms:transform_graph
807 | bazel build tensorflow/tools/graph_transforms:summarize_graph
808 | bazel build tensorflow/contrib/util:convert_graphdef_memmapped_format
809 | ```
810 |
811 | Now, let's try the `summarize_graph` utility on the .pb file that we have just exported:
812 |
813 | ```
814 | (tensorflow) ➜ model_export git:(master) ✗ ../../tensorflow/bazel-bin/tensorflow/tools/graph_transforms/summarize_graph --in_graph=graph.pb
815 |
816 | Found 1 possible inputs: (name=input_1, type=float(1), shape=[])
817 | No variables spotted.
818 | Found 1 possible outputs: (name=Softmax, op=Softmax)
819 | Found 21820820 (21.82M) const parameters, 0 (0) variable parameters, and 190 control_edges
820 | Op types used: 1435 Const, 758 Identity, 754 Mul, 565 Add, 376 Sub, 189 Reshape, 188 Rsqrt, 188 Sum, 95 Shape, 95 Prod, 95 Merge, 94 Mean, 94 Gather, 94 StopGradient, 94 SquaredDifference, 94 Conv2D, 94 Square, 94 Reciprocal, 94 Relu, 94 Cast, 15 ConcatV2, 11 AvgPool, 3 MaxPool, 1 Softmax, 1 RealDiv, 1 Placeholder, 1 Pack, 1 StridedSlice, 1 MatMul
821 | To use with tensorflow/tools/benchmark:benchmark_model try these arguments:
822 | bazel run tensorflow/tools/benchmark:benchmark_model -- --graph=graph.pb --show_flops --logtostderr --input_layer=input_1 --input_layer_type=float --input_layer_shape= --output_layer=Softmax
823 | ```
824 |
825 |
826 | ```python
827 | !ls -altr graph.pb
828 | ```
829 |
830 | -rw-rw-r-- 1 stratospark stratospark 87833459 Mar 22 00:24 graph.pb
831 |
832 |
Then we can optimize the graph for deployment. Notice that we are rounding the weights with `round_weights(num_steps=256)`; this leaves them stored as floats, so the .pb itself stays roughly the same size, but the repeated values compress much better when the file is added to the device bundle.
834 |
835 | ```
836 | (tensorflow) ➜ model_export git:(master) ✗ ../../tensorflow/bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
837 | --in_graph=graph.pb \
838 | --out_graph=opt_graph.pb \
839 | --inputs='input_1' \
840 | --outputs='Softmax' \
841 | --transforms='strip_unused_nodes(type=float, shape="1,299,299,3") remove_nodes(op=Identity, op=CheckNumerics) round_weights(num_steps=256) fold_constants(ignore_errors=true) fold_batch_norms fold_old_batch_norms'
842 |
843 | 2017-03-22 00:35:27.886563: I tensorflow/tools/graph_transforms/transform_graph.cc:257] Applying strip_unused_nodes
844 | 2017-03-22 00:35:28.048049: I tensorflow/tools/graph_transforms/transform_graph.cc:257] Applying remove_nodes
845 | 2017-03-22 00:35:28.709523: I tensorflow/tools/graph_transforms/transform_graph.cc:257] Applying round_weights
846 | 2017-03-22 00:35:29.032210: I tensorflow/tools/graph_transforms/transform_graph.cc:257] Applying fold_constants
847 | 2017-03-22 00:35:29.064884: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations.
848 | 2017-03-22 00:35:29.064910: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.
849 | 2017-03-22 00:35:29.064914: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.
850 | 2017-03-22 00:35:29.064917: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations.
851 | 2017-03-22 00:35:29.064919: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations.
852 | 2017-03-22 00:35:29.544610: I tensorflow/tools/graph_transforms/transform_graph.cc:257] Applying fold_batch_norms
853 | 2017-03-22 00:35:29.655708: I tensorflow/tools/graph_transforms/transform_graph.cc:257] Applying fold_old_batch_norms
854 | ```
855 |
856 |
857 |
858 | ```python
859 | !ls -altr opt_graph.pb
860 | ```
861 |
862 | -rw-rw-r-- 1 stratospark stratospark 87683561 Mar 22 00:35 opt_graph.pb
863 |
864 |
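Although `opt_graph.pb` is barely smaller on disk, the rounded weights should compress much better. A rough, hedged way to check this (not in the original notebook) is to compare zlib-compressed sizes, a stand-in for the compression applied to an app bundle:

```python
import zlib

# Compare raw vs. compressed sizes; opt_graph.pb should compress
# noticeably better thanks to round_weights(num_steps=256).
for path in ['graph.pb', 'opt_graph.pb']:
    raw = open(path, 'rb').read()
    print('%s: %d raw bytes, %d compressed' % (
        path, len(raw), len(zlib.compress(raw))))
```
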
Next, we can shrink the graph down by quantizing the weights, which converts the 32-bit float weights to 8-bit values and cuts the file to roughly a quarter of its size. **Warning: this does not work on the iOS device!**
866 |
867 | ```
868 | (tensorflow) ➜ model_export git:(master) ✗ ../../tensorflow/bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
869 | --in_graph=opt_graph.pb \
870 | --out_graph=shrink_graph.pb \
871 | --inputs='input_1' \
872 | --outputs='Softmax' \
873 | --transforms='quantize_weights strip_unused_nodes'
874 |
875 | 2017-03-22 00:39:51.366052: I tensorflow/tools/graph_transforms/transform_graph.cc:257] Applying quantize_weights
876 | 2017-03-22 00:39:51.913481: I tensorflow/tools/graph_transforms/transform_graph.cc:257] Applying strip_unused_nodes
877 | ```
878 |
879 |
880 | ```python
881 | !ls -altr shrink_graph.pb
882 | ```
883 |
884 | -rw-rw-r-- 1 stratospark stratospark 22445591 Mar 22 00:39 shrink_graph.pb
885 |
886 |
Finally, we can create a memory-mapped model, as described in [Tensorflow for Mobile Poets](https://petewarden.com/2016/09/27/tensorflow-for-mobile-poets/). The file stays about the same size, but the weights can be mapped directly from disk at runtime instead of being loaded onto the heap, which helps on memory-constrained iOS devices.
888 |
889 | ```
890 | (tensorflow) ➜ model_export git:(master) ✗ ../../tensorflow/bazel-bin/tensorflow/contrib/util/convert_graphdef_memmapped_format --in_graph=opt_graph.pb --out_graph=mem_graph.pb
891 |
892 | 2017-03-22 00:40:32.066048: I tensorflow/contrib/util/convert_graphdef_memmapped_format_lib.cc:168] Converted 94 nodes
893 | ```
894 |
895 |
896 | ```python
897 | !ls -altr *graph*
898 | ```
899 |
900 | -rw-rw-r-- 1 stratospark stratospark 87833459 Mar 22 00:24 graph.pb
901 | -rw-rw-r-- 1 stratospark stratospark 87683561 Mar 22 00:35 opt_graph.pb
902 | -rw-rw-r-- 1 stratospark stratospark 22445591 Mar 22 00:39 shrink_graph.pb
903 | -rw-rw-r-- 1 stratospark stratospark 87693388 Mar 22 00:40 mem_graph.pb
904 |
905 |
906 | ### Testing Optimized Model
907 |
908 |
909 | ```python
def test_model(model_file):
    # Start from a fresh session so previously imported graphs don't interfere.
    K.clear_session()
    sess = tf.Session()
    K.set_session(sess)

    with open(model_file, "rb") as f:
        output_graph_def = graph_pb2.GraphDef()
        output_graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(output_graph_def, name="")

    x = sess.graph.get_tensor_by_name('input_1:0')
    y = sess.graph.get_tensor_by_name('Softmax:0')

    new_scores = sess.run(y, feed_dict={x: imgs})
    print(abs(orig_scores - new_scores) < 1e-6)
    find_top_pred(orig_scores)
    find_top_pred(new_scores)

test_model('graph.pb')
929 | ```
930 |
931 | [[ True True True True True True True True True True True True
932 | True True True True True True True True True True True True
933 | True True True True True False True True True True True True
934 | True True True True True True True True True True True True
935 | True True True True True True True True True True True True
936 | True True True True True True True True True True True True
937 | True True True True True True True True True True True True
938 | True True True True True True True True True True True True
939 | True True True True True]]
940 | Label: 95, Confidence: 0.998208284378
941 | Label: 95, Confidence: 0.99955624342
942 |
943 |
944 |
945 | ```python
946 | test_model('opt_graph.pb')
947 | ```
948 |
949 | [[ True True True True True True True True True True True True
950 | True True True True True True True True True True True True
951 | True True True True True False True True True True True True
952 | True True True True True True True True True True True True
953 | True True True True True True True True True True True True
954 | True True True True True True True True True True True True
955 | True True True True True True True True True True True True
956 | True True True True True True True True True True True True
957 | True True True True True]]
958 | Label: 95, Confidence: 0.998208284378
959 | Label: 95, Confidence: 0.999923110008
960 |
961 |
962 |
963 | ```python
964 | test_model('mem_graph.pb')
965 | ```
966 |
967 |
968 | ---------------------------------------------------------------------------
969 |
970 | DecodeError Traceback (most recent call last)
971 |
972 | in ()
973 | ----> 1 test_model('mem_graph.pb')
974 |
975 |
976 | in test_model(model_file)
977 | 6 with open(model_file, "rb") as f:
978 | 7 output_graph_def = graph_pb2.GraphDef()
979 | ----> 8 output_graph_def.ParseFromString(f.read())
980 | 9 _ = tf.import_graph_def(output_graph_def, name="")
981 | 10
982 |
983 |
984 | DecodeError: Error parsing message
985 |
986 |
**TODO**: How to test the quantized and memory-mapped models with non-mobile Tensorflow? The `DecodeError` above is expected: `convert_graphdef_memmapped_format` produces a memory-mapped container format rather than a plain serialized `GraphDef`, so `ParseFromString` cannot parse it.
988 |
989 | ### Adding Model to Example Tensorflow / iOS Camera App
990 |
991 | I used this [example app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/ios_examples/camera) as a base. Here are the main changes:
992 |
* Add the `mem_graph.pb` file to the `camera/data` folder. If you didn't build it in the previous section, download it here: https://s3.amazonaws.com/stratospark/food-101/mem_graph.pb
* Set the `model_file_name` variable in `CameraExampleViewController.mm` to `mem_graph`
* Replace the labels file with the food labels.
* Change the input height and width to 299x299, as InceptionV3 requires.
* Change `out_pixel[c]` to do InceptionV3 preprocessing: `out_pixel[c] = ((in_pixel[c] / 255.0) - 0.5) * 2.0;` (a Python sketch of the same mapping appears after this list)
* Tweak `decayValue`, `updateValue`, and `minimumThreshold` in the `setPredictionValues` method to get a better user experience.
* Be sure to go to `Build Settings` and update `Other Linker Flags`, `Header Search Paths`, and `Library Search Paths` to point to your local build of Tensorflow. This project folder is a sibling of my Tensorflow folder.
1000 |
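For reference, here is a hypothetical Python sketch (not part of the app code) of the same preprocessing the `out_pixel[c]` line performs, useful for sanity-checking inputs on the desktop side:

```python
import numpy as np

def inception_preprocess(img_uint8):
    # Map [0, 255] pixel values into [-1, 1], as InceptionV3 expects;
    # this mirrors out_pixel[c] = ((in_pixel[c] / 255.0) - 0.5) * 2.0.
    return ((img_uint8.astype(np.float32) / 255.0) - 0.5) * 2.0
```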
--------------------------------------------------------------------------------