├── .eslintrc.js
├── .gitignore
├── .markdownlint.json
├── .travis.yml
├── CONTRIBUTING.md
├── LICENSE
├── MAINTAINERS.md
├── README-ja.md
├── README.md
├── app.js
├── css
└── index.css
├── data
├── ATTRIBUTIONS.md
├── coke_bottles.zip
├── coke_bottles_exported.zip
├── coke_bottles_set2.zip
└── test_set.zip
├── doc
└── source
│ └── images
│ ├── Locate-and-count-items-in-an-image-with-object-detection-flow-arch-10.png
│ ├── architecture.png
│ ├── build_model.png
│ ├── create_new_dataset.png
│ ├── model_details.png
│ ├── object_detection_app.png
│ ├── save_labels.png
│ └── test_ui.png
├── env.sample
├── favicon16.png
├── favicon32.png
├── favicon64.png
├── img
└── camera.png
├── index.html
├── js
└── index.js
├── manifest.yml
├── package-lock.json
├── package.json
├── service-worker.js
└── test
└── unit
└── test.index.js
/.eslintrc.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | "env": {
3 | "node": true,
4 | "mocha": true,
5 | },
6 | "plugins": ["node"],
7 | "extends": [
8 | "eslint:recommended",
9 | "google",
10 | "plugin:node/recommended",
11 | ],
12 | "rules": {
13 | "no-console": 0,
14 | "node/no-unpublished-require": ["error", {"allowModules": ["chai", "sinon", "jsdom-global"]}],
15 | }
16 | };
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 |
8 | # Runtime data
9 | pids
10 | *.pid
11 | *.seed
12 | *.pid.lock
13 |
14 | # Directory for instrumented libs generated by jscoverage/JSCover
15 | lib-cov
16 |
17 | # Coverage directory used by tools like istanbul
18 | coverage
19 |
20 | # nyc test coverage
21 | .nyc_output
22 |
23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
24 | .grunt
25 |
26 | # Bower dependency directory (https://bower.io/)
27 | bower_components
28 |
29 | # node-waf configuration
30 | .lock-wscript
31 |
32 | # Compiled binary addons (http://nodejs.org/api/addons.html)
33 | build/Release
34 |
35 | # Dependency directories
36 | node_modules/
37 | jspm_packages/
38 |
39 | # Typescript v1 declaration files
40 | typings/
41 |
42 | # Optional npm cache directory
43 | .npm
44 |
45 | # Optional eslint cache
46 | .eslintcache
47 |
48 | # Optional REPL history
49 | .node_repl_history
50 |
51 | # Output of 'npm pack'
52 | *.tgz
53 |
54 | # Yarn Integrity file
55 | .yarn-integrity
56 |
57 | # dotenv environment variables file
58 | .env
59 |
60 | # VS Code history plugin
61 | .history/
62 |
63 | # IDEA project settings
64 | .idea/
65 |
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "_docs_for_this_file": "https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md",
3 | "line-length": false,
4 | "first-line-h1": true,
5 | "first-header-h1": {
6 | "level": 1
7 | },
8 | "header-style": {
9 | "style": "atx"
10 | },
11 | "ul-style": {
12 | "style": "asterisk"
13 | },
14 | "hr-style": {
15 | "style": "---"
16 | },
17 | "fenced-code-language": true,
18 | "required-headers": {
19 | "headers": [
20 | "*",
21 | "## Flow",
22 | "## Watch the Video",
23 | "*",
24 | "## Steps",
25 | "*",
26 | "## License"
27 | ]
28 | },
29 | "spelling": false,
30 | "no-alt-text": true,
31 | "no-blanks-blockquote": false,
32 | "html": {
33 | "allowed_elements": [
34 | "kbd"
35 | ]
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 | dist: trusty
3 | node_js:
4 | - "node"
5 | - "lts/*"
6 | script:
7 | - npm run test
8 | cache:
9 | directories:
10 | - node_modules
11 | env:
12 | global:
13 | - POWERAI_VISION_WEB_API_URL=:/test/AIVision/api/dlapis/fake
14 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | This is an open source project, and we appreciate your help!
4 |
5 | We use the GitHub issue tracker to discuss new features and non-trivial bugs.
6 |
7 | In addition to the issue tracker, [#journeys on
8 | Slack](https://dwopen.slack.com) is the best way to get into contact with the
9 | project's maintainers.
10 |
11 | To contribute code, documentation, or tests, please submit a pull request to
12 | the GitHub repository. Generally, we expect two maintainers to review your pull
13 | request before it is approved for merging. For more details, see the
14 | [MAINTAINERS](MAINTAINERS.md) page.
15 |
16 | Contributions are subject to the [Developer Certificate of Origin, Version 1.1](https://developercertificate.org/) and the [Apache License, Version 2](http://www.apache.org/licenses/LICENSE-2.0.txt).
17 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/MAINTAINERS.md:
--------------------------------------------------------------------------------
1 | # Maintainers Guide
2 |
3 | This guide is intended for maintainers - anybody with commit access to one or
4 | more Code Pattern repositories.
5 |
6 | ## Methodology
7 |
8 | This repository does not have a traditional release management cycle, but
9 | should instead be maintained as a useful, working, and polished reference at
10 | all times. While all work can therefore be focused on the master branch, the
11 | quality of this branch should never be compromised.
12 |
13 | The remainder of this document details how to merge pull requests to the
14 | repositories.
15 |
16 | ## Merge approval
17 |
18 | The project maintainers use LGTM (Looks Good To Me) in comments on the pull
19 | request to indicate acceptance prior to merging. A change requires LGTMs from
20 | two project maintainers. If the code is written by a maintainer, the change
21 | only requires one additional LGTM.
22 |
23 | ## Reviewing Pull Requests
24 |
25 | We recommend reviewing pull requests directly within GitHub. This allows a
26 | public commentary on changes, providing transparency for all users. When
27 | providing feedback be civil, courteous, and kind. Disagreement is fine, so long
28 | as the discourse is carried out politely. If we see a record of uncivil or
29 | abusive comments, we will revoke your commit privileges and invite you to leave
30 | the project.
31 |
32 | During your review, consider the following points:
33 |
34 | ### Does the change have positive impact?
35 |
36 | Some proposed changes may not represent a positive impact to the project. Ask
37 | whether or not the change will make understanding the code easier, or if it
38 | could simply be a personal preference on the part of the author (see
39 | [bikeshedding](https://en.wiktionary.org/wiki/bikeshedding)).
40 |
41 | Pull requests that do not have a clear positive impact should be closed without
42 | merging.
43 |
44 | ### Do the changes make sense?
45 |
46 | If you do not understand what the changes are or what they accomplish, ask the
47 | author for clarification. Ask the author to add comments and/or clarify test
48 | case names to make the intentions clear.
49 |
50 | At times, such clarification will reveal that the author may not be using the
51 | code correctly, or is unaware of features that accommodate their needs. If you
52 | feel this is the case, work up a code sample that would address the pull
53 | request for them, and feel free to close the pull request once they confirm.
54 |
55 | ### Does the change introduce a new feature?
56 |
57 | For any given pull request, ask yourself "is this a new feature?" If so, does
58 | the pull request (or associated issue) contain narrative indicating the need
59 | for the feature? If not, ask them to provide that information.
60 |
61 | Are new unit tests in place that test all new behaviors introduced? If not, do
62 | not merge the feature until they are! Is documentation in place for the new
63 | feature? (See the documentation guidelines). If not do not merge the feature
64 | until it is! Is the feature necessary for general use cases? Try and keep the
65 | scope of any given component narrow. If a proposed feature does not fit that
66 | scope, recommend to the user that they maintain the feature on their own, and
67 | close the request. You may also recommend that they see if the feature gains
68 | traction among other users, and suggest they re-submit when they can show such
69 | support.
70 |
--------------------------------------------------------------------------------
/README-ja.md:
--------------------------------------------------------------------------------
1 | *他の言語で読む: [English](README.md).*
2 |
3 | # PowerAI Vision によるオブジェクト検出
4 |
5 | このコードパターンでは、PowerAI Vision Object Detection を使用して、カスタマイズされたトレーニングに基づいて、イメージ内のオブジェクトを検出してラベル付けします。
6 |
7 | > この例は、独自のデータセットで容易にカスタマイズできます。
8 |
9 | サンプル用のデータセットとして、コカ・コーラの瓶の画像を使用します。
10 | モデルをトレーニングして展開すると、画像内のコーラ瓶を見つけてカウントできる REST エンドポイントが作成できます。
11 |
12 | ディープラーニング (深層学習) トレーニングを使用して、オブジェクト検出のモデルを作成します。
13 | PowerAI Vision では、数回のマウスクリックだけで深層学習を実施できます。
14 | タスクが完了すると、もう一度クリックしてモデルをデプロイできます。
15 |
16 | PowerAI Vision は、推論操作のための REST API を提供しています。
17 | カスタムモデルによるオブジェクト検出は、任意の REST クライアントから使用でき、PowerAI Vision UI でもテストできます。
18 |
19 | このコード・パターンをひと通り完了すると、以下の方法がわかるようになります:
20 |
21 | * PowerAI Vision を使用してオブジェクトを検出する対象のデータセットを作成する
22 | * データセットに基づいてモデルをトレーニングし、デプロイする
23 | * REST 呼び出しを使用してモデルをテストする
24 |
25 | 
26 |
27 | ## Flow
28 |
29 | 1. 画像をアップロードして PowerAI Vision データセットを作成します。
30 | 2. トレーニングを実行する前に、画像データセット内のオブジェクトにラベルを付けます。
31 | 3. PowerAI Vision 内でモデルをトレーニング、デプロイ、テストします。
32 | 4. REST クライアントを使用して、画像内のオブジェクトを検出します。
33 |
34 | ## 含まれるコンポーネント
35 |
36 | * [IBM Power Systems](https://www-03.ibm.com/systems/power/): オープンテクノロジーで構築され、ミッションクリティカルなアプリケーション用に設計されたサーバー。
37 | * [IBM PowerAI](https://www.ibm.com/ms-en/marketplace/deep-learning-platform): 深層学習、機械学習、AIをよりアクセスしやすくし、パフォーマンスを改善するソフトウェアプラットフォーム。
38 | * [IBM PowerAI Vision Technology Preview](https://developer.ibm.com/linuxonpower/deep-learning-powerai/technology-previews/powerai-vision/): コンピュータビジョンのための深層学習モデルをトレーニングし、デプロイして、データセットにラベルを付けするための一貫したエコシステム。
39 |
40 | ## 利用した技術
41 |
42 | * [Artificial Intelligence](https://medium.com/ibm-data-science-experience): 人工知能を分散したソリューション空間に適用して、破壊的技術(新しい価値基準の下で従来よりも優れた特長を持つ新技術)を提供します。
43 | * [Node.js](https://nodejs.org/): サーバー側でJavaScriptコードを実行するためのオープンソースのJavaScriptランタイム環境。
44 |
45 | # ビデオを観る
46 |
47 | [](https://www.youtube.com/watch?v=xoLcXQs4SP4)
48 |
49 | # 前提条件
50 |
51 | *このコードパターンは、PowerAI Vision Technology Preview v3.0 でビルドされました。*
52 |
53 | * `SuperVessel` クラウドを使用してプレビューを試すには、[こちら](https://ny1.ptopenlab.com/AIVision) でログインまたは登録してください。
54 |
55 | * Power Systems を所有しており、インストーラをダウンロードして自分のシステムにプレビューをデプロイする場合は、[こちら](https://www-01.ibm.com/marketing/iwm/iwm/web/preLogin.do?source=mrs-eibmpair) で登録します。
56 |
57 | > ノート: この README の手順と例は、SuperVessel の使用を想定しています。たとえば、幾つかの URL は ny1.ptopenlab.com を使用しています。
58 |
59 | # 手順
60 |
61 | 1. [GitHub リポジトリーを複製する](#1-clone-the-repo)
62 | 2. [PowerAI Vision にログインする](#2-login-to-powerai-vision)
63 | 3. [新規データセットを作成する](#3-create-a-dataset)
64 | 4. [タグを作成しオブジェクトにラベルを付ける](#4-create-tags-and-label-objects)
65 | 5. [深層学習 (DL) タスクを作成する](#5-create-a-dl-task)
66 | 6. [モデルをデプロイしてテストする](#6-deploy-and-test)
67 | 7. [アプリを実行する](#7-run-the-app)
68 |
69 |
70 | ### 1. GitHub リポジトリーを複製する
71 |
72 | `powerai-vision-object-detection` をローカル環境にクローンします。ターミナルで次のコマンドを実行:
73 |
74 | ```
75 | git clone https://github.com/IBM/powerai-vision-object-detection
76 | ```
77 |
78 |
79 | ### 2. PowerAI Vision にログインする
80 |
81 | SuperVessel を使用している場合は、こちらでログインします: https://ny1.ptopenlab.com/AIVision/index.html
82 |
83 |
84 | ### 3. 新規データセットを作成する
85 |
86 | PowerAI Vision Object Detection は、ユーザーと開発者がカスタマイズしたトレーニングに基づいて、画像内のオブジェクトのインスタンスを数えることができます。そして画像内のオブジェクトを検出してラベル付けします。
87 |
88 | オブジェクト検出トレーニング用の新しいデータセットを作成するには:
89 |
90 | * `My Data Sets` ビューから `Add Dataset` ボタンをクリックし、プルダウンで `For Object Detection` を選択します。
91 |
92 | 
93 |
94 | * データセット名を入力し、`Add Dataset` をクリックします。
95 |
96 |
97 |
98 | * ドラッグ&ドロップや `Select some` を使用して1つ以上の画像をアップロードします。一度に多くをアップロードするため、クローンしたリポジトリから [powerai-vision-object-detection/data/coke_bottles.zip](https://github.com/IBM/powerai-vision-object-detection/raw/master/data/coke_bottles.zip) を使うことができます。
99 |
100 | 
101 |
102 | > ノート: 独自の zip ファイルを使用して、アップロード後にファイルサムネイルが表示されない場合は、そのアップロードは失敗しています。特殊文字やスペースを使用せずに、小文字のファイル名を使用してください。個々のファイルをアップロードしたり、幾つかのファイルを一度に複数選択して、どのファイルがアップロードを失敗させたのかを判断することもできます。
103 |
104 |
105 | ### 4. タグを作成しオブジェクトにラベルを付ける
106 |
107 | * `+` アイコンをクリックして新しいタグを作成します。 各タグは、特定の使用例 (コカコーラ、ダイエットコーラ、コーラゼロなど) に基づいて画像内のトレーニングオブジェクトを表します。
108 |
109 | * タグを選択し、画像内のオブジェクトの周囲にあるバウンディングボックス内にドラッグすると、各画像のオブジェクトにラベルを付けることができます。それぞれの画像ごとに設定して `Save` を押します。
110 |
111 | * すべてのタグとすべての画像に対してこのプロセスを繰り返します。
112 |
113 | > ノート: ラベル付けを既に実施済みの状態でエクスポートした [powerai-vision-object-detection/data/coke_bottles_exported.zip](https://github.com/IBM/powerai-vision-object-detection/raw/master/data/coke_bottles_exported.zip) ファイルをインポートすることにより、上記の設定の手間を省くこともできます。
114 |
115 | 
116 |
117 | > ヒント: `Only Show Unlabeled Files` プルダウンを使用すると、実行を完了したことが判断し易くなります。
118 |
119 | * `Data Augmentation` ボタンを使ってデータセットを拡張することができます。元のイメージに最初にラベルを付け、ミラーイメージ (水平または垂直) がユースケースに合致しているかどうかを確認してください。データ拡張機能を使用すると、新しい拡張データセットが作成されます。
120 |
121 | * あなたの仕事のコピーを保存するには、`Export As Zip File` をクリックしてください。ラベルを付けるのには時間がかかるので、何か問題が発生した場合、この zip ファイルから作業をやり直すことができます。
122 |
123 |
124 | ### 5. 深層学習 (DL) タスクを作成する
125 |
126 | * My Workspace の下にある `My DL Tasks` をクリックし、`Create New Task` ボタンをクリックします。`Object Detection` をクリックします。
127 |
128 | * Object Detector に名前を付け、データセットが選択されていることを確認し、`Build Model` をクリックします。
129 |
130 | 
131 |
132 | * 実行時間の予測を含んだ、確認ダイアログが表示されます。`Create New Task` をクリックして開始します。
133 |
134 |
135 |
136 |
137 | ### 6. モデルをデプロイしてテストする
138 |
139 | * モデルがビルドされたら、`Deploy and Test` をクリックします。
140 |
141 | 
142 |
143 | * PowerAI Vision UI でモデルをテストします。 テスト画像を選択するには、`Select some` を使用します。結果は検出されたオブジェクトの数を示し、バウンディングボックス、ラベル、信頼スコアと共に画像が表示されます。
144 |
145 | 
146 |
147 | * コマンドラインから、画像ファイルと `curl` コマンドを使用して、配備された REST エンドポイントをテストすることができます。出力 JSON は複数の瓶が検出されたことを示し、それぞれの瓶には信頼性、ラベル、および場所が示されています。
148 | > 注意点: この例では便宜上 `--insecure` を使いました。
149 |
150 | ```bash
151 | $ curl --insecure -i -F files=@coke_bottle_23.png https://ny1.ptopenlab.com/AIVision/api/dlapis/9f9d6787-0183-4a1b-be49-751b6ca16724
152 | HTTP/1.1 100 Continue
153 |
154 | HTTP/1.1 200 OK
155 | Server: nginx/1.9.13
156 | Date: Thu, 14 Dec 2017 21:58:26 GMT
157 | Content-Type: application/json
158 | Content-Length: 508
159 | Connection: keep-alive
160 | Access-Control-Allow-Origin: *
161 | Access-Control-Allow-Headers: origin, content-type, accept, authorization
162 | Access-Control-Allow-Credentials: true
163 | Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS, HEAD
164 | Access-Control-Allow-Origin: *
165 |
166 | { "classified" : [ { "confidence" : 0.9986369013786316 , "ymax" : 578 , "label" : "coca-cola" , "xmax" : 755 , "xmin" : 588 , "ymin" : 29} , { "confidence" : 0.9954010248184204 , "ymax" : 592 , "label" : "coca-cola" , "xmax" : 601 , "xmin" : 437 , "ymin" : 10} , { "confidence" : 0.8161203265190125 , "ymax" : 567 , "label" : "coca-cola" , "xmax" : 426 , "xmin" : 259 , "ymin" : 17}] , "imageUrl" : "http://ny1.ptopenlab.com:443/AIVision/temp/5a26dd3b-d8ba-4e01-8b93-5a43f28e97c7.png" , "result" : "success"}
167 | ```
168 |
169 |
170 | ### 7. アプリを実行する
171 |
172 | サンプルの Web アプリケーションは、画像をアップロードし、訓練されデプロイされたモデルを使用し、検出されたオブジェクトを画面上にバウンディングボックスとラベルを表示する方法を示しています。機能は上記のテストと似ていますが、カスタマイズするためのコードが用意されています。
173 |
174 | 次の [IBM Cloud にデプロイする](#deploy-to-ibm-cloud) **もしくは** [ローカル環境で実行する](#run-locally) セクションのどちらかを実行してください。
175 |
176 |
177 | #### IBM Cloud にデプロイする
178 |
179 | [](https://bluemix.net/deploy?repository=https://github.com/IBM/powerai-vision-object-detection)
180 |
181 | 1. 上記の ``Deploy to IBM Cloud`` ボタンを押し、次に ``Deploy`` をクリックします。
182 |
183 | 2. ツールチェーンで、デリバリーパイプラインをクリックして、アプリケーションがデプロイされるのを待ちます。デプロイされた後、`View app` をクリックするとアプリを表示できます。
184 |
185 | 3. IBM Cloud ダッシュボードを使用してアプリを管理します。このアプリの名前は、`powerai-vision-object-detection` に、ユニークな接尾辞を追加したものです。
186 |
187 | 4. PowerAI Vision APIエンドポイントを追加します:
188 | * IBM Cloud ダッシュボードでアプリをクリックします。
189 | * サイドバーの `Runtime` を選択します。
190 | * 中央のボタンバーで `Environment variables` (環境変数) を押します。
191 | * `Add` ボタンを押します。
192 | * 名前 `POWERAI_VISION_WEB_API_URL` を追加し、配備した Web API の値を設定します(上記)。
193 | * `Save` ボタンを押します。アプリは自動的に再起動します。
194 | * アプリを使用するには、`Visit App URL` をクリックします。
195 |
196 |
197 | #### ローカル環境で実行する
198 |
199 | 複製されたリポジトリを使用し、Web アプリケーションをビルドして実行します。
200 |
201 | > ノート: これらの手順は、``Deploy to IBM Cloud`` ボタンを使用する代わりに、ローカル環境で実行する場合にのみ必要です。
202 |
203 | * env.sample を .env にコピーします。ファイルを編集して、さきほどデプロイした Web API を指すように URL を設定します。
204 |
205 | * [Node.js](https://nodejs.org/en/download/) と [npm](https://docs.npmjs.com/getting-started/installing-node) があらかじめインストールされていることを前提として、 次のコマンドを実行します:
206 | ```
207 | cd powerai-vision-object-detection
208 | npm install
209 | npm start
210 | ```
211 |
212 | * ブラウザを使用して Web UI を表示します。デフォルトの URLは http://localhost:8081 です。
213 |
214 | #### Web アプリを使用する
215 |
216 | * `Choose File` ボタンを使ってファイルを選択します。電話では、カメラを使用することもできます。ラップトップでは、画像ファイル (JPGまたはPNG) を選択します。
217 |
218 | * Web API に画像を送信し、結果をレンダリングするには、`Upload File` ボタンを押してください。
219 |
220 | 
221 |
222 | * POWERAI_VISION_WEB_API_URL を構成しなかった場合、または API がデプロイされていない場合は、UIにエラーメッセージが表示されます (SuperVessel では1時間ごとにすばやく再デプロイできます)。
223 |
224 | # リンク
225 |
226 | * [Youtube上のデモ](https://www.youtube.com/watch?v=xoLcXQs4SP4): ビデオを観る
227 | * [Object Detection](https://en.wikipedia.org/wiki/Object_detection): Wikipedia の説明
228 | * [PowerAI Vision](https://developer.ibm.com/linuxonpower/deep-learning-powerai/technology-previews/powerai-vision/): 深層学習と PowerAI 開発
229 | * [TensorFlow Object Detection](https://research.googleblog.com/2017/06/supercharge-your-computer-vision-models.html): TensorFlow オブジェクト検出 API を使用してコンピュータビジョン・モデルを強化する
230 | * [AI Article](https://www.entrepreneur.com/article/283990): 人工知能は人よりも写真を識別できるか?
231 | * [From the developers](https://developer.ibm.com/linuxonpower/2017/08/30/ibm-powerai-vision-speeds-transfer-learning-greater-accuracy-real-world-example/): IBM PowerAI Vision が、より正確な伝達学習を加速する、その実際の例
232 |
233 | # もっと詳しく知る
234 |
235 | * **Artificial Intelligence コードパターン**: このコードパターンを気に入りましたか? [AI Code コードパターン](https://developer.ibm.com/jp/technologies/artificial-intelligence/) から関連パターンを参照してください。
236 | * **AI and Data コードパターン・プレイリスト**: コードパターンに関係するビデオ全ての [プレイリスト](https://www.youtube.com/playlist?list=PLzUbsvIyrNfknNewObx5N7uGZ5FKH0Fde) です。
237 | * **PowerAI**: AIのためのエンタープライズプラットフォーム上で機械学習用のソフトウェアを実行することで、より速く開始またはスケーリングできます: [IBM Power Systems](https://www.ibm.com/ms-en/marketplace/deep-learning-platform)
238 |
239 |
240 | # ライセンス
241 | [Apache 2.0](LICENSE)
242 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # WARNING: This repository is no longer maintained
2 |
3 | > This repository will not be updated. The repository will be kept available in read-only mode.
4 |
5 | # Object detection with Maximo Visual Inspection (Formerly PowerAI Vision)
6 |
7 | > Note: This repo has been updated to use Maximo Visual Inspection (Formerly PowerAI Vision).
8 | **Same product, new name. Previously called PowerAI Vision.**
9 |
10 | In this code pattern, we will use Maximo Visual Inspection
11 | to detect and label objects, within an image, based on customized
12 | training.
13 |
14 | > Note: This example can easily be customized with your own data sets.
15 |
16 | An example data set has been provided with images of Coca-Cola bottles.
17 | Once we train and deploy a model, we'll have a REST endpoint
18 | that allows us to locate and count Coke bottles in an image.
19 |
20 | Deep learning training will be used to create a model for
21 | Object Detection. With Maximo Visual Inspection, deep learning training is
22 | as easy as a few clicks of a mouse. Once the task has completed,
23 | the model can be deployed with another click.
24 |
25 | Maximo Visual Inspection presents REST APIs for inference operations.
26 | Object detection with your custom model can be used from any REST
27 | client and can also be tested in the Maximo Visual Inspection UI.
28 |
29 | When the reader has completed this code pattern, they will understand how to:
30 |
31 | * Create a data set for object detection with Maximo Visual Inspection (Formerly PowerAI Vision)
32 | * Train and deploy a model based on the data set
33 | * Test the model via REST calls
34 |
35 | 
36 |
37 | ## Flow
38 |
39 | 1. User uploads images to create a Maximo Visual Inspection data set
40 | 1. User labels objects in the image data set prior to training
41 | 1. The model is trained, deployed and tested in Maximo Visual Inspection web interface
42 | 1. User can detect objects in images using a REST client
43 |
44 | ## Watch the Video
45 |
46 | [](https://www.youtube.com/watch?v=xoLcXQs4SP4)
47 |
48 | ## Prerequisites
49 |
50 | This code pattern requires Maximo Visual Inspection.
51 |
52 | Go [here](https://www.ibm.com/support/pages/ibm-maximo-visual-inspection)
53 | to learn more about trial access (scroll down to the `Give it a try` section).
54 |
55 | > Note: *This code pattern was tested with Maximo Visual Inspection 1.3.0*
56 |
57 | ## Steps
58 |
59 | 1. [Clone the repo](#1-clone-the-repo)
60 | 2. [Login to Maximo Visual Inspection](#2-login-to-maximo-visual-inspection)
61 | 3. [Create a data set](#3-create-a-data-set)
62 | 4. [Label the objects](#4-label-the-objects)
63 | 5. [Train the model](#5-train-the-model)
64 | 6. [Deploy and test](#6-deploy-and-test)
65 | 7. [Run the app](#7-run-the-app)
66 |
67 | ### 1. Clone the repo
68 |
69 | Clone the `powerai-vision-object-detection` repository locally. In a terminal, run:
70 |
71 | ```bash
72 | git clone https://github.com/IBM/powerai-vision-object-detection
73 | ```
74 |
75 | ### 2. Login to Maximo Visual Inspection
76 |
77 | Use your browser to access the Maximo Visual Inspection web UI for steps 3-6.
78 |
79 | > Note: The images below show the old PowerAI Vision UI, but the interface is nearly the same except the new product name.
80 |
81 | ### 3. Create a data set
82 |
83 | Maximo Visual Inspection Object Detection discovers and labels objects within an image, enabling users and developers to count instances of objects within an
84 | image based on customized training.
85 |
86 | To create a new data set for object detection training:
87 |
88 | * Use the `Data Sets` tab and click on the `Create new data set` card.
89 |
90 | 
91 |
92 | * Provide a data set name and click `Create`.
93 |
94 | * A new data set card will appear. Click on the new card.
95 |
96 | * Upload one or more images using drag-and-drop or `Import files`. You can use `data/coke_bottles.zip` from your cloned repo to upload many at once.
97 |
98 | ### 4. Label the objects
99 |
100 | * Create new object labels for the data set by clicking `+ Add object` under the `Objects` pulldown in the sidebar. To add multiple object labels, enter one label, click `Add`, then enter the next until you are done and then hit `OK`. For our example data set, add "Coca Cola", "Diet Coke", and "Coke Zero".
101 |
102 | * Label the objects in each image by clicking on the image card and hitting `Label objects`. Then choose `Box` from the bottom left. Select the label to use on the left and then click and drag to draw a bounding box around each object of that type in the image.
103 |
104 | Press `Save` when done with each image.
105 |
106 | * Repeat this process for all labels and all images.
107 |
108 | > Note: You can import [powerai-vision-object-detection/data/coke_bottles_exported.zip](https://github.com/IBM/powerai-vision-object-detection/raw/master/data/coke_bottles_exported.zip) which was already labeled and exported.
109 |
110 | 
111 |
112 | > Tip: Use the `Unlabeled` object filter to help you see when you are done.
113 |
114 | * You can use the `Augment data` button to expand your data set. Label the original images first and be sure to consider whether flipped images (horizontal or vertical) are appropriate for your use case. If you use data augmentation, a new expanded data set will be created for you.
115 |
116 | * Click `Export data set` to save a copy of your work. Now that you've spent some time labeling, this zip will let you start over without losing your work.
117 |
118 | ### 5. Train the model
119 |
120 | * Open your augmented data set and click `Train model` (just use the original if you did not augment).
121 |
122 | * Be sure to select `Object detection` as the `Type of training`.
123 |
124 | * Select a model for speed or accuracy.
125 |
126 | * Take a look at the `Advanced options`. You can keep the defaults, but if you'd like to speed things up, try reducing the `Max iteration`.
127 |
128 | * Click the `Train` button.
129 |
130 | 
131 |
132 | * When the training is done, click `Model details` to see some metrics and graphical description of the model accuracy.
133 |
134 | 
135 |
136 | ### 6. Deploy and test
137 |
138 | * Click `Deploy model` and `Deploy` to make the model available via an API endpoint.
139 |
140 | * `Copy` the API endpoint from your deployed model. Use this to test with `curl` (below) and to set the `POWERAI_VISION_WEB_API_URL` for the web app (in step 7).
141 |
142 | * Test your model in the Maximo Visual Inspection UI. Use `Import` to choose a test image. The result shows you the image with bounding boxes around the detected objects and a table showing the labels and confidence scores.
143 |
144 | 
145 |
146 | * From a command-line, you can test your deployed REST endpoint using an image file and the `curl` command. Notice the output JSON shows multiple bottles were detected and provides the confidence, label and location for each of them.
147 |
148 | * Make sure to unzip the `test_set.zip` file in the `data` directory.
149 |
150 | > Warning: this example used `--insecure` for convenience.
151 |
152 | ```bash
153 | $ cd data/test_set
154 | $ curl --compressed --insecure -i -F files=@coke_bottle_23.png https://host-or-ip-addr/powerai-vision-ny/api/dlapis/e4d6101f-3337-49ae-a6ba-5cb5305b28d9
155 |
156 | My request looked like the following:
157 |
158 | curl --compressed --insecure -i -F files=@coke_bottle_23.png https://vision-p.aus.stglabs.ibm.com/visual-inspection-v130-prod/api/dlapis/6e0a7-d9da-4314-a350-d9a2c0f2b
159 |
160 | HTTP/2 200
161 | server: nginx/1.15.6
162 | date: Tue, 01 Dec 2020 17:22:14 GMT
163 | content-type: application/json
164 | vary: Accept-Encoding
165 | x-powered-by: Servlet/3.1
166 | access-control-allow-origin: *
167 | access-control-allow-headers: X-Auth-Token, origin, content-type, accept, authorization
168 | access-control-allow-credentials: true
169 | access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS, HEAD
170 | content-language: en
171 | x-frame-options: SAMEORIGIN
172 | x-content-type-options: nosniff
173 | x-xss-protection: 1; mode=block
174 | strict-transport-security: max-age=15724800; includeSubDomains
175 | content-encoding: gzip
176 |
177 | {"webAPIId":"6e3480a7-d9da-4314-a350-d9aac22c0f2b","imageUrl":"http://vision-v130-prod-service:9080/vision-v130-prod-api/uploads/temp/6e3480a7-d9da-4314-a350-d9aac22c0f2b/c62cc7dc-dbc2-448d-85e8-41485a2c17f5.png","imageMd5":"ea1f6444fa7dabeda7049d426699879c","classified":[{"label":"Coke","confidence":0.995542585849762,"xmin":601,"ymin":29,"xmax":763,"ymax":546,"attr":[{}]},{"label":"Coke","confidence":0.982393741607666,"xmin":447,"ymin":40,"xmax":593,"ymax":572,"attr":[{}]},{"label":"Coke","confidence":0.8604443669319153,"xmin":67,"ymin":18,"xmax":245,"ymax":604,"attr":[{}]},{"label":"Coke","confidence":0.8339363932609558,"xmin":269,"ymin":32,"xmax":422,"ymax":589,"attr":[{}]}],"result":"success"}
178 | ```
179 |
180 | ### 7. Run the app
181 |
182 | An example web app demonstrates how to upload a picture, use the trained and deployed model, and display the detected objects by drawing bounding boxes and labels on the image. The functionality is similar to the above testing, but the code is provided for you to customize.
183 |
184 | Use the [Deploy to IBM Cloud](#deploy-to-ibm-cloud) button **OR** [Run locally](#run-locally).
185 |
186 | #### Deploy to IBM Cloud
187 |
188 | [](https://cloud.ibm.com/devops/setup/deploy?repository=https://github.com/IBM/powerai-vision-object-detection)
189 |
190 | 1. Press the above `Deploy to IBM Cloud` button, click `Create+` to create an *IBM Cloud API Key* and then click on `Deploy`.
191 |
192 | 1. In Toolchains, click on `Delivery Pipeline` to watch while the app is deployed.
193 |
194 | 1. Use the IBM Cloud dashboard to manage the app. The app is named `powerai-vision-object-detection` with a unique suffix.
195 |
196 | 1. Add your PowerAI Vision API endpoint:
197 | * Click on the app in the IBM Cloud dashboard.
198 | * Select `Runtime` in the sidebar.
199 | * Hit `Environment variables` in the middle button bar.
200 | * Hit the `Add` button.
201 | * Add the name `POWERAI_VISION_WEB_API_URL` and set the value to the web API that you deployed (above).
202 | * Hit the `Save` button. The app will restart automatically.
203 | * Click on `Visit App URL` to use the app.
204 |
205 | #### Run locally
206 |
207 | Use your cloned repo to build and run the web app.
208 |
209 | > Note: These steps are only needed when running locally instead of using the ``Deploy to IBM Cloud`` button.
210 |
211 | * Copy the env.sample to .env. Edit the file to set the URL to point to the web API that you deployed (above).
212 |
213 | * Assuming you have pre-installed [Node.js](https://nodejs.org/en/download/) and [npm](https://docs.npmjs.com/getting-started/installing-node), run the following commands:
214 |
215 | ```bash
216 | cd powerai-vision-object-detection
217 | npm install
218 | npm start
219 | ```
220 |
221 | * Use a browser to go to the web UI. The default URL is `http://localhost:8081`.
222 |
223 | #### Use the web app
224 |
225 | * Use the `Choose File` button to choose a file. On a phone this should give you an option to use your camera. On a laptop, you choose an image file (JPG or PNG).
226 |
227 | * Press the `Upload File` button to send the image to your web API and render the results.
228 |
229 | 
230 |
231 | * The UI will show an error message if you did not configure your POWERAI_VISION_WEB_API_URL or if your API is not deployed (in SuperVessel you can quickly redeploy every hour).
232 |
233 | ## Links
234 |
235 | * [Maximo Visual Inspection Learning Path](https://developer.ibm.com/technologies/vision/series/learning-path-powerai-vision/): From computer vision basics to creating your own apps.
236 | * [Demo on YouTube](https://www.youtube.com/watch?v=xoLcXQs4SP4): Watch the video
237 | * [Object Detection](https://en.wikipedia.org/wiki/Object_detection): Object detection on Wikipedia
238 | * [TensorFlow Object Detection](https://ai.googleblog.com/2017/06/supercharge-your-computer-vision-models.html): Supercharge your Computer Vision models with the TensorFlow Object Detection API
239 | * [AI Article](https://www.entrepreneur.com/article/283990): Can Artificial Intelligence Identify Pictures Better than Humans?
240 | * [From the developers](https://developer.ibm.com/linuxonpower/2017/08/30/ibm-powerai-vision-speeds-transfer-learning-greater-accuracy-real-world-example/): IBM PowerAI Vision speeds transfer learning with greater accuracy -- a real world example
241 |
242 | ## Learn more
243 |
244 | * **Artificial Intelligence code patterns**: Enjoyed this code pattern? Check out our other [AI code patterns](https://developer.ibm.com/technologies/artificial-intelligence/).
245 | * **AI and Data code pattern playlist**: Bookmark our [playlist](https://www.youtube.com/playlist?list=PLzUbsvIyrNfknNewObx5N7uGZ5FKH0Fde) with all of our code pattern videos
246 | * **PowerAI**: Get started or get scaling, faster, with a software distribution for machine learning running on the Enterprise Platform for AI: [IBM Power Systems](https://www.ibm.com/ms-en/marketplace/deep-learning-platform)
247 |
248 | ## License
249 |
250 | This code pattern is licensed under the Apache License, Version 2. Separate third-party code objects invoked within this code pattern are licensed by their respective providers pursuant to their own separate licenses. Contributions are subject to the [Developer Certificate of Origin, Version 1.1](https://developercertificate.org/) and the [Apache License, Version 2](https://www.apache.org/licenses/LICENSE-2.0.txt).
251 |
252 | [Apache License FAQ](https://www.apache.org/foundation/license-faq.html#WhatDoesItMEAN)
253 |
--------------------------------------------------------------------------------
/app.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2017 IBM Corp. All Rights Reserved.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the 'License'); you may not
5 | * use this file except in compliance with the License. You may obtain a copy of
6 | * the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | * License for the specific language governing permissions and limitations under
14 | * the License.
15 | */
16 |
'use strict';
/* eslint-env node */

const express = require('express');
const request = require('request');

// Error text returned to the browser when the endpoint is not configured.
const MISSING_ENV =
    'Missing required runtime environment variable POWERAI_VISION_WEB_API_URL';

// Load optional overrides from a local .env file (silently skipped if absent).
require('dotenv').config({
  silent: true,
});

const app = express();
// Port precedence: explicit PORT, Cloud Foundry's VCAP_APP_PORT, then 8081.
const port = process.env.PORT || process.env.VCAP_APP_PORT || 8081;
const poweraiVisionWebApiUrl = process.env.POWERAI_VISION_WEB_API_URL;

console.log('Web API URL: ' + poweraiVisionWebApiUrl);

if (!poweraiVisionWebApiUrl) {
  console.log(MISSING_ENV);
}

// Serve the static UI (index.html, js/, css/, img/) from the project root.
app.use(express.static(__dirname));

/**
 * Proxy an uploaded image to the PowerAI Vision inference endpoint.
 * The multipart request body is streamed through unchanged. The response
 * is always `{data: <JSON string>}` so the browser-side code can
 * JSON.parse the `data` field -- including on error.
 */
app.post('/uploadpic', function(req, result) {
  if (!poweraiVisionWebApiUrl) {
    console.log(MISSING_ENV);
    result.send({data: JSON.stringify({error: MISSING_ENV})});
    return;
  }
  req.pipe(request.post({
    url: poweraiVisionWebApiUrl,
    gzip: true,
    agentOptions: {
      // The Vision server commonly uses a self-signed certificate.
      rejectUnauthorized: false,
    }}, function(err, resp, body) {
    if (err) {
      // Fix: previously the undefined `body` was sent on failure, which
      // broke JSON.parse in the browser. Send a structured error instead
      // (same shape as the MISSING_ENV path) so the UI can display it.
      console.log(err);
      result.send({data: JSON.stringify({error: err.message})});
      return;
    }
    console.log(body);
    result.send({data: body});
  }));
});

app.listen(port, () => {
  console.log(`Server starting on ${port}`);
});
64 |
65 |
--------------------------------------------------------------------------------
/css/index.css:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. The ASF licenses this file
6 | * to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
/* Page title bar text (rendered inside the fixed navy header). */
h1 {
  font-size:24px;
  font-weight:normal;
  margin:10px;
  overflow:visible;
  padding:0px;
  text-align:left;
  background-color: navy;
  color: white;
}

/* Base page layout and typeface. */
html, body {
  height: 100%;
  margin: 0;
  padding: 0%;
  font-family: sans-serif;
  text-align: left;
}
/* Main content area; top margin clears the 80px fixed header. */
main {
  margin-top: 100px;
  margin-left: 20px;
  margin-right: 20px;
  height: 75%;
  font-family: sans-serif;
  text-align: left;
  flex-direction: column;
}
/* Fixed navy banner across the top of the page. */
header {
  overflow: hidden;
  position: fixed;
  top: 0;
  width: 100%;
  height: 80px;
  background-color: navy;
}
header img, header h1 {
  float: left;
  display: block;
}
/* Small camera logo in the header. */
header img#logo {
  width: 28px;
  height: 28px;
  margin: 8px;
}
header h1 {
  color: white;
  text-align: left;
  font-weight: bold;
}

/* Results container: image/canvas, counts, and details table stack here. */
article {
  margin-top: 0;
  display: flex;
  flex-flow: column wrap;
}

/* Uploaded image preview (hidden by script; canvas is displayed instead). */
img {
  padding: 0;
  display: block;
  max-height: 100%;
  max-width: 100%;
}
/* Canvas carrying the image plus drawn bounding boxes and labels. */
canvas {
  padding: 0;
  display: block;
  float: left;
  flex-direction: column;
  overflow: scroll;
  object-fit: contain;
  object-position: top left;
  max-width: 100%;
}
/* Detection details table (label, confidence, min/max positions). */
table {
  border-collapse: collapse;
  border-spacing: 0;
  float: left;
  max-width: 700px;
  border: 1px solid #ddd;
  margin-right: 20px;
  margin-bottom: 20px;
  table-layout: fixed;
}

th, td {
  text-align: left;
  padding: 8px;
  width: 100%;
}

/* Zebra-stripe rows; right-align all numeric columns after the label. */
tr:nth-child(even){background-color: #f2f2f2}
tr th:nth-child(n+2){text-align: right}
tr td:nth-child(n+2){text-align: right}
112 |
--------------------------------------------------------------------------------
/data/ATTRIBUTIONS.md:
--------------------------------------------------------------------------------
1 | # Photo Credits
2 |
3 | The images provided in these datasets were collected using
4 | Google Image Search with a usage rights filter of
5 | "Labeled for reuse with modification".
6 |
7 | Images which should not have passed that filter have been removed.
8 | If any additional images should not have passed that filter, please
9 | help us correct that by logging an issue or a pull request.
10 |
11 | Some of the photos request proper attribution. We are happy
12 | to provide that here:
13 |
14 | * Many of the photos are by [Mike Mozart](https://www.flickr.com/photos/jeepersmedia/14287454177/), Flickr [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/)
15 |
16 | ## Modifications
17 |
18 | * The process of gathering and classifying the images may have resulted in resizing or changing the format, but otherwise the original images were not altered.
19 |
20 | * When displayed in the test UI (PowerAI Vision or the example web app), labels and bounding boxes are drawn on top of the images.
21 |
--------------------------------------------------------------------------------
/data/coke_bottles.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/data/coke_bottles.zip
--------------------------------------------------------------------------------
/data/coke_bottles_exported.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/data/coke_bottles_exported.zip
--------------------------------------------------------------------------------
/data/coke_bottles_set2.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/data/coke_bottles_set2.zip
--------------------------------------------------------------------------------
/data/test_set.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/data/test_set.zip
--------------------------------------------------------------------------------
/doc/source/images/Locate-and-count-items-in-an-image-with-object-detection-flow-arch-10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/doc/source/images/Locate-and-count-items-in-an-image-with-object-detection-flow-arch-10.png
--------------------------------------------------------------------------------
/doc/source/images/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/doc/source/images/architecture.png
--------------------------------------------------------------------------------
/doc/source/images/build_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/doc/source/images/build_model.png
--------------------------------------------------------------------------------
/doc/source/images/create_new_dataset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/doc/source/images/create_new_dataset.png
--------------------------------------------------------------------------------
/doc/source/images/model_details.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/doc/source/images/model_details.png
--------------------------------------------------------------------------------
/doc/source/images/object_detection_app.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/doc/source/images/object_detection_app.png
--------------------------------------------------------------------------------
/doc/source/images/save_labels.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/doc/source/images/save_labels.png
--------------------------------------------------------------------------------
/doc/source/images/test_ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/doc/source/images/test_ui.png
--------------------------------------------------------------------------------
/env.sample:
--------------------------------------------------------------------------------
1 | POWERAI_VISION_WEB_API_URL=https://ny1.ptopenlab.com/AIVision/api/dlapis/
2 |
--------------------------------------------------------------------------------
/favicon16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/favicon16.png
--------------------------------------------------------------------------------
/favicon32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/favicon32.png
--------------------------------------------------------------------------------
/favicon64.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/favicon64.png
--------------------------------------------------------------------------------
/img/camera.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/powerai-vision-object-detection/46f654fa2af362cb2ca2fe32c6b691e2e03533e8/img/camera.png
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 | PowerAI Vision Object Detection
28 |
29 |
30 |
31 |
32 |
PowerAI Vision Object Detection
33 |
34 |
35 |
36 |
37 |
38 |
39 |
47 |
48 |
49 |
50 |
51 |
52 |
--------------------------------------------------------------------------------
/js/index.js:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. The ASF licenses this file
6 | * to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | /* eslint-env browser */
21 |
/**
 * Append one row to the given table, one cell per value.
 * @param {Object} table - table element to receive the new row
 * @param {string} cellType - element tag for each cell ('th' or 'td')
 * @param {[]} values - cell contents, in column order
 */
function addRow(table, cellType, values) {
  const row = document.createElement('tr');
  values.forEach((val) => {
    const cell = document.createElement(cellType);
    cell.appendChild(document.createTextNode(val));
    row.appendChild(cell);
  });
  table.appendChild(row);
}
39 |
/**
 * Get the label text color to use for a given label string.
 * @param {string} label
 * @return {string} a CSS color name
 */
function textColor(label) {
  // Map avoids accidental hits on Object.prototype keys.
  const palette = new Map([
    ['coca-cola', 'white'],
    ['diet coke', 'red'],
    ['coke zero', 'white'],
  ]);
  return palette.has(label) ? palette.get(label) : 'cornsilk';
}
53 |
/**
 * Get the boundary box color to use for a given label string.
 * @param {string} label
 * @return {string} a CSS color name
 */
function boundaryColor(label) {
  // Map avoids accidental hits on Object.prototype keys.
  const palette = new Map([
    ['coca-cola', 'red'],
    ['diet coke', 'silver'],
    ['coke zero', 'black'],
  ]);
  return palette.has(label) ? palette.get(label) : 'cornflowerblue';
}
67 |
/**
 * Get a string describing how many objects of each type were detected.
 * @param {[]} detectedObjects
 * @return {string} comma-separated counts, e.g. "2 coca-cola, 1 diet coke"
 */
function countByLabel(detectedObjects) {
  // Tally occurrences per label, preserving first-seen order.
  const tally = new Map();
  for (const obj of detectedObjects) {
    const label = obj['label'];
    tally.set(label, (tally.get(label) || 0) + 1);
  }

  const parts = [];
  tally.forEach(function(count, label) {
    parts.push(count + ' ' + label); // e.g. 1 coca-cola
  });
  return parts.join(', ');
}
91 |
/**
 * Draw boundary boxes and label banners around the detected objects.
 * @param {[]} detectedObjects - objects with label, confidence, xmin/ymin/xmax/ymax
 * @param {Object} ctx - 2D canvas context holding the uploaded image
 */
function drawBoundaryBoxes(detectedObjects, ctx) {
  ctx.lineWidth = 5;
  ctx.font='24px serif';

  for (const obj of detectedObjects) {
    const {label, confidence, xmin, ymin, xmax, ymax} = obj;
    const color = boundaryColor(label);

    // Outline the detected object.
    ctx.strokeStyle = color;
    ctx.strokeRect(xmin, ymin, xmax - xmin, ymax - ymin);

    // Fill a banner along the top edge and write the label + confidence on it.
    ctx.fillStyle = color;
    ctx.fillRect(xmin, ymin, xmax - xmin, 25);
    ctx.fillStyle = textColor(label);
    ctx.fillText(label + ': ' + confidence.toFixed(3), xmin + 5, ymin + 20);
  }
}
122 |
/**
 * Create and populate a table showing the detection details
 * (label, confidence, min/max positions), one row per object.
 * No table is added when nothing was detected.
 * @param {[]} detectedObjects
 * @param {Object} parent - element to append the table to
 */
function detectedObjectsTable(detectedObjects, parent) {
  if (detectedObjects.length === 0) {
    return;
  }

  const table = document.createElement('table');
  addRow(table, 'th', ['Label', 'Conf', 'Min Pos', 'Max Pos']);

  for (const obj of detectedObjects) {
    addRow(table, 'td', [
      obj['label'],
      obj['confidence'].toFixed(3),
      '(' + obj['xmin'] + ',' + obj['ymin'] + ')',
      '(' + obj['xmax'] + ',' + obj['ymax'] + ')',
    ]);
  }
  parent.appendChild(table);
}
146 |
// Wire up the page once the DOM is ready: results are rendered into the
// <article> element whenever the hidden upload iframe finishes loading.
window.addEventListener('load', function() {
  const article = document.querySelector('article');

  /**
   * Populate the article with formatted results: the uploaded image with
   * bounding boxes drawn on a canvas, a count headline, a per-label summary,
   * and a details table -- or an error dump when the response has no
   * `classified` key.
   * @param {Object} jsonResult - parsed inference response from the web API
   */
  function populateArticle(jsonResult) {
    // Remove previous results
    article.innerHTML = '';

    // Display the image
    const myImg = new Image();

    // Read the image file from the input selector for display.
    const fileInput = document.getElementById('fileinput');
    const reader = new FileReader();
    reader.onload = function(e) {
      // Setting src triggers myImg.onload (below) once the image decodes.
      myImg.src = e.target.result;
      // NOTE(review): myImg is also appended unconditionally after this
      // handler is registered; re-appending the same node only moves it,
      // so this second append looks redundant -- confirm before removing.
      article.appendChild(myImg);
    };
    reader.readAsDataURL(fileInput.files[0]);

    // The raw <img> stays hidden; the annotated canvas is what gets shown.
    myImg.style.display = 'none';
    myImg.onload = function() {
      // Draw the image at its natural size, then overlay the boxes.
      const myCanvas = document.createElement('canvas');
      const ctx = myCanvas.getContext('2d');
      ctx.canvas.height = myImg.height;
      ctx.canvas.width = myImg.width;
      ctx.drawImage(myImg, 0, 0, myImg.width, myImg.height);
      if (Object.prototype.hasOwnProperty.call(jsonResult, 'classified')) {
        drawBoundaryBoxes(jsonResult.classified, ctx);
      }
      article.appendChild(myCanvas);
    };
    article.appendChild(myImg);

    if (Object.prototype.hasOwnProperty.call(jsonResult, 'classified')) {
      const classified = jsonResult.classified;

      // Headline with total count, then a per-label summary line.
      const myCount = document.createElement('h3');
      myCount.textContent = classified.length + ' objects detected';
      article.appendChild(myCount);
      article.appendChild(document.createTextNode(countByLabel(classified)));

      detectedObjectsTable(classified, article);
    } else {
      // No `classified` key: treat the response as an error report.
      const myDiv = document.createElement('div');
      myDiv.className = 'error';
      myDiv.id = 'error-div';
      const myTitle = document.createElement('h3');
      myTitle.textContent = 'ERROR';
      myDiv.appendChild(myTitle);
      // Dump keys/values to show error info
      for (const key in jsonResult) {
        if (Object.prototype.hasOwnProperty.call(jsonResult, key)) {
          const myP = document.createElement('p');
          myP.textContent = key + ': ' + jsonResult[key];
          myDiv.appendChild(myP);
        }
      }
      article.appendChild(myDiv);
    }
  }

  // When upload results load into the hidden iframe, parse and render them.
  // The server wraps the API response as {data: "<json string>"}, hence the
  // double JSON.parse below.
  const raw = top.frames['mytarget'];
  const myTarget = document.getElementById('mytarget');
  if (myTarget) { // optional for tests
    myTarget.addEventListener('load', function() {
      const rawContent = raw.document.body.innerText;
      const rawJson = JSON.parse(rawContent);
      const rawJsonJson = JSON.parse(rawJson.data);
      console.log(rawJsonJson);

      populateArticle(rawJsonJson);
    });
  }
});
226 |
227 | if (typeof module !== 'undefined' && typeof module.exports !== 'undefined') {
228 | module.exports = {addRow, textColor}; // for testing
229 | }
230 |
231 |
--------------------------------------------------------------------------------
/manifest.yml:
--------------------------------------------------------------------------------
1 | applications:
2 | - memory: 256M
3 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "powerai-vision-object-detection",
3 | "description": "Use PowerAI Vision for Object Detection.",
4 | "repository": {
5 | "type": "git",
6 | "url": "git+https://github.com/IBM/powerai-vision-object-detection.git"
7 | },
8 | "keywords": [
9 | "PowerAI"
10 | ],
11 | "license": "Apache-2.0",
12 | "bugs": {
13 | "url": "https://github.com/IBM/powerai-vision-object-detection/issues"
14 | },
15 | "homepage": "https://github.com/IBM/powerai-vision-object-detection#readme",
16 | "devDependencies": {
17 | "chai": "^4.3.4",
18 | "eslint": "^8.6.0",
19 | "eslint-config-google": "^0.14.0",
20 | "eslint-plugin-node": "^11.1.0",
21 | "istanbul": "^0.4.5",
22 | "jsdom": "^19.0.0",
23 | "jsdom-global": "^3.0.2",
24 | "markdownlint-cli": "^0.30.0",
25 | "mocha": "^9.1.3",
26 | "remark-cli": "^10.0.1",
27 | "remark-lint": "^9.1.1",
28 | "remark-lint-no-dead-urls": "^1.1.0",
29 | "remark-validate-links": "^11.0.2",
30 | "sinon": "^12.0.1",
31 | "sinon-test": "^3.1.1"
32 | },
33 | "scripts": {
34 | "start": "node app.js",
35 | "markdownlint": "./node_modules/.bin/markdownlint README.md",
36 | "lint-md": "./node_modules/.bin/remark -f --ignore-pattern *ja.md .",
37 | "lint": "npm run lint:javascript",
38 | "eslint": "eslint *.js js test",
39 | "lint:javascript": "eslint . --ext js,html --ignore-path .gitignore",
40 | "test": "npm run markdownlint && npm run lint-md && npm run unit && npm run eslint",
41 | "unit": "istanbul cover _mocha -V test/unit -- -R spec"
42 | },
43 | "remarkConfig": {
44 | "plugins": [
45 | "remark-validate-links",
46 | "remark-lint-no-dead-urls"
47 | ]
48 | },
49 | "dependencies": {
50 | "dotenv": "^8.0.0",
51 | "express": "^4.17.1",
52 | "handlebars": "^4.1.2",
53 | "js-yaml": "^3.13.1",
54 | "request": "^2.88.0"
55 | },
56 | "engines": {
57 | "node": ">=10.0.0"
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/service-worker.js:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. The ASF licenses this file
6 | * to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | /* global self, caches */
21 | 'use strict';
22 |
// Cache bucket name; the activate handler deletes any cache whose key
// differs from this, so bump/rename it to invalidate old cached assets.
const CACHE_NAME = 'powerai-vision-object-detection';

// Static assets pre-cached at install time.
const urlstocache = [
  'css/index.css',
  'js/index.js',
];
29 |
// Pre-cache the page assets when the service worker is installed.
// The install phase is held open until the cache has been populated.
self.addEventListener('install', function(event) {
  const precache = caches.open(CACHE_NAME).then(function(cache) {
    console.log('cache opened');
    return cache.addAll(urlstocache);
  });
  event.waitUntil(precache);
});
40 |
41 |
// On activation, remove caches left behind by earlier worker versions.
self.addEventListener('activate', function(event) {
  console.log('worker activated');
  const cleanup = caches.keys().then(function(keys) {
    // Anything not matching the current cache name is stale.
    const staleKeys = keys.filter(function(key) {
      return key !== CACHE_NAME;
    });
    return Promise.all(staleKeys.map(function(key) {
      return caches.delete(key);
    }));
  });
  event.waitUntil(cleanup);
});
58 |
--------------------------------------------------------------------------------
/test/unit/test.index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2017 IBM Corp. All Rights Reserved.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the 'License'); you may not
5 | * use this file except in compliance with the License. You may obtain a copy of
6 | * the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
12 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | * License for the specific language governing permissions and limitations under
14 | * the License.
15 | */
16 |
17 | 'use strict';
18 |
19 | const chai = require('chai');
20 | const expect = chai.expect;
21 | const sinon = require('sinon');
22 | require('jsdom-global')();
23 | const indexjs = require('../../js/index');
24 |
describe('test index.js functions', function() {
  // Both tests are fully synchronous, so the Mocha `done` callback is
  // omitted — Mocha treats a test as sync when the callback takes no
  // arguments, and an unneeded `done` risks a timeout if it is skipped.

  // addRow() should append exactly one row element to the given table.
  it('#addRow()', function() {
    const appendChild = sinon.spy();
    const mockTable = {
      appendChild: appendChild,
    };

    indexjs.addRow(mockTable, 'testType', ['a', 'b', 'c']);

    sinon.assert.calledOnce(appendChild);
  });

  // textColor() maps known product labels to specific colors and falls
  // back to 'cornsilk' for anything unrecognized.
  it('#textColor()', function() {
    const f = indexjs.textColor;
    expect(f('coca-cola')).to.equal('white');
    expect(f('diet coke')).to.equal('red');
    expect(f('coke zero')).to.equal('white');
    expect(f('how did pepsi get in here')).to.equal('cornsilk');
  });
});
47 |
--------------------------------------------------------------------------------