
5. Generate an API key and add it to `local.json`
6. Change line 37 to filter tweets on whichever terms you'd like (see the sketch after this list)
7. Install node modules: `npm install`
8. Run the script: `node twitter.js`
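
As a rough idea of what step 6 refers to, here's a sketch modeled on the `client.stream` call in `nl-firebase-twitter/backend/index.js` further down in this repo; the exact variable name and line in `twitter.js` may differ:

```
// Hypothetical example: comma-separated terms passed to the Twitter
// streaming API's `track` parameter (phrases may contain spaces)
const searchTerms = 'googlecloud,firebase,machine learning';
```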

## Natural Language API + Firebase Realtime Twitter dashboard demo

1. `cd` into `nl-firebase-twitter/`
2. Create a project in the [Firebase console](http://firebase.google.com/console) and install the [Firebase CLI](https://firebase.google.com/docs/cli/)
3. `cd` into the `frontend/` directory and run `firebase login` and `firebase init` to associate this with the Firebase project you just created. When prompted, don't overwrite existing files. Create a **database** and **hosting** project (no Functions).
4. In your Firebase console, click "Add Firebase to your web app". Copy the credentials to the top of `frontend/main.js`
5. `cd` into the `backend/` directory and run `npm install` to install dependencies
6. Generate a service account for your project by navigating to the "Project settings" tab in your Firebase console and then selecting "Service Accounts". Click "Generate New Private Key" and save this in your `backend/` directory as `keyfile.json`
7. Generate [Twitter Streaming API](https://dev.twitter.com/streaming/overview) credentials and copy them to `backend/local.json` (a sample `local.json` is sketched after this list)
8. Navigate to the Cloud console for your project. Enable the Natural Language API and generate an API key. Replace `YOUR-API-KEY` in `backend/local.json` with this key.
9. Replace `searchTerms` in `backend/index.js` with the search terms you'd like to filter tweets on
10. Replace `FIREBASE-PROJECT-ID` in `backend/local.json` with the ID of your Firebase project
11. Set up BigQuery: in your Cloud console for the same project, create a BigQuery dataset. Then create a table in that dataset. When creating the table, click **Edit as text** and paste the following:
```
id:STRING,text:STRING,user:STRING,user_time_zone:STRING,user_followers_count:INTEGER,hashtags:STRING,tokens:STRING,score:STRING,magnitude:STRING,entities:STRING
```
12. Add your BigQuery dataset and table names to `backend/local.json`.
13. Run the server: from the `backend/` directory run `node index.js`. You should see tweet data being written to your Firebase database
14. In a separate terminal process, run the frontend: from the `frontend/` directory run `firebase serve`
15. Deploy your frontend: from the `frontend/` directory run `firebase deploy`
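
For reference, here is the shape of the `backend/local.json` that the steps above fill in. The key names are taken from the `config.*` lookups in `backend/index.js` below; the values are placeholders to replace with your own credentials:

```
{
  "twitter_consumer_key": "YOUR-TWITTER-CONSUMER-KEY",
  "twitter_consumer_secret": "YOUR-TWITTER-CONSUMER-SECRET",
  "twitter_access_key": "YOUR-TWITTER-ACCESS-TOKEN",
  "twitter_access_secret": "YOUR-TWITTER-ACCESS-TOKEN-SECRET",
  "cloud_api_key": "YOUR-API-KEY",
  "project_id": "FIREBASE-PROJECT-ID",
  "bigquery_dataset": "YOUR-BIGQUERY-DATASET",
  "bigquery_table": "YOUR-BIGQUERY-TABLE"
}
```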


## Multiple API demo

1. `cd` into `vision-speech-nl-translate`
2. Make sure you've set up your [GOOGLE_APPLICATION_CREDENTIALS](https://developers.google.com/identity/protocols/application-default-credentials) with a Cloud project that has the Vision, Speech, NL, and Translation APIs enabled (see the example after this list)
3. Run the script: `python textify.py`
4. Note: if you're running it with image OCR, copy an image file to your local directory

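Setting the variable typically looks like this (the path below is a placeholder for wherever you saved a service account key for that project):

```
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your-keyfile.json"
```
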
--------------------------------------------------------------------------------
/nl-firebase-twitter/frontend/main.js:
--------------------------------------------------------------------------------
// Copyright 2017 Google Inc.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.


// TODO: initialize Firebase. Place your Firebase config credentials here
// var config = {....}
// firebase.initializeApp(config);

const database = firebase.database();
const adjRef = database.ref('tokens').child('ADJ');
const htRef = database.ref('hashtags');


// Listen for the latest analyzed tweet and update the dashboard
database.ref('latest').on('value', function(data) {

  let tweet = data.val();
  let currentScore = tweet.score;

  let hashtagArr = [];
  let entityArr = [];
  let nounArr = [];
  let adjArr = [];
  let verbArr = [];

  for (let i in tweet.hashtags) {
    let htText = tweet.hashtags[i].text;
    hashtagArr.push(htText);
  }

  for (let i in tweet.entities) {
    let entityText = tweet.entities[i].name;
    entityArr.push(entityText);
  }

  // Bucket tokens by part of speech, skipping hashtag markers and URLs
  for (let i in tweet.tokens) {
    let token = tweet.tokens[i];
    if ((token.partOfSpeech.tag === "NOUN") && (token.lemma != "#") && (token.lemma.substring(0,4) != "http")) {
      nounArr.push(token.lemma.toLowerCase());
    } else if (token.partOfSpeech.tag === "ADJ") {
      adjArr.push(token.lemma.toLowerCase());
    } else if (token.partOfSpeech.tag === "VERB") {
      verbArr.push(token.lemma.toLowerCase());
    }
  }

  $('#latest-tweet').fadeOut();
  $('#latest-tweet').html('');
  $('#latest-tweet').fadeIn();
  $('.nouns').text(nounArr.join(', '));
  $('.verbs').text(verbArr.join(', '));
  $('.adjectives').text(adjArr.join(', '));

  // Adjust the sentiment scale for the latest tweet
  let scaleWidthPx = 400; // width of our scale in pixels
  let scaledSentiment = (scaleWidthPx * (currentScore + 1)) / 2;
  $('#current-sentiment-latest-val').css('margin-left', scaledSentiment + 'px');

});

Chart.defaults.global.defaultFontColor = '#03A9F4';
Chart.defaults.global.defaultFontStyle = 'bold';
Chart.defaults.global.defaultFontSize = 14;
Chart.defaults.global.elements.rectangle.borderColor = '#2196F3';
Chart.defaults.global.elements.rectangle.backgroundColor = '#90CAF9';
Chart.defaults.global.legend.display = false;


// Build the adjectives bar chart from the current top ten adjectives,
// then keep it in sync as counts change
adjRef.orderByValue().limitToLast(10).once('value', function(data) {

  let chartLabels = [];
  let chartData = [];

  data.forEach(function(token) {
    let word = token.key;
    chartLabels.push(word);
    chartData.push(token.val());
  });

  var ctx = document.getElementById("adjChart");

  var myChart = new Chart(ctx, {
    type: 'bar',
    data: {
      labels: chartLabels.reverse(),
      datasets: [{
        label: '# of mentions',
        data: chartData.reverse(),
        borderWidth: 1
      }]
    },
    options: {
      scales: {
        yAxes: [{
          ticks: {
            beginAtZero: true,
            minRotation: 1,
            autoSkip: true
          }
        }]
      },
      title: {
        display: true,
        text: 'Most common adjectives'
      },
      showTooltips: true
    }
  });


  adjRef.orderByValue().limitToLast(10).on('value', function(newData) {

    let updatedLabels = [];
    let updatedData = [];

    newData.forEach(function(token) {
      let word = token.key;
      updatedLabels.push(word);
      updatedData.push(token.val());
    });

    myChart.data.datasets[0].data = updatedData.reverse();
    myChart.data.labels = updatedLabels.reverse();
    myChart.update();

  });
});

// Chart average sentiment for the ten most-mentioned hashtags
htRef.orderByChild('numMentions').limitToLast(10).on('value', function(data) {

  let htChartLabels = [];
  let labelSentiments = [];

  data.forEach(function(snap) {
    let ht = snap.key;
    htChartLabels.push(ht);
    let numMentions = snap.val().numMentions;
    let sentiment = snap.val().totalScore / numMentions;
    labelSentiments.push(sentiment);
  });

  var scaleChart = document.getElementById("htChart");

  var htChart = new Chart(scaleChart, {
    type: 'horizontalBar',
    data: {
      labels: htChartLabels,
      datasets: [{
        label: 'sentiment value',
        data: labelSentiments,
        borderWidth: 1
      }]
    },
    options: {
      elements: {
        rectangle: {
          borderWidth: 2
        }
      },
      title: {
        display: true,
        text: 'Sentiment by hashtag'
      },
      scales: {
        xAxes: [{
          ticks: {
            min: -1,
            max: 1
          }
        }]
      },
      responsive: true
    }
  });
});
--------------------------------------------------------------------------------
/nl-firebase-twitter/backend/index.js:
--------------------------------------------------------------------------------
// Copyright 2017 Google Inc.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

'use strict';

const request = require('request');
const Twitter = require('twitter');
const config = require('./local.json');
const client = new Twitter({
  consumer_key: config.twitter_consumer_key,
  consumer_secret: config.twitter_consumer_secret,
  access_token_key: config.twitter_access_key,
  access_token_secret: config.twitter_access_secret
});

const gcloud = require('google-cloud')({
  keyFilename: 'keyfile.json',
  projectId: config.project_id
});
const bigquery = gcloud.bigquery();
const dataset = bigquery.dataset(config.bigquery_dataset);
const table = dataset.table(config.bigquery_table);

const Filter = require('bad-words'),
  filter = new Filter();

// Replace searchTerms with whatever tweets you want to stream
// Details here: https://dev.twitter.com/streaming/overview/request-parameters#track
const searchTerms = 'googleio,googledevelopers,googlecloud,firebase,machine learning,io17,googleio17';

// Add a filter-level param?
client.stream('statuses/filter', {track: searchTerms, language: 'en'}, function(stream) {
  stream.on('data', function(event) {
    // Exclude retweets (text starting with "RT") and tweets containing profanity
    if ((event.text != undefined) && (event.text.substring(0,2) != 'RT') && (event.text === filter.clean(event.text))) {
      callNLApi(event);
    }
  });
  stream.on('error', function(error) {
    console.log('twitter api error: ', error);
  });
});


// INITIALIZE FIREBASE
var admin = require("firebase-admin");
var serviceAccount = require("./keyfile.json");
admin.initializeApp({
  credential: admin.credential.cert(serviceAccount),
  databaseURL: "https://" + config.project_id + ".firebaseio.com"
});

const db = admin.database();
const tweetRef = db.ref('latest');
const hashtagRef = db.ref('hashtags');

// Uses a Firebase transaction to increment a counter
function incrementCount(ref, child, valToIncrement) {
  ref.child(child).transaction(function(data) {
    if (data != null) {
      data += valToIncrement;
    } else {
      // First write for this child: seed it with the initial value
      data = valToIncrement;
    }
    return data;
  });
}


// Whenever a new "latest" tweet is written, update the aggregate token
// and hashtag counts that power the dashboard charts
tweetRef.on('value', function (snap) {
  if (snap.exists()) {
    let tweet = snap.val();
    let tokens = tweet['tokens'];
    let hashtags = tweet['hashtags'];

    for (let i in tokens) {
      let token = tokens[i];
      let word = token.lemma.toLowerCase();

      if ((acceptedWordTypes.indexOf(token.partOfSpeech.tag) != -1) && !(word.match(/[^A-Za-z0-9]/g))) {
        let posRef = db.ref('tokens/' + token.partOfSpeech.tag);
        incrementCount(posRef, word, 1);
      }

    }

    if (hashtags) {
      for (let i in hashtags) {
        let ht = hashtags[i];
        let text = ht.text.toLowerCase();
        let htRef = hashtagRef.child(text);
        incrementCount(htRef, 'totalScore', tweet.score);
        incrementCount(htRef, 'numMentions', 1);
      }
    }
  }
});


const acceptedWordTypes = ['ADJ']; // Add the parts of speech you'd like to graph to this array ('NOUN', 'VERB', etc.)

function callNLApi(tweet) {
  const textUrl = "https://language.googleapis.com/v1/documents:annotateText?key=" + config.cloud_api_key;
  let requestBody = {
    "document": {
      "type": "PLAIN_TEXT",
      "content": tweet.text
    },
    "features": {
      "extractSyntax": true,
      "extractEntities": true,
      "extractDocumentSentiment": true
    }
  }

  let options = {
    url: textUrl,
    method: "POST",
    body: requestBody,
    json: true
  }

  request(options, function(err, resp, body) {
    if ((!err && resp.statusCode == 200) && (body.sentences.length != 0)) {
      let tweetForFb = {
        id: tweet.id_str,
        text: tweet.text,
        user: tweet.user.screen_name,
        user_time_zone: tweet.user.time_zone,
        user_followers_count: tweet.user.followers_count,
        hashtags: tweet.entities.hashtags,
        tokens: body.tokens,
        score: body.documentSentiment.score,
        magnitude: body.documentSentiment.magnitude,
        entities: body.entities
      };

      let bqRow = {
        id: tweet.id_str,
        text: tweet.text,
        user: tweet.user.screen_name,
        user_time_zone: tweet.user.time_zone,
        user_followers_count: tweet.user.followers_count,
        hashtags: JSON.stringify(tweet.entities.hashtags),
        tokens: JSON.stringify(body.tokens),
        score: body.documentSentiment.score,
        magnitude: body.documentSentiment.magnitude,
        entities: JSON.stringify(body.entities)
      }

      tweetRef.set(tweetForFb);
      table.insert(bqRow, function(error, insertErr, apiResp) {
        if (error) {
          console.log('err', error);
        } else if (insertErr.length == 0) {
          console.log('success!');
        }
      });

    } else {
      console.log('NL API error: ', err || body);
    }
  });
}

--------------------------------------------------------------------------------
/vision-speech-nl-translate/textify.py:
--------------------------------------------------------------------------------
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import json
import io
import os
import readline
import time
import ffmpy
import httplib2
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from google.cloud import translate
from pick import pick
from termcolor import colored
import sounddevice as sd
import scipy.io.wavfile as scipy
from pygments import highlight, lexers, formatters
# Audio recording duration and sample rate
DURATION = 5
SAMPLE_RATE = 16000
# Languages supported by Neural Machine Translation
SUPPORTED_LANGUAGES = {"German": "de", "Spanish": "es", "French": "fr",
                       "Japanese": "ja", "Korean": "ko", "Portuguese": "pt",
                       "Turkish": "tr", "Chinese(Simplified)": "zh-CN"}
# [START authenticating]
DISCOVERY_URL = ('https://{api}.googleapis.com/$discovery/rest?'
                 'version={apiVersion}')
# Application default credentials provided by env variable
# GOOGLE_APPLICATION_CREDENTIALS


def get_service(api, version):
    credentials = GoogleCredentials.get_application_default().create_scoped(
        ['https://www.googleapis.com/auth/cloud-platform'])
    http = httplib2.Http()
    credentials.authorize(http)
    return discovery.build(
        api, version, http=http, discoveryServiceUrl=DISCOVERY_URL)
# [END authenticating]


def call_nl_api(text):
    service = get_service('language', 'v1')
    service_request = service.documents().annotateText(
        body={
            'document': {
                'type': 'PLAIN_TEXT',
                'content': text,
            },
            'features': {
                "extractSyntax": True,
                "extractEntities": True,
                "extractDocumentSentiment": True,
            }
        }
    )
    response = service_request.execute()
    print(colored("\nHere's the JSON response " +
                  "for one token of your text:\n",
                  "cyan"))
    formatted_json = json.dumps(response['tokens'][0], indent=2)
    colorful_json = highlight(formatted_json,
                              lexers.JsonLexer(),
                              formatters.TerminalFormatter())
    print(colorful_json)
    score = response['documentSentiment']['score']
    output_text = colored(analyze_sentiment(score), "cyan")
    if response['entities']:
        entities = str(analyze_entities(response['entities']))
        output_text += colored("\nEntities found: " + entities, "white")
    return [output_text, response['language']]


def translate_text_with_model(text, model=translate.NMT):
    # Translates text into the target language.
    title = "Which language would you like to translate it to?"
    options = ["German", "Spanish", "French", "Japanese",
               "Korean", "Portuguese", "Turkish", "Chinese(Simplified)"]
    lang, index = pick(options, title)
    lang_code = SUPPORTED_LANGUAGES[lang]
    translate_client = translate.Client()
    result = translate_client.translate(
        text,
        target_language=lang_code,
        model=model)
    translate_back = translate_client.translate(
        result['translatedText'],
        target_language="en",
        model=model)
    print(colored(("Translated in " + lang +
                   ": " + result['translatedText']), "white"))
    print(colored("Your text translated back to English: " +
                  translate_back['translatedText'], "white"))


def call_speech():
    speech_prompt = input(colored("Press enter to start recording " +
                                  str(DURATION) + " seconds of audio", "cyan"))
    if speech_prompt == "":
        # Record audio and write to file using sounddevice
        myrecording = sd.rec(DURATION * SAMPLE_RATE,
                             samplerate=SAMPLE_RATE,
                             channels=1,
                             blocking=True)
        print(colored("Writing your audio to a file...", "magenta"))
        scipy.write('test.wav', SAMPLE_RATE, myrecording)
        # Convert the WAV recording to FLAC for the Speech API
        filename = 'speech-' + str(int(time.time())) + '.flac'
        rec = ffmpy.FFmpeg(
            inputs={'test.wav': None},
            outputs={filename: None}
        )
        rec.run()
        # Encode audio file and call the Speech API
        with io.open(filename, "rb") as speech:
            # Base64 encode the binary audio file for inclusion in the JSON
            # request.
            speech_content = base64.b64encode(speech.read())
        service = get_service('speech', 'v1beta1')
        print(colored("Transcribing your audio with the Speech API...",
                      "magenta"))
        service_request = service.speech().syncrecognize(
            body={
                'config': {
                    'encoding': 'FLAC',  # raw 16-bit signed LE samples
                    'sampleRate': SAMPLE_RATE,  # 16 khz
                    'languageCode': 'en-US',  # a BCP-47 language tag
                },
                'audio': {
                    'content': speech_content.decode('UTF-8')
                }
            })
        response = service_request.execute()
        text_response = response['results'][0]['alternatives'][0]['transcript']
        return text_response


def call_vision(filename):
    service = get_service('vision', 'v1')
    with open(filename, 'rb') as image:
        image_content = base64.b64encode(image.read())
    service_request = service.images().annotate(body={
        'requests': [{
            'image': {
                'content': image_content.decode('UTF-8')
            },
            'features': [{
                'type': 'DOCUMENT_TEXT_DETECTION'
            }]
        }]
    })
    response = service_request.execute()
    ocr_text = response['responses'][0]['textAnnotations'][0]['description']
    return ocr_text


def analyze_sentiment(score):
    sentiment_str = "You seem "
    if -1 <= score < -0.5:
        sentiment_str += "angry. Hope you feel better soon!"
    elif -0.5 <= score < 0.5:
        sentiment_str += "pretty neutral."
    else:
        sentiment_str += "very happy! Yay :)"
    return sentiment_str + "\n"


def analyze_entities(entities):
    arr = []
    for entity in entities:
        if 'wikipedia_url' in entity['metadata']:
            arr.append(entity['name'] + ': ' +
                       entity['metadata']['wikipedia_url'])
        else:
            arr.append(entity['name'])
    return arr


def handle_nl_and_translate_call(text):
    nl_response = call_nl_api(text)
    analyzed_text = nl_response[0]
    print(analyzed_text)
    translate_ready = input(colored("Next, we'll translate your text using" +
                                    " Neural Machine Translation.\n" +
                                    "Press enter when you're ready\n", "cyan"))
    if translate_ready == "":
        translate_text_with_model(text)


print(colored("We're going to send some text to the Natural Language API!\n" +
              "It supports English, Spanish, and Japanese.\n", "cyan"))
STEP_ONE = input(colored("Enter 't' to type your text,\n" +
                         "'r' to record your text,\n" +
                         "or 'p' to send a photo with text: ", "cyan"))
print("\r")
if STEP_ONE == 't':
    NL_TEXT = input(colored("Enter your text to send\n", "cyan"))
    handle_nl_and_translate_call(NL_TEXT)
elif STEP_ONE == 'r':
    TRANSCRIBED_TEXT = call_speech()
    print("You said: " + TRANSCRIBED_TEXT)
    handle_nl_and_translate_call(TRANSCRIBED_TEXT)
elif STEP_ONE == 'p':
    # Get image file path
    URL = input(colored("Enter the filepath of your image: ", "cyan"))
    if os.path.exists(URL):
        print(colored("Valid image path, sending your image" +
                      " to the Vision API...", "cyan"))
        IMG_TEXT = call_vision(URL)
        print(colored("Found this text in your image: \n" + IMG_TEXT, "white"))
        handle_nl_and_translate_call(IMG_TEXT)
else:
    STEP_ONE = input("That's not a valid entry.")

--------------------------------------------------------------------------------
/vision-api-firebase/main.js:
--------------------------------------------------------------------------------
// Copyright 2017 Google Inc.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

const storage = firebase.storage();
const storageRef = storage.ref();
const db = firebase.database();

const facesRef = db.ref('faces');
const labelsRef = db.ref('labels');
const entitiesRef = db.ref('entities');
const latestImageRef = db.ref('latest');
const numPhotosRef = db.ref('images');
const devicesRef = db.ref('devices');
const latestImgDataRef = db.ref('latestImgData');
const emotions = ['joy', 'anger', 'sorrow', 'surprise'];
const provider = new firebase.auth.TwitterAuthProvider();

let isiPhone = false;
let userId;
let userRef;

// Set default chart settings
Chart.defaults.global.defaultFontColor = '#3F51B5';
Chart.defaults.global.defaultFontStyle = 'bold';
Chart.defaults.global.elements.rectangle.borderColor = '#3F51B5';
Chart.defaults.global.elements.rectangle.backgroundColor = '#9FA8DA';
Chart.defaults.global.legend.display = false;

// Keep the selfie counter in sync with the number of uploaded images
numPhotosRef.on('value', function(snap) {
  let numPhotos = snap.numChildren();
  $('#num-selfies').html('' + numPhotos + '');
});

// Upload the captured image to Cloud Storage and record its gs:// URL
function writeImgtoFb(dataURL, imageRef) {
  imageRef.putString(dataURL, 'data_url').then(function(snapshot) {
    $('#user-img-data').html("");
    $('.data-load-spinner').addClass('is-active');
    let gcsUrl = "gs://" + imageRef.location.bucket + "/" + imageRef.location.path;
    userRef.child('gcsUrl').set(gcsUrl);
    latestImageRef.set({gcsUrl: gcsUrl});
  }).catch(function(error) {
    if (error.code === "storage/unauthorized") {
      $(".mdl-spinner").remove();
      $('.permission-denied').css('visibility', 'visible');
    }
  });
}


// iPhones do a weird image rotation thing - this checks for iPhone using WURFL
function checkIfiPhone(deviceType) {
  if (deviceType.toLowerCase().includes('iphone')) {
    isiPhone = true;
  }
}

devicesRef.push(WURFL);
checkIfiPhone(WURFL.complete_device_name);

firebase.auth().onAuthStateChanged(function(user) {
  if (user) {

    userId = user.uid;
    userRef = db.ref('users').child(userId);

    // Display the Vision API annotations for the most recent image
    latestImgDataRef.on('value', function (snap) {

      let latestImgData = snap.val();
      let facesStr = "";
      let labelsStr = "Labels found: ";

      if (latestImgData !== null) {

        if (latestImgData.faceAnnotations) {
          facesStr += "Found a face!";
          let face = latestImgData.faceAnnotations[0];

          for (let j in emotions) {
            let emotion = emotions[j];
            if ((face[emotion + 'Likelihood'] === "VERY_LIKELY") || (face[emotion + 'Likelihood'] === "LIKELY") || (face[emotion + 'Likelihood'] === "POSSIBLE")) {
              facesStr += " Detected " + emotion + ".";
            }
          }
        }

        if (latestImgData.webDetection && latestImgData.webDetection.webEntities) {
          let labels = latestImgData.webDetection.webEntities;
          let labelsFound = [];
          for (let i in labels) {
            let label = labels[i].description.toLowerCase();
            if (label.length > 1) {
              labelsFound.push("" + label + "");
            }
          }
          labelsStr += labelsFound.join(", ");
        }

        $('#user-img-data').html(labelsStr + "