├── Facebook Sheets API
    └── FB graph API.gs
├── LICENSE
├── README.md
└── alchemyAPI-Scripts
    ├── alchemyAPI.py
    └── compare.py
/Facebook Sheets API/FB graph API.gs:
--------------------------------------------------------------------------------
1 | 
2 | // Get token. The service gets an App token, which is given user scope by the run() function.
3 | // May need updating by re-running run() and logging in via the resulting URL. TODO: add a sidebar, and an error report that emails that link.
4 | function getFB_App_User_Token() {
5 |   var service = getService();
6 |   return service.getAccessToken();
7 | };
8 | 
9 | // Get the ID of the ad - needed to reference the ad URL
10 | function get_ad_id_from_API(myAppTkn) {
11 | 
12 |   var id_string = get_account_id();
13 |   var getURL = "https://graph.facebook.com/v2.8/" + id_string + "/ads?access_token=" + myAppTkn;
14 |   var response = UrlFetchApp.fetch(getURL);
15 |   var response_obj = JSON.parse(response.getContentText());
16 |   id_string = response_obj.data[0].id;
17 |   return id_string;
18 | 
19 | };
20 | 
21 | // May change with the campaign - currently no need for an automated way to retrieve it since it's a static ad I'm testing on
22 | function get_ad_id() {
23 | 
24 |   return "6054271782238";
25 | 
26 | };
27 | 
28 | // ID for the ad account
29 | function get_account_id() {
30 | 
31 |   return "act_10150467159223448";
32 | 
33 | };
34 | 
35 | // Cycle through dates by using an offset
36 | function fill_sheet() {
37 | 
38 |   var myAppTkn = getFB_App_User_Token();
39 |   var id_string = get_account_id();
40 |   for (var i = 10; i > 0; i--) {
41 |     Logger.log('Looping to offset: ' + i);
42 |     fill_date_conversions(i);
43 |     Utilities.sleep(2000);
44 |   };
45 | 
46 | };
47 | 
48 | function fill_date_conversions(offset) {
49 |   if (typeof offset == 'undefined') {offset = 1;}
50 |   var myAppTkn = getFB_App_User_Token();
51 |   var id_string = get_account_id();
52 | 
53 |   var current_sheet = SpreadsheetApp.getActiveSpreadsheet().getActiveSheet();
54 |   var sheet_columns = current_sheet.getLastColumn();
55 |   var column_headers = current_sheet.getRange(1, 1, 1, sheet_columns);
56 |   var dict_keys = column_headers.getValues()[0];
57 | 
58 |   // Get the date 'offset' days back (yesterday by default).
59 |   var d = new Date();
60 |   Logger.log(d);
61 |   d.setDate(d.getDate() - offset);
62 |   Logger.log(d);
63 |   var month = d.getUTCMonth() + 1; // months from 1-12
64 |   var day = d.getUTCDate();
65 |   var year = d.getUTCFullYear();
66 |   var date = year + '-' + month + '-' + day;
67 |   Logger.log("Running query for: " + date);
68 | 
69 |   var getURL = "https://graph.facebook.com/v2.8/" + id_string + "/insights?fields=actions,action_values,spend&time_range[since]=" + date + "&time_range[until]="
70 |     + date + "&limit=50&access_token=" + myAppTkn;
71 | 
72 |   var response = UrlFetchApp.fetch(getURL);
73 |   var response_obj = JSON.parse(response.getContentText());
74 |   Logger.log(response_obj);
75 |   if (typeof(response_obj.data[0]) == 'undefined') {current_sheet.appendRow([date, "No data."]); return};
76 |   var actions_list = response_obj.data[0].actions;
77 |   if (!(actions_list)) {actions_list = {}};
78 |   // Add any column headers that might be missing
79 |   check_column_headers(response_obj);
80 | 
81 |   // Get the column headers, then iterate through looking for matches.
82 |   var new_row = [date];
83 |   var spend = response_obj.data[0].spend;
84 |   // Check the column headers, fill in the value if there's matching info. Skip if it's the date column or empty.
85 |   for (var key in dict_keys) {
86 | 
87 |     var value = 0;
88 |     Logger.log(dict_keys[key]);
89 |     if (dict_keys[key] == 'date') {continue};
90 |     if (dict_keys[key] == '') {continue};
91 |     if (dict_keys[key] == 'All Signups') {continue};
92 |     if (dict_keys[key] == 'spend') {new_row.push(spend); continue};
93 |     for (var action in actions_list) {
94 |       if (dict_keys[key] == actions_list[action].action_type) {
95 |         value = actions_list[action].value;
96 |       };
97 |     };
98 |     new_row.push(value);
99 |   };
100 |   Logger.log(new_row);
101 |   current_sheet.appendRow(new_row);
102 |   var lastrow = current_sheet.getLastRow();
103 |   var p_range = current_sheet.getRange(lastrow - 1, 16);
104 |   p_range.copyTo(current_sheet.getRange(lastrow, 16), {contentsOnly:false}); // copy column 16 (content and formatting) down from the previous row
105 | 
106 |   return;
107 | 
108 | };
109 | 
110 | // Same as fill_date_conversions(), but runs for yesterday only and queries the ad ID instead of the account ID.
111 | function add_date_conversions() {
112 |   var offset = 1;
113 |   var myAppTkn = getFB_App_User_Token();
114 |   var id_string = get_ad_id();
115 | 
116 |   var current_sheet = SpreadsheetApp.getActiveSpreadsheet().getActiveSheet();
117 |   var sheet_columns = current_sheet.getLastColumn();
118 |   var column_headers = current_sheet.getRange(1, 1, 1, sheet_columns);
119 |   var dict_keys = column_headers.getValues()[0];
120 | 
121 |   // Get yesterday's date.
122 |   var d = new Date();
123 |   Logger.log(d);
124 |   d.setDate(d.getDate() - offset);
125 |   Logger.log(d);
126 |   var month = d.getUTCMonth() + 1; // months from 1-12
127 |   var day = d.getUTCDate();
128 |   var year = d.getUTCFullYear();
129 |   var date = year + '-' + month + '-' + day;
130 |   Logger.log("Running query for: " + date);
131 | 
132 |   var getURL = "https://graph.facebook.com/v2.8/" + id_string + "/insights?fields=actions,action_values,spend&time_range[since]=" + date + "&time_range[until]="
133 |     + date + "&limit=50&access_token=" + myAppTkn;
134 | 
135 |   var response = UrlFetchApp.fetch(getURL);
136 |   var response_obj = JSON.parse(response.getContentText());
137 |   Logger.log(response_obj);
138 |   if (typeof(response_obj.data[0]) == 'undefined') {current_sheet.appendRow([date, "No data."]); return};
139 |   var actions_list = response_obj.data[0].actions;
140 |   if (!(actions_list)) {actions_list = {}};
141 |   // Add any column headers that might be missing
142 |   check_column_headers(response_obj);
143 | 
144 |   // Get the column headers, then iterate through looking for matches.
145 |   var new_row = [date];
146 |   var spend = response_obj.data[0].spend;
147 |   // Check the column headers, fill in the value if there's matching info. Skip if it's the date column or empty.
148 |   for (var key in dict_keys) {
149 | 
150 |     var value = 0;
151 |     Logger.log(dict_keys[key]);
152 |     if (dict_keys[key] == 'date') {continue};
153 |     if (dict_keys[key] == '') {continue};
154 |     if (dict_keys[key] == 'All Signups') {continue};
155 |     if (dict_keys[key] == 'spend') {new_row.push(spend); continue};
156 |     for (var action in actions_list) {
157 |       if (dict_keys[key] == actions_list[action].action_type) {
158 |         value = actions_list[action].value;
159 |       };
160 |     };
161 |     new_row.push(value);
162 |   };
163 |   Logger.log(new_row);
164 |   current_sheet.appendRow(new_row);
165 |   var lastrow = current_sheet.getLastRow();
166 |   var p_range = current_sheet.getRange(lastrow - 1, 16);
167 |   p_range.copyTo(current_sheet.getRange(lastrow, 16), {contentsOnly:false}); // copy column 16 (content and formatting) down from the previous row
168 | 
169 |   return;
170 | 
171 | };
172 | 
173 | // Checks the column titles in row 1 and adds any that are missing so that data isn't lost.
174 | // These columns form the array that the data is matched and ordered against.
175 | function check_column_headers(json1) {
176 | 
177 |   var current_sheet = SpreadsheetApp.getActiveSpreadsheet().getSheets()[0];
178 |   var sheet_columns = current_sheet.getLastColumn();
179 |   var key_array = [];
180 |   var dict_keys = current_sheet.getRange(1, 1, 1, sheet_columns);
181 |   var dict_values = dict_keys.getValues()[0];
182 | 
183 |   // If there's something in the header cell, add it to the array.
184 |   for (var row in dict_values) {
185 |     if (dict_values[row]) {key_array.push(dict_values[row])};
186 |   };
187 | 
188 |   // Start from the existing headers and append any action types that aren't there yet.
189 |   var new_keys = key_array;
190 | 
191 |   for (var key in json1.data[0].actions) {
192 |     if (key_array.indexOf(json1.data[0].actions[key].action_type) < 0) {
193 |       new_keys.push(json1.data[0].actions[key].action_type);
194 |     };
195 |   };
196 |   dict_keys = current_sheet.getRange(1, 1, 1, new_keys.length);
197 |   dict_keys.setValues([new_keys]);
198 |   return;
199 | 
200 | };
201 | 
202 | /*
203 |  * Facebook OAuth 2.0: manual login flow and API requests
204 |  * https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow
205 |  * https://developers.facebook.com/apps/
206 |  * https://github.com/googlesamples/apps-script-oauth2
207 |  * This section creates an OAuth2 service object and makes sure the app has a user-scoped access token.
208 |  */
209 | 
210 | // These are required to build the service.
211 | // Both values are created on the Facebook app dashboard.
212 | var CLIENT_ID = '***';
213 | var CLIENT_SECRET = '***';
214 | 
215 | /*
216 |  * Authorizes and makes a test request to the Facebook API.
217 |  * Logs the result if already authorised; otherwise logs the authorization URL.
218 |  */
219 | 
220 | // Test the service with a prebuilt API call for my ad account
221 | function run(e) {
222 |   var service = getService();
223 |   var html = '';
224 |   if (service.hasAccess()) {
225 |     var url = 'https://graph.facebook.com/v2.8/act_10150467159223448/insights?fields=actions,action_values,spend';
226 |     var response = UrlFetchApp.fetch(url, {
227 |       headers: {
228 |         'Authorization': 'Bearer ' + service.getAccessToken()
229 |       }
230 |     });
231 |     var result = JSON.parse(response.getContentText());
232 |     Logger.log(JSON.stringify(result, null, 2));
233 |     Logger.log(service.getAccessToken());
234 |   } else {
235 |     var authorizationUrl = service.getAuthorizationUrl();
236 |     Logger.log('Open the following URL and re-run the script: %s',
237 |         authorizationUrl);
238 |   }
239 | }
240 | 
241 | /**
242 |  * Reset the authorization state, so that it can be re-tested.
243 |  */
244 | function reset() {
245 |   var service = getService();
246 |   service.reset();
247 | }
248 | 
249 | /**
250 |  * Configures the service.
251 |  */
252 | function getService() {
253 |   return OAuth2.createService('Facebook')
254 |       // Set the endpoint URLs.
255 |       .setAuthorizationBaseUrl('https://www.facebook.com/dialog/oauth')
256 |       .setTokenUrl('https://graph.facebook.com/v2.8/oauth/access_token')
257 | 
258 |       // Set the client ID and secret.
259 |       .setClientId(CLIENT_ID)
260 |       .setClientSecret(CLIENT_SECRET)
261 | 
262 |       // Set the name of the callback function that should be invoked to complete
263 |       // the OAuth flow.
264 |       .setCallbackFunction('authCallback')
265 | 
266 |       // Set the property store where authorized tokens should be persisted.
267 |       .setPropertyStore(PropertiesService.getUserProperties());
268 | }
269 | 
270 | /**
271 |  * Handles the OAuth callback.
272 | */ 273 | function authCallback(request) { 274 | var service = getService(); 275 | var authorized = service.handleCallback(request); 276 | if (authorized) { 277 | return HtmlService.createHtmlOutput('Success!'); 278 | } else { 279 | return HtmlService.createHtmlOutput('Denied'); 280 | } 281 | } 282 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SEO-scripts 2 | Some scripts I use for auditing, research and lead generation. 3 | 4 | ### How to use Compare.py: 5 | 6 | You'll need to install Python >= 3.4 7 | 8 | Get an API key from: http://www.alchemyapi.com/api/register.html 9 | 10 | Download compare.py and alchemyAPI.py. Edit compare.py to add your key and urls. 11 | 12 | Run from Python command line. 
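For example, the only lines you need to touch are near the top of compare.py (a minimal sketch: 'YOUR_KEY_HERE' is the placeholder already in the script, and the two URLs below are stand-ins for whatever pages you want to compare):

```python
# In compare.py: paste your 40-character AlchemyAPI key and the two pages to compare.
alchemyapi = AlchemyAPI('YOUR_KEY_HERE')

url1 = 'https://example.com/page-one'   # placeholder URL
url2 = 'https://example.com/page-two'   # placeholder URL
```

Then run `python compare.py` (or `python3 compare.py`, depending on your setup) from a terminal.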
13 | 
14 | TODO: Command-line options to 1) add a key (written to a text file), 2) specify URLs, and 3) do a full single-page comparison.
15 | 
16 | ### How to use FB graph API.gs:
17 | 
18 | You'll need your ad account's ID: https://www.facebook.com/business/help/1492627900875762
19 | 
20 | Plug it into `get_account_id()` to identify your account. Then run the script once and check the logs for the authorization URL. Paste that URL into your browser's address bar, log in, and grant access.
21 | 
22 | Set the function 'add_date_conversions()' to run on a daily schedule.
23 | 
24 | TODO: Create a sidebar and interface to better automate some of the functions. Look at a better way of filling in missing rows or checking dates. Better error reporting, so that when someone receives an email it says more than 'it's not working'.
25 | 
--------------------------------------------------------------------------------
/alchemyAPI-Scripts/alchemyAPI.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2013 AlchemyAPI
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | # Modified 18/03/16 by Liam Martin
18 | 
19 | from urllib.parse import urlencode
20 | import requests
21 | 
22 | 
23 | class AlchemyAPI:
24 |     ENDPOINTS = {}
25 |     ENDPOINTS['sentiment'] = {}
26 |     ENDPOINTS['sentiment']['url'] = '/url/URLGetTextSentiment'
27 |     ENDPOINTS['sentiment']['text'] = '/text/TextGetTextSentiment'
28 |     ENDPOINTS['sentiment']['html'] = '/html/HTMLGetTextSentiment'
29 |     ENDPOINTS['sentiment_targeted'] = {}
30 |     ENDPOINTS['sentiment_targeted']['url'] = '/url/URLGetTargetedSentiment'
31 |     ENDPOINTS['sentiment_targeted']['text'] = '/text/TextGetTargetedSentiment'
32 |     ENDPOINTS['sentiment_targeted']['html'] = '/html/HTMLGetTargetedSentiment'
33 |     ENDPOINTS['author'] = {}
34 |     ENDPOINTS['author']['url'] = '/url/URLGetAuthor'
35 |     ENDPOINTS['author']['html'] = '/html/HTMLGetAuthor'
36 |     ENDPOINTS['keywords'] = {}
37 |     ENDPOINTS['keywords']['url'] = '/url/URLGetRankedKeywords'
38 |     ENDPOINTS['keywords']['text'] = '/text/TextGetRankedKeywords'
39 |     ENDPOINTS['keywords']['html'] = '/html/HTMLGetRankedKeywords'
40 |     ENDPOINTS['concepts'] = {}
41 |     ENDPOINTS['concepts']['url'] = '/url/URLGetRankedConcepts'
42 |     ENDPOINTS['concepts']['text'] = '/text/TextGetRankedConcepts'
43 |     ENDPOINTS['concepts']['html'] = '/html/HTMLGetRankedConcepts'
44 |     ENDPOINTS['entities'] = {}
45 |     ENDPOINTS['entities']['url'] = '/url/URLGetRankedNamedEntities'
46 |     ENDPOINTS['entities']['text'] = '/text/TextGetRankedNamedEntities'
47 |     ENDPOINTS['entities']['html'] = '/html/HTMLGetRankedNamedEntities'
48 |     ENDPOINTS['category'] = {}
49 |     ENDPOINTS['category']['url'] = '/url/URLGetCategory'
50 |     ENDPOINTS['category']['text'] = '/text/TextGetCategory'
51 |     ENDPOINTS['category']['html'] = '/html/HTMLGetCategory'
52 |     ENDPOINTS['relations'] = {}
53 |     ENDPOINTS['relations']['url'] = '/url/URLGetRelations'
54 |     ENDPOINTS['relations']['text'] = '/text/TextGetRelations'
55 |     ENDPOINTS['relations']['html'] = 
'/html/HTMLGetRelations' 56 | ENDPOINTS['language'] = {} 57 | ENDPOINTS['language']['url'] = '/url/URLGetLanguage' 58 | ENDPOINTS['language']['text'] = '/text/TextGetLanguage' 59 | ENDPOINTS['language']['html'] = '/html/HTMLGetLanguage' 60 | ENDPOINTS['text'] = {} 61 | ENDPOINTS['text']['url'] = '/url/URLGetText' 62 | ENDPOINTS['text']['html'] = '/html/HTMLGetText' 63 | ENDPOINTS['text_raw'] = {} 64 | ENDPOINTS['text_raw']['url'] = '/url/URLGetRawText' 65 | ENDPOINTS['text_raw']['html'] = '/html/HTMLGetRawText' 66 | ENDPOINTS['title'] = {} 67 | ENDPOINTS['title']['url'] = '/url/URLGetTitle' 68 | ENDPOINTS['title']['html'] = '/html/HTMLGetTitle' 69 | ENDPOINTS['feeds'] = {} 70 | ENDPOINTS['feeds']['url'] = '/url/URLGetFeedLinks' 71 | ENDPOINTS['feeds']['html'] = '/html/HTMLGetFeedLinks' 72 | ENDPOINTS['microformats'] = {} 73 | ENDPOINTS['microformats']['url'] = '/url/URLGetMicroformatData' 74 | ENDPOINTS['microformats']['html'] = '/html/HTMLGetMicroformatData' 75 | ENDPOINTS['combined'] = {} 76 | ENDPOINTS['combined']['url'] = '/url/URLGetCombinedData' 77 | ENDPOINTS['combined']['text'] = '/text/TextGetCombinedData' 78 | ENDPOINTS['image'] = {} 79 | ENDPOINTS['image']['url'] = '/url/URLGetImage' 80 | ENDPOINTS['imagetagging'] = {} 81 | ENDPOINTS['imagetagging']['url'] = '/url/URLGetRankedImageKeywords' 82 | ENDPOINTS['imagetagging']['image'] = '/image/ImageGetRankedImageKeywords' 83 | ENDPOINTS['facetagging'] = {} 84 | ENDPOINTS['facetagging']['url'] = '/url/URLGetRankedImageFaceTags' 85 | ENDPOINTS['facetagging']['image'] = '/image/ImageGetRankedImageFaceTags' 86 | ENDPOINTS['taxonomy'] = {} 87 | ENDPOINTS['taxonomy']['url'] = '/url/URLGetRankedTaxonomy' 88 | ENDPOINTS['taxonomy']['html'] = '/html/HTMLGetRankedTaxonomy' 89 | ENDPOINTS['taxonomy']['text'] = '/text/TextGetRankedTaxonomy' 90 | 91 | # The base URL for all endpoints 92 | BASE_URL = 'http://access.alchemyapi.com/calls' 93 | 94 | s = requests.Session() 95 | 96 | def __init__(self, key): 97 | """ 98 | Initializes the SDK so it can send requests to AlchemyAPI for analysis. 99 | Key needs to be passed to the class on creation of the object. 100 | """ 101 | try: 102 | if len(key) ==40: 103 | self.apikey = key 104 | else: 105 | raise Exception('Invalid Key for AlchemyAPI') 106 | except ValueError: 107 | print('Valid Key required to access AlchemyAPI.') 108 | print('If you do not have an API Key from AlchemyAPI, please register for one at: http://www.alchemyapi.com/api/register.html') 109 | except Exception as e: 110 | print(e) 111 | 112 | def entities(self, flavor, data, options={}): 113 | """ 114 | Extracts the entities for text, a URL or HTML. 115 | For an overview, please refer to: http://www.alchemyapi.com/products/features/entity-extraction/ 116 | For the docs, please refer to: http://www.alchemyapi.com/api/entity-extraction/ 117 | 118 | INPUT: 119 | flavor -> which version of the call, i.e. text, url or html. 120 | data -> the data to analyze, either the text, the url or html code. 121 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 122 | 123 | Available Options: 124 | disambiguate -> disambiguate entities (i.e. Apple the company vs. apple the fruit). 0: disabled, 1: enabled (default) 125 | linkedData -> include linked data on disambiguated entities. 0: disabled, 1: enabled (default) 126 | coreference -> resolve coreferences (i.e. the pronouns that correspond to named entities). 
0: disabled, 1: enabled (default) 127 | quotations -> extract quotations by entities. 0: disabled (default), 1: enabled. 128 | sentiment -> analyze sentiment for each entity. 0: disabled (default), 1: enabled. Requires 1 additional API transction if enabled. 129 | showSourceText -> 0: disabled (default), 1: enabled 130 | maxRetrieve -> the maximum number of entities to retrieve (default: 50) 131 | 132 | OUTPUT: 133 | The response, already converted from JSON to a Python object. 134 | """ 135 | 136 | # Make sure this request supports this flavor 137 | if flavor not in AlchemyAPI.ENDPOINTS['entities']: 138 | return {'status': 'ERROR', 'statusInfo': 'entity extraction for ' + flavor + ' not available'} 139 | 140 | # add the data to the options and analyze 141 | options[flavor] = data 142 | return self.__analyze(AlchemyAPI.ENDPOINTS['entities'][flavor], {}, options) 143 | 144 | def keywords(self, flavor, data, options={}): 145 | """ 146 | Extracts the keywords from text, a URL or HTML. 147 | For an overview, please refer to: http://www.alchemyapi.com/products/features/keyword-extraction/ 148 | For the docs, please refer to: http://www.alchemyapi.com/api/keyword-extraction/ 149 | 150 | INPUT: 151 | flavor -> which version of the call, i.e. text, url or html. 152 | data -> the data to analyze, either the text, the url or html code. 153 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 154 | 155 | Available Options: 156 | keywordExtractMode -> normal (default), strict 157 | sentiment -> analyze sentiment for each keyword. 0: disabled (default), 1: enabled. Requires 1 additional API transaction if enabled. 158 | showSourceText -> 0: disabled (default), 1: enabled. 159 | maxRetrieve -> the max number of keywords returned (default: 50) 160 | 161 | OUTPUT: 162 | The response, already converted from JSON to a Python object. 163 | """ 164 | 165 | # Make sure this request supports this flavor 166 | if flavor not in AlchemyAPI.ENDPOINTS['keywords']: 167 | return {'status': 'ERROR', 'statusInfo': 'keyword extraction for ' + flavor + ' not available'} 168 | 169 | # add the data to the options and analyze 170 | options[flavor] = data 171 | return self.__analyze(AlchemyAPI.ENDPOINTS['keywords'][flavor], {}, options) 172 | 173 | def concepts(self, flavor, data, options={}): 174 | """ 175 | Tags the concepts for text, a URL or HTML. 176 | For an overview, please refer to: http://www.alchemyapi.com/products/features/concept-tagging/ 177 | For the docs, please refer to: http://www.alchemyapi.com/api/concept-tagging/ 178 | 179 | Available Options: 180 | maxRetrieve -> the maximum number of concepts to retrieve (default: 8) 181 | linkedData -> include linked data, 0: disabled, 1: enabled (default) 182 | showSourceText -> 0:disabled (default), 1: enabled 183 | 184 | OUTPUT: 185 | The response, already converted from JSON to a Python object. 186 | """ 187 | 188 | # Make sure this request supports this flavor 189 | if flavor not in AlchemyAPI.ENDPOINTS['concepts']: 190 | return {'status': 'ERROR', 'statusInfo': 'concept tagging for ' + flavor + ' not available'} 191 | 192 | # add the data to the options and analyze 193 | options[flavor] = data 194 | return self.__analyze(AlchemyAPI.ENDPOINTS['concepts'][flavor], {}, options) 195 | 196 | def sentiment(self, flavor, data, options={}): 197 | """ 198 | Calculates the sentiment for text, a URL or HTML. 
199 | For an overview, please refer to: http://www.alchemyapi.com/products/features/sentiment-analysis/ 200 | For the docs, please refer to: http://www.alchemyapi.com/api/sentiment-analysis/ 201 | 202 | INPUT: 203 | flavor -> which version of the call, i.e. text, url or html. 204 | data -> the data to analyze, either the text, the url or html code. 205 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 206 | 207 | Available Options: 208 | showSourceText -> 0: disabled (default), 1: enabled 209 | 210 | OUTPUT: 211 | The response, already converted from JSON to a Python object. 212 | """ 213 | 214 | # Make sure this request supports this flavor 215 | if flavor not in AlchemyAPI.ENDPOINTS['sentiment']: 216 | return {'status': 'ERROR', 'statusInfo': 'sentiment analysis for ' + flavor + ' not available'} 217 | 218 | # add the data to the options and analyze 219 | options[flavor] = data 220 | return self.__analyze(AlchemyAPI.ENDPOINTS['sentiment'][flavor], {}, options) 221 | 222 | def sentiment_targeted(self, flavor, data, target, options={}): 223 | """ 224 | Calculates the targeted sentiment for text, a URL or HTML. 225 | For an overview, please refer to: http://www.alchemyapi.com/products/features/sentiment-analysis/ 226 | For the docs, please refer to: http://www.alchemyapi.com/api/sentiment-analysis/ 227 | 228 | INPUT: 229 | flavor -> which version of the call, i.e. text, url or html. 230 | data -> the data to analyze, either the text, the url or html code. 231 | target -> the word or phrase to run sentiment analysis on. 232 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 233 | 234 | Available Options: 235 | showSourceText -> 0: disabled, 1: enabled 236 | 237 | OUTPUT: 238 | The response, already converted from JSON to a Python object. 239 | """ 240 | 241 | # Make sure the target is valid 242 | if target is None or target == '': 243 | return {'status': 'ERROR', 'statusInfo': 'targeted sentiment requires a non-null target'} 244 | 245 | # Make sure this request supports this flavor 246 | if flavor not in AlchemyAPI.ENDPOINTS['sentiment_targeted']: 247 | return {'status': 'ERROR', 'statusInfo': 'targeted sentiment analysis for ' + flavor + ' not available'} 248 | 249 | # add the URL encoded data and target to the options and analyze 250 | options[flavor] = data 251 | options['target'] = target 252 | return self.__analyze(AlchemyAPI.ENDPOINTS['sentiment_targeted'][flavor], {}, options) 253 | 254 | def text(self, flavor, data, options={}): 255 | """ 256 | Extracts the cleaned text (removes ads, navigation, etc.) for text, a URL or HTML. 257 | For an overview, please refer to: http://www.alchemyapi.com/products/features/text-extraction/ 258 | For the docs, please refer to: http://www.alchemyapi.com/api/text-extraction/ 259 | 260 | INPUT: 261 | flavor -> which version of the call, i.e. text, url or html. 262 | data -> the data to analyze, either the text, the url or html code. 263 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 264 | 265 | Available Options: 266 | useMetadata -> utilize meta description data, 0: disabled, 1: enabled (default) 267 | extractLinks -> include links, 0: disabled (default), 1: enabled. 268 | 269 | OUTPUT: 270 | The response, already converted from JSON to a Python object. 
271 |         """
272 | 
273 |         # Make sure this request supports this flavor
274 |         if flavor not in AlchemyAPI.ENDPOINTS['text']:
275 |             return {'status': 'ERROR', 'statusInfo': 'clean text extraction for ' + flavor + ' not available'}
276 | 
277 |         # add the data to the options and analyze
278 |         options[flavor] = data
279 |         return self.__analyze(AlchemyAPI.ENDPOINTS['text'][flavor], {}, options)
280 | 
281 |     def text_raw(self, flavor, data, options={}):
282 |         """
283 |         Extracts the raw text (includes ads, navigation, etc.) for a URL or HTML.
284 |         For an overview, please refer to: http://www.alchemyapi.com/products/features/text-extraction/
285 |         For the docs, please refer to: http://www.alchemyapi.com/api/text-extraction/
286 | 
287 |         INPUT:
288 |         flavor -> which version of the call, i.e. text, url or html.
289 |         data -> the data to analyze, either the text, the url or html code.
290 |         options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
291 | 
292 |         Available Options:
293 |         none
294 | 
295 |         OUTPUT:
296 |         The response, already converted from JSON to a Python object.
297 |         """
298 | 
299 |         # Make sure this request supports this flavor
300 |         if flavor not in AlchemyAPI.ENDPOINTS['text_raw']:
301 |             return {'status': 'ERROR', 'statusInfo': 'raw text extraction for ' + flavor + ' not available'}
302 | 
303 |         # add the data to the options and analyze
304 |         options[flavor] = data
305 |         return self.__analyze(AlchemyAPI.ENDPOINTS['text_raw'][flavor], {}, options)
306 | 
307 |     def author(self, flavor, data, options={}):
308 |         """
309 |         Extracts the author from a URL or HTML.
310 |         For an overview, please refer to: http://www.alchemyapi.com/products/features/author-extraction/
311 |         For the docs, please refer to: http://www.alchemyapi.com/api/author-extraction/
312 | 
313 |         INPUT:
314 |         flavor -> which version of the call, i.e. text, url or html.
315 |         data -> the data to analyze, either the text, the url or html code.
316 |         options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
317 | 
318 |         Available Options:
319 |         none
320 | 
321 |         OUTPUT:
322 |         The response, already converted from JSON to a Python object.
323 |         """
324 | 
325 |         # Make sure this request supports this flavor
326 |         if flavor not in AlchemyAPI.ENDPOINTS['author']:
327 |             return {'status': 'ERROR', 'statusInfo': 'author extraction for ' + flavor + ' not available'}
328 | 
329 |         # add the data to the options and analyze
330 |         options[flavor] = data
331 |         return self.__analyze(AlchemyAPI.ENDPOINTS['author'][flavor], {}, options)
332 | 
333 |     def title(self, flavor, data, options={}):
334 |         """
335 |         Extracts the title for a URL or HTML.
336 |         For an overview, please refer to: http://www.alchemyapi.com/products/features/text-extraction/
337 |         For the docs, please refer to: http://www.alchemyapi.com/api/text-extraction/
338 | 
339 |         INPUT:
340 |         flavor -> which version of the call, i.e. text, url or html.
341 |         data -> the data to analyze, either the text, the url or html code.
342 |         options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
343 | 
344 |         Available Options:
345 |         useMetadata -> utilize title info embedded in meta data, 0: disabled, 1: enabled (default)
346 | 
347 |         OUTPUT:
348 |         The response, already converted from JSON to a Python object.
349 | """ 350 | 351 | # Make sure this request supports this flavor 352 | if flavor not in AlchemyAPI.ENDPOINTS['title']: 353 | return {'status': 'ERROR', 'statusInfo': 'title extraction for ' + flavor + ' not available'} 354 | 355 | # add the data to the options and analyze 356 | options[flavor] = data 357 | return self.__analyze(AlchemyAPI.ENDPOINTS['title'][flavor], {}, options) 358 | 359 | def relations(self, flavor, data, options={}): 360 | """ 361 | Extracts the relations for text, a URL or HTML. 362 | For an overview, please refer to: http://www.alchemyapi.com/products/features/relation-extraction/ 363 | For the docs, please refer to: http://www.alchemyapi.com/api/relation-extraction/ 364 | 365 | INPUT: 366 | flavor -> which version of the call, i.e. text, url or html. 367 | data -> the data to analyze, either the text, the url or html code. 368 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 369 | 370 | Available Options: 371 | sentiment -> 0: disabled (default), 1: enabled. Requires one additional API transaction if enabled. 372 | keywords -> extract keywords from the subject and object. 0: disabled (default), 1: enabled. Requires one additional API transaction if enabled. 373 | entities -> extract entities from the subject and object. 0: disabled (default), 1: enabled. Requires one additional API transaction if enabled. 374 | requireEntities -> only extract relations that have entities. 0: disabled (default), 1: enabled. 375 | sentimentExcludeEntities -> exclude full entity name in sentiment analysis. 0: disabled, 1: enabled (default) 376 | disambiguate -> disambiguate entities (i.e. Apple the company vs. apple the fruit). 0: disabled, 1: enabled (default) 377 | linkedData -> include linked data with disambiguated entities. 0: disabled, 1: enabled (default). 378 | coreference -> resolve entity coreferences. 0: disabled, 1: enabled (default) 379 | showSourceText -> 0: disabled (default), 1: enabled. 380 | maxRetrieve -> the maximum number of relations to extract (default: 50, max: 100) 381 | 382 | OUTPUT: 383 | The response, already converted from JSON to a Python object. 384 | """ 385 | 386 | # Make sure this request supports this flavor 387 | if flavor not in AlchemyAPI.ENDPOINTS['relations']: 388 | return {'status': 'ERROR', 'statusInfo': 'relation extraction for ' + flavor + ' not available'} 389 | 390 | # add the data to the options and analyze 391 | options[flavor] = data 392 | return self.__analyze(AlchemyAPI.ENDPOINTS['relations'][flavor], {}, options) 393 | 394 | def category(self, flavor, data, options={}): 395 | """ 396 | Categorizes the text for text, a URL or HTML. 397 | For an overview, please refer to: http://www.alchemyapi.com/products/features/text-categorization/ 398 | For the docs, please refer to: http://www.alchemyapi.com/api/text-categorization/ 399 | 400 | INPUT: 401 | flavor -> which version of the call, i.e. text, url or html. 402 | data -> the data to analyze, either the text, the url or html code. 403 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 404 | 405 | Available Options: 406 | showSourceText -> 0: disabled (default), 1: enabled 407 | 408 | OUTPUT: 409 | The response, already converted from JSON to a Python object. 
410 | """ 411 | 412 | # Make sure this request supports this flavor 413 | if flavor not in AlchemyAPI.ENDPOINTS['category']: 414 | return {'status': 'ERROR', 'statusInfo': 'text categorization for ' + flavor + ' not available'} 415 | 416 | # add the data to the options and analyze 417 | options[flavor] = data 418 | 419 | return self.__analyze(AlchemyAPI.ENDPOINTS['category'][flavor], {}, options) 420 | 421 | def microformats(self, flavor, data, options={}): 422 | """ 423 | Parses the microformats for a URL or HTML. 424 | For an overview, please refer to: http://www.alchemyapi.com/products/features/microformats-parsing/ 425 | For the docs, please refer to: http://www.alchemyapi.com/api/microformats-parsing/ 426 | 427 | INPUT: 428 | flavor -> which version of the call, i.e. url or html. 429 | data -> the data to analyze, either the the url or html code. 430 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 431 | 432 | Available Options: 433 | none 434 | 435 | OUTPUT: 436 | The response, already converted from JSON to a Python object. 437 | """ 438 | 439 | # Make sure this request supports this flavor 440 | if flavor not in AlchemyAPI.ENDPOINTS['microformats']: 441 | return {'status': 'ERROR', 'statusInfo': 'microformat extraction for ' + flavor + ' not available'} 442 | 443 | # add the data to the options and analyze 444 | options[flavor] = data 445 | return self.__analyze(AlchemyAPI.ENDPOINTS['microformats'][flavor], {}, options) 446 | 447 | def taxonomy(self, flavor, data, options={}): 448 | """ 449 | Taxonomy classification operations. 450 | 451 | INPUT: 452 | flavor -> which version of the call, i.e. url or html. 453 | data -> the data to analyze, either the the url or html code. 454 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 455 | 456 | 457 | Available Options: 458 | showSourceText -> 459 | include the original 'source text' the taxonomy categories were extracted from within the API response 460 | Possible values: 461 | 1 - enabled 462 | 0 - disabled (default) 463 | 464 | sourceText -> 465 | where to obtain the text that will be processed by this API call. 466 | 467 | AlchemyAPI supports multiple modes of text extraction: 468 | web page cleaning (removes ads, navigation links, etc.), raw text extraction 469 | (processes all web page text, including ads / nav links), visual constraint queries, and XPath queries. 470 | 471 | Possible values: 472 | cleaned_or_raw : cleaning enabled, fallback to raw when cleaning produces no text (default) 473 | cleaned : operate on 'cleaned' web page text (web page cleaning enabled) 474 | raw : operate on raw web page text (web page cleaning disabled) 475 | cquery : operate on the results of a visual constraints query 476 | Note: The 'cquery' http argument must also be set to a valid visual constraints query. 477 | xpath : operate on the results of an XPath query 478 | Note: The 'xpath' http argument must also be set to a valid XPath query. 479 | 480 | cquery -> 481 | a visual constraints query to apply to the web page. 482 | 483 | xpath -> 484 | an XPath query to apply to the web page. 485 | 486 | baseUrl -> 487 | rel-tag output base http url (must be uri-argument encoded) 488 | 489 | OUTPUT: 490 | The response, already converted from JSON to a Python object. 
491 | 492 | """ 493 | if flavor not in AlchemyAPI.ENDPOINTS['taxonomy']: 494 | return {'status': 'ERROR', 'statusInfo': 'taxonomy for ' + flavor + ' not available'} 495 | options[flavor] = data 496 | return self.__analyze(AlchemyAPI.ENDPOINTS['taxonomy'][flavor], {}, options) 497 | 498 | def combined(self, flavor, data, options={}): 499 | """ 500 | Combined call for page-image, entity, keyword, title, author, taxonomy, concept. 501 | 502 | INPUT: 503 | flavor -> which version of the call, i.e. url or html. 504 | data -> the data to analyze, either the the url or html code. 505 | options -> various parameters that can be used to adjust how the API works, see below for more info on the available options. 506 | 507 | Available Options: 508 | extract -> 509 | Possible values: page-image, entity, keyword, title, author, taxonomy, concept 510 | default : entity, keyword, taxonomy, concept 511 | 512 | disambiguate -> 513 | disambiguate detected entities 514 | Possible values: 515 | 1 : enabled (default) 516 | 0 : disabled 517 | 518 | linkedData -> 519 | include Linked Data content links with disambiguated entities 520 | Possible values : 521 | 1 : enabled (default) 522 | 0 : disabled 523 | 524 | coreference -> 525 | resolve he/she/etc coreferences into detected entities 526 | Possible values: 527 | 1 : enabled (default) 528 | 0 : disabled 529 | 530 | quotations -> 531 | enable quotations extraction 532 | Possible values: 533 | 1 : enabled 534 | 0 : disabled (default) 535 | 536 | sentiment -> 537 | enable entity-level sentiment analysis 538 | Possible values: 539 | 1 : enabled 540 | 0 : disabled (default) 541 | 542 | showSourceText -> 543 | include the original 'source text' the entities were extracted from within the API response 544 | Possible values: 545 | 1 : enabled 546 | 0 : disabled (default) 547 | 548 | maxRetrieve -> 549 | maximum number of named entities to extract 550 | default : 50 551 | 552 | baseUrl -> 553 | rel-tag output base http url 554 | 555 | 556 | OUTPUT: 557 | The response, already converted from JSON to a Python object. 558 | """ 559 | if flavor not in AlchemyAPI.ENDPOINTS['combined']: 560 | return {'status': 'ERROR', 'statusInfo': 'combined for ' + flavor + ' not available'} 561 | options[flavor] = data 562 | return self.__analyze(AlchemyAPI.ENDPOINTS['combined'][flavor], {}, options) 563 | 564 | def __analyze(self, endpoint, params, post_data=bytearray()): 565 | """ 566 | HTTP Request wrapper that is called by the endpoint functions. This function is not intended to be called through an external interface. 567 | It makes the call, then converts the returned JSON string into a Python object. 568 | 569 | INPUT: 570 | url -> the full URI encoded url 571 | 572 | OUTPUT: 573 | The response, already converted from JSON to a Python object. 574 | """ 575 | 576 | # Add the API Key and set the output mode to JSON 577 | params['apikey'] = self.apikey 578 | params['outputMode'] = 'json' 579 | # Insert the base url 580 | 581 | post_url = "" 582 | try: 583 | post_url = AlchemyAPI.BASE_URL + endpoint + \ 584 | '?' + urlencode(params).encode('utf-8') 585 | except TypeError: 586 | post_url = AlchemyAPI.BASE_URL + endpoint + '?' 
+ urlencode(params)
587 | 
588 |         results = ""
589 |         try:
590 |             results = self.s.post(url=post_url, data=post_data)
591 |         except Exception as e:
592 |             print(e)
593 |             return {'status': 'ERROR', 'statusInfo': 'network-error'}
594 |         try:
595 |             return results.json()
596 |         except Exception as e:
597 |             if results != "":
598 |                 print(results)
599 |             print(e)
600 |             return {'status': 'ERROR', 'statusInfo': 'parse-error'}
601 | 
602 | 
603 | 
--------------------------------------------------------------------------------
/alchemyAPI-Scripts/compare.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from alchemyAPI import AlchemyAPI
3 | 
4 | # Create the AlchemyAPI Object
5 | alchemyapi = AlchemyAPI('YOUR_KEY_HERE')
6 | 
7 | # Add two urls to compare
8 | url1 = 'https://moz.com/beginners-guide-to-seo/keyword-research'
9 | url2 = 'https://www.semrush.com/features/organic-research/'
10 | 
11 | print('Processing urls:\n' + url1 + "\n" + url2)
12 | print('')
13 | 
14 | # Get a keyword report
15 | response1 = alchemyapi.keywords('url', url1, {'sentiment': 1, 'showSourceText': 1, 'sourceText' : 'raw'})
16 | response2 = alchemyapi.keywords('url', url2, {'sentiment': 1, 'showSourceText': 1, 'sourceText' : 'raw'})
17 | 
18 | firstlist = []
19 | secondlist = []
20 | 
21 | if response1['status'] == 'OK':
22 |     for keyword in response1['keywords']:
23 |         firstlist.append(keyword['text'])
24 | else:
25 |     print('Error in keyword extraction call: ', response1['statusInfo'])
26 | 
27 | if response2['status'] == 'OK':
28 |     for keyword in response2['keywords']:
29 |         secondlist.append(keyword['text'])
30 | else:
31 |     print('Error in keyword extraction call: ', response2['statusInfo'])
32 | 
33 | # Print the report
34 | final_list = zip(firstlist, secondlist)
35 | 
36 | print("{:<30} {:<30}".format(url1, url2))
37 | counter = 0
38 | for item in final_list:
39 |     print("{}: {:<30} {}: {:<30}".format(counter, ' '.join(item[0].split()), counter, ' '.join(item[1].split())))
40 |     counter += 1
41 | 
42 | # Get a Concepts report
43 | response1 = alchemyapi.concepts('url', url1)
44 | response2 = alchemyapi.concepts('url', url2)
45 | 
46 | firstlist = []
47 | secondlist = []
48 | 
49 | if response1['status'] == 'OK':
50 |     for concept in response1['concepts']:
51 |         firstlist.append((concept['text'], concept['relevance']))
52 | else:
53 |     print('Error in concept tagging call: ', response1['statusInfo'])
54 | 
55 | if response2['status'] == 'OK':
56 |     for concept in response2['concepts']:
57 |         secondlist.append((concept['text'], concept['relevance']))
58 | else:
59 |     print('Error in concept tagging call: ', response2['statusInfo'])
60 | 
61 | final_list = zip(firstlist, secondlist)
62 | 
63 | # Print Concepts report
64 | 
65 | print("{:<30} {:<30}".format(url1, url2))
66 | counter1 = 0
67 | for item in final_list:
68 |     print("{}: {:<30} {}: {:<30}".format(counter1, str(item[0]), counter1, str(item[1])))
69 |     counter1 += 1
70 | 
--------------------------------------------------------------------------------
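For reference, the insights request that FB graph API.gs builds in fill_date_conversions() can also be reproduced outside Apps Script. The sketch below is a hypothetical Python equivalent of that call, not part of the repository: it assumes the requests library, a placeholder ACCESS_TOKEN, and reuses the v2.8 endpoint, fields and account ID that the script already hard-codes.

# Hypothetical stand-alone version of the request made by fill_date_conversions().
# Assumptions: `requests` is installed, ACCESS_TOKEN holds a valid user token with
# ads permissions, and ACCOUNT_ID is the ID returned by get_account_id() in the .gs file.
import datetime
import requests

ACCESS_TOKEN = "YOUR_USER_ACCESS_TOKEN"   # placeholder token
ACCOUNT_ID = "act_10150467159223448"      # same ad account ID as the script

# Yesterday's date in YYYY-MM-DD, matching the script's time_range values.
date = (datetime.date.today() - datetime.timedelta(days=1)).isoformat()

resp = requests.get(
    "https://graph.facebook.com/v2.8/" + ACCOUNT_ID + "/insights",
    params={
        "fields": "actions,action_values,spend",
        "time_range[since]": date,
        "time_range[until]": date,
        "limit": 50,
        "access_token": ACCESS_TOKEN,
    },
)
data = resp.json().get("data", [])
if data:
    # Each entry in data[0]["actions"] looks like {"action_type": ..., "value": ...},
    # which is what the sheet's column headers are matched against in the script.
    for action in data[0].get("actions", []):
        print(action["action_type"], action["value"])
    print("spend:", data[0].get("spend"))
else:
    print("No data for", date)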