├── githubactions ├── scripts │ └── publish_sitemap │ │ ├── publish.sh │ │ └── publish.js ├── .github │ └── workflows │ │ └── publish_sitemap.yml └── README.md ├── package.json ├── .gitignore ├── index.js ├── LICENSE └── README.md /githubactions/scripts/publish_sitemap/publish.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # -- Standard Header -- 4 | echoerr() { printf "%s\n" "$*" >&2; } 5 | export FUSEBIT_DEBUG= 6 | 7 | node scripts/publish_sitemap/publish.js -------------------------------------------------------------------------------- /githubactions/.github/workflows/publish_sitemap.yml: -------------------------------------------------------------------------------- 1 | # Submits the updated sitemap to Google Search Console on every push. 2 | # NOTE: `jobs` must be a mapping of job-id -> job definition, and each job 3 | # needs `runs-on` plus a `steps` list; a bare step list directly under 4 | # `jobs:` is rejected by the GitHub Actions workflow parser. 5 | on: [push] 6 | jobs: 7 |   publish_sitemap: 8 |     runs-on: ubuntu-latest 9 |     steps: 10 |       - uses: actions/checkout@v3 11 |       - name: Install dependencies 12 |         run: npm install googleapis google-auth-library 13 |       - name: Publish Sitemap 14 |         env: 15 |           GOOGLE_SEARCH_CONSOLE_JSON_KEY: ${{ secrets.GOOGLE_SEARCH_CONSOLE_JSON_KEY }} 16 |         run: ./scripts/publish_sitemap/publish.sh -------------------------------------------------------------------------------- /githubactions/scripts/publish_sitemap/publish.js: -------------------------------------------------------------------------------- 1 | const { google } = require('googleapis'); 2 | const { JWT } = require('google-auth-library'); 3 | const searchconsole = google.searchconsole('v1'); 4 | const keys = JSON.parse(Buffer.from(process.env.GOOGLE_SEARCH_CONSOLE_JSON_KEY, 'base64').toString('utf-8')); 5 | 6 | const client = new JWT({ 7 | email: keys.client_email, 8 | key: keys.private_key, 9 | scopes: ['https://www.googleapis.com/auth/webmasters', 'https://www.googleapis.com/auth/webmasters.readonly'], 10 | }); 11 | 12 | google.options({ auth: client }); 13 | 14 | (async () => { 15 | try { 16 | await searchconsole.sitemaps.submit({ 17 | feedpath: 'https://fusebit.io/sitemap.xml', 18 | siteUrl: 'https://fusebit.io/', 19 | }); 20 | } catch (e) { 21 | console.log(e); 22 | } 23 | })(); 24 | --------------------------------------------------------------------------------
/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "google-searchconsole-nodejs", 3 | "version": "1.0.0", 4 | "description": "Google Search Console API using Node.js", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "repository": { 10 | "type": "git", 11 | "url": "git+https://github.com/fusebit/google-searchconsole-nodejs.git" 12 | }, 13 | "keywords": [ 14 | "Google", 15 | "Search", 16 | "Console", 17 | "API", 18 | "Node.js" 19 | ], 20 | "author": "Shehzad Akbar", 21 | "license": "CC0-1.0", 22 | "bugs": { 23 | "url": "https://github.com/fusebit/google-searchconsole-nodejs/issues" 24 | }, 25 | "homepage": "https://github.com/fusebit/google-searchconsole-nodejs#readme", 26 | "dependencies": { 27 | "google-auth-library": "^7.14.0", 28 | "googleapis": "^96.0.0" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | 9 | # Diagnostic reports (https://nodejs.org/api/report.html) 10 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 11 | 12 | # Runtime data 13 | pids 14 | *.pid 15 | *.seed 16 | *.pid.lock 17 | 18 | # Directory for instrumented libs generated by jscoverage/JSCover 19 | lib-cov 20 | 21 | # Coverage directory used by tools like istanbul 22 | coverage 23 | *.lcov 24 | 25 | # nyc test coverage 26 | .nyc_output 27 | 28 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 29 | .grunt 30 | 31 | # Bower dependency directory (https://bower.io/) 32 | bower_components 33 | 34 | # node-waf configuration 35 | .lock-wscript 36 | 37 | # Compiled binary addons (https://nodejs.org/api/addons.html) 38 | build/Release 39 | 40 | # Dependency directories 41 |
node_modules/ 42 | jspm_packages/ 43 | 44 | # TypeScript v1 declaration files 45 | typings/ 46 | 47 | # TypeScript cache 48 | *.tsbuildinfo 49 | 50 | # Optional npm cache directory 51 | .npm 52 | 53 | # Optional eslint cache 54 | .eslintcache 55 | 56 | # Microbundle cache 57 | .rpt2_cache/ 58 | .rts2_cache_cjs/ 59 | .rts2_cache_es/ 60 | .rts2_cache_umd/ 61 | 62 | # Optional REPL history 63 | .node_repl_history 64 | 65 | # Output of 'npm pack' 66 | *.tgz 67 | 68 | # Yarn Integrity file 69 | .yarn-integrity 70 | 71 | # dotenv environment variables file 72 | .env 73 | .env.test 74 | 75 | # parcel-bundler cache (https://parceljs.org/) 76 | .cache 77 | 78 | # Next.js build output 79 | .next 80 | 81 | # Nuxt.js build / generate output 82 | .nuxt 83 | dist 84 | 85 | # Gatsby files 86 | .cache/ 87 | # Comment in the public line in if your project uses Gatsby and *not* Next.js 88 | # https://nextjs.org/blog/next-9-1#public-directory-support 89 | # public 90 | 91 | # vuepress build output 92 | .vuepress/dist 93 | 94 | # Serverless directories 95 | .serverless/ 96 | 97 | # FuseBox cache 98 | .fusebox/ 99 | 100 | # DynamoDB Local files 101 | .dynamodb/ 102 | 103 | # TernJS port file 104 | .tern-port 105 | 106 | # Mac File System 107 | .DS_Store 108 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | // - Enable the API at: 2 | // https://console.developers.google.com/apis/api/searchconsole.googleapis.com 3 | 4 | // - Create a Service Account in the Google Developers Console at: 5 | // https://console.cloud.google.com/iam-admin/serviceaccounts 6 | // Save the keys in the local directory 7 | 8 | // - Add the email address of this newly created Service Account as an owner in the Search Console 9 | // https://search.google.com/search-console/users 10 | 11 | const { google } = require("googleapis"); 12 | const { JWT } = require("google-auth-library"); 13 | 
const searchconsole = google.searchconsole("v1"); 14 | 15 | async function main() { 16 | 17 | // Authentication 18 | const keys = require("PATH_TO_KEYS.JSON_FILE"); 19 | const client = new JWT({ 20 | email: keys.client_email, 21 | key: keys.private_key, 22 | scopes: [ 23 | "https://www.googleapis.com/auth/webmasters", 24 | "https://www.googleapis.com/auth/webmasters.readonly", 25 | ], 26 | }); 27 | google.options({ auth: client }); 28 | 29 | // Check which sites belong to this authorized account 30 | const resSiteList = await searchconsole.sites.list({}); 31 | console.log(resSiteList.data); 32 | 33 | // Submit a URL for Inspection 34 | const res = await searchconsole.sitemaps.submit({ 35 | feedpath: 'https://fusebit.io/sitemap.xml', 36 | siteUrl: 'https://fusebit.io/', 37 | }); 38 | console.log(res.data); 39 | 40 | 41 | // Inspect if a URL has been indexed 42 | const resInspectURL = await searchconsole.urlInspection.index.inspect({ 43 | requestBody: { 44 | inspectionUrl: "https://fusebit.io/blog/google-search-console-nodejs/", 45 | languageCode: "en-US", 46 | siteUrl: "https://fusebit.io/", 47 | }, 48 | }); 49 | console.log(resInspectURL.data); 50 | 51 | 52 | // Search Analytics on your Website 53 | const resSearchAnalytics = await searchconsole.searchanalytics.query({ 54 | siteUrl: 'https://fusebit.io/', 55 | 56 | // Detailed Query Parameters: https://developers.google.com/webmaster-tools/v1/searchanalytics/query 57 | requestBody: { 58 | "endDate": "2022-03-08", 59 | "startDate": "2022-03-01", 60 | "dimensionFilterGroupsfilters": ["query contains node"], 61 | "dimensions": ["query"], 62 | "rowLimit": 10 63 | }, 64 | }); 65 | 66 | console.log(resSearchAnalytics.data.rows); 67 | 68 | } 69 | 70 | main().catch((e) => { 71 | console.error(e); 72 | throw e; 73 | }); 74 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Creative Commons Legal 
Code 2 | 3 | CC0 1.0 Universal 4 | 5 | CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE 6 | LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN 7 | ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS 8 | INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES 9 | REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS 10 | PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM 11 | THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED 12 | HEREUNDER. 13 | 14 | Statement of Purpose 15 | 16 | The laws of most jurisdictions throughout the world automatically confer 17 | exclusive Copyright and Related Rights (defined below) upon the creator 18 | and subsequent owner(s) (each and all, an "owner") of an original work of 19 | authorship and/or a database (each, a "Work"). 20 | 21 | Certain owners wish to permanently relinquish those rights to a Work for 22 | the purpose of contributing to a commons of creative, cultural and 23 | scientific works ("Commons") that the public can reliably and without fear 24 | of later claims of infringement build upon, modify, incorporate in other 25 | works, reuse and redistribute as freely as possible in any form whatsoever 26 | and for any purposes, including without limitation commercial purposes. 27 | These owners may contribute to the Commons to promote the ideal of a free 28 | culture and the further production of creative, cultural and scientific 29 | works, or to gain reputation or greater distribution for their Work in 30 | part through the use and efforts of others. 
31 | 32 | For these and/or other purposes and motivations, and without any 33 | expectation of additional consideration or compensation, the person 34 | associating CC0 with a Work (the "Affirmer"), to the extent that he or she 35 | is an owner of Copyright and Related Rights in the Work, voluntarily 36 | elects to apply CC0 to the Work and publicly distribute the Work under its 37 | terms, with knowledge of his or her Copyright and Related Rights in the 38 | Work and the meaning and intended legal effect of CC0 on those rights. 39 | 40 | 1. Copyright and Related Rights. A Work made available under CC0 may be 41 | protected by copyright and related or neighboring rights ("Copyright and 42 | Related Rights"). Copyright and Related Rights include, but are not 43 | limited to, the following: 44 | 45 | i. the right to reproduce, adapt, distribute, perform, display, 46 | communicate, and translate a Work; 47 | ii. moral rights retained by the original author(s) and/or performer(s); 48 | iii. publicity and privacy rights pertaining to a person's image or 49 | likeness depicted in a Work; 50 | iv. rights protecting against unfair competition in regards to a Work, 51 | subject to the limitations in paragraph 4(a), below; 52 | v. rights protecting the extraction, dissemination, use and reuse of data 53 | in a Work; 54 | vi. database rights (such as those arising under Directive 96/9/EC of the 55 | European Parliament and of the Council of 11 March 1996 on the legal 56 | protection of databases, and under any national implementation 57 | thereof, including any amended or successor version of such 58 | directive); and 59 | vii. other similar, equivalent or corresponding rights throughout the 60 | world based on applicable law or treaty, and any national 61 | implementations thereof. 62 | 63 | 2. Waiver. 
To the greatest extent permitted by, but not in contravention 64 | of, applicable law, Affirmer hereby overtly, fully, permanently, 65 | irrevocably and unconditionally waives, abandons, and surrenders all of 66 | Affirmer's Copyright and Related Rights and associated claims and causes 67 | of action, whether now known or unknown (including existing as well as 68 | future claims and causes of action), in the Work (i) in all territories 69 | worldwide, (ii) for the maximum duration provided by applicable law or 70 | treaty (including future time extensions), (iii) in any current or future 71 | medium and for any number of copies, and (iv) for any purpose whatsoever, 72 | including without limitation commercial, advertising or promotional 73 | purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each 74 | member of the public at large and to the detriment of Affirmer's heirs and 75 | successors, fully intending that such Waiver shall not be subject to 76 | revocation, rescission, cancellation, termination, or any other legal or 77 | equitable action to disrupt the quiet enjoyment of the Work by the public 78 | as contemplated by Affirmer's express Statement of Purpose. 79 | 80 | 3. Public License Fallback. Should any part of the Waiver for any reason 81 | be judged legally invalid or ineffective under applicable law, then the 82 | Waiver shall be preserved to the maximum extent permitted taking into 83 | account Affirmer's express Statement of Purpose. 
In addition, to the 84 | extent the Waiver is so judged Affirmer hereby grants to each affected 85 | person a royalty-free, non transferable, non sublicensable, non exclusive, 86 | irrevocable and unconditional license to exercise Affirmer's Copyright and 87 | Related Rights in the Work (i) in all territories worldwide, (ii) for the 88 | maximum duration provided by applicable law or treaty (including future 89 | time extensions), (iii) in any current or future medium and for any number 90 | of copies, and (iv) for any purpose whatsoever, including without 91 | limitation commercial, advertising or promotional purposes (the 92 | "License"). The License shall be deemed effective as of the date CC0 was 93 | applied by Affirmer to the Work. Should any part of the License for any 94 | reason be judged legally invalid or ineffective under applicable law, such 95 | partial invalidity or ineffectiveness shall not invalidate the remainder 96 | of the License, and in such case Affirmer hereby affirms that he or she 97 | will not (i) exercise any of his or her remaining Copyright and Related 98 | Rights in the Work or (ii) assert any associated claims and causes of 99 | action with respect to the Work, in either case contrary to Affirmer's 100 | express Statement of Purpose. 101 | 102 | 4. Limitations and Disclaimers. 103 | 104 | a. No trademark or patent rights held by Affirmer are waived, abandoned, 105 | surrendered, licensed or otherwise affected by this document. 106 | b. Affirmer offers the Work as-is and makes no representations or 107 | warranties of any kind concerning the Work, express, implied, 108 | statutory or otherwise, including without limitation warranties of 109 | title, merchantability, fitness for a particular purpose, non 110 | infringement, or the absence of latent or other defects, accuracy, or 111 | the present or absence of errors, whether or not discoverable, all to 112 | the greatest extent permissible under applicable law. 113 | c. 
Affirmer disclaims responsibility for clearing rights of other persons 114 | that may apply to the Work or any use thereof, including without 115 | limitation any person's Copyright and Related Rights in the Work. 116 | Further, Affirmer disclaims responsibility for obtaining any necessary 117 | consents, permissions or other rights required for any use of the 118 | Work. 119 | d. Affirmer understands and acknowledges that Creative Commons is not a 120 | party to this document and has no duty or obligation with respect to 121 | this CC0 or use of the Work. 122 | -------------------------------------------------------------------------------- /githubactions/README.md: -------------------------------------------------------------------------------- 1 |
6 | 7 | 8 | In an earlier [blog post](https://fusebit.io/blog/google-search-console-nodejs/), I walked you through how to get started using Node.js and Google Search Console’s API. By now, you should be familiar with how to connect your account using the service account credentials and also run basic queries to get information on how your website is performing. 9 | 10 | The real value, however, comes from integrating this capability into your existing CI and publishing workflow. This integration will leverage the efficiencies gained from automation and focus on what matters most, growing your business presence online. 11 | 12 | In this post, I’ll walk you through a simple use case - integrate your publishing workflow directly to the Google Search Console with [Github Actions](https://docs.github.com/en/actions) and automatically submit an updated sitemap for indexing. As a result, without having to do anything, your new blog post or product page will be immediately searchable on Google. 13 | 14 | It’s super quick and you will only need to do three things: 15 | 16 | 1. Add the Google Search Console API Code to your Github repository 17 | 2. Add the Github Actions Configuration File to your Repo 18 | 3. Add your credentials as a Secret in Github 19 | 20 | Let’s get started. 21 | 22 | As a reminder, we also have a [github repo](https://github.com/fusebit/google-searchconsole-nodejs/tree/main/githubactions) containing all the files that you need to get this up and running. 23 | 24 | ## 1. Add the Google Search Console API Code to your Github repository 25 | 26 | Essentially, we need to tell Github exactly which files to run when publishing your website. To do this create a top level directory called `scripts` with the following structure: 27 | 28 |  29 | 30 | In the `publish.js` file, we’re essentially setting up our connection to Google’s Search Console and submitting an updated sitemap directly through the API. 
31 | 32 | Note that in the example below, we’ve specified `GOOGLE_SEARCH_CONSOLE_JSON_KEY` as an environment variable and will walk you through how to get this stored into your Github below. 33 | 34 | #### **`File: publish.js`** 35 | ```javascript 36 | 37 | const { google } = require('googleapis'); 38 | const { JWT } = require('google-auth-library'); 39 | const searchconsole = google.searchconsole('v1'); 40 | 41 | const keys = JSON.parse(Buffer.from(process.env.GOOGLE_SEARCH_CONSOLE_JSON_KEY, 'base64').toString('utf-8')); 42 | const client = new JWT({ 43 | email: keys.client_email, 44 | key: keys.private_key, 45 | scopes: ['https://www.googleapis.com/auth/webmasters', 'https://www.googleapis.com/auth/webmasters.readonly'], 46 | }); 47 | 48 | google.options({ auth: client }); 49 | 50 | (async () => { 51 | try { 52 | await searchconsole.sitemaps.submit({ 53 | // UPDATE THIS TO YOUR OWN SITEMAP 54 | feedpath: 'https://fusebit.io/sitemap.xml', 55 | siteUrl: 'https://fusebit.io/', 56 | }); 57 | 58 | } catch (e) { 59 | console.log(e); 60 | } 61 | 62 | })(); 63 | ``` 64 | 65 | In the `publish.sh` file, we’re invoking a bash command to run the above file. 66 | 67 | Note that you may have to update the file and make it executable, you can do this easily by running the following command in your terminal: `chmod +x scripts/publish_sitemap/publish.sh`. 68 | 69 | #### **`File: publish.sh`** 70 | ```bash 71 | #!/usr/bin/env bash 72 | 73 | # -- Standard Header -- 74 | echoerr() { printf "%s\n" "$*" >&2; } 75 | export FUSEBIT_DEBUG= 76 | 77 | node scripts/publish_sitemap/publish.js 78 | ``` 79 | 80 | Finally, make sure to install your dependencies and you can do this by installing the two google npm package: 81 | 82 | `npm install googleapis` 83 | 84 | `npm install google-auth-library` 85 | 86 | ## 2. 
Add the Github Actions Configuration File to your Repo 87 | 88 | Now, once you've uploaded the Search Console specific pieces, you want to upload the [Github Actions Workflow](https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions#workflows) file. 89 | 90 | For context, a workflow is a configurable automated process that will run one or more jobs. Workflows are defined by a YAML file checked into your repository and will run when triggered by an event in your repository, or they can be triggered manually, or at a defined schedule. 91 | 92 | To do this create a top level directory called `.github` with the following structure: 93 | 94 |  95 | 96 | In this folder, add your workflow file: 97 | 98 | #### **`File: publish_sitemap.yml`** 99 | ```yaml 100 | on: [push] 101 | jobs: 102 |   publish_sitemap: 103 |     runs-on: ubuntu-latest 104 |     steps: 105 |       - uses: actions/checkout@v3 106 |       - name: Install dependencies 107 |         run: npm install googleapis google-auth-library 108 |       - name: Publish Sitemap 109 |         env: 110 |           GOOGLE_SEARCH_CONSOLE_JSON_KEY: ${{ secrets.GOOGLE_SEARCH_CONSOLE_JSON_KEY }} 111 |         run: ./scripts/publish_sitemap/publish.sh 112 | ``` 113 | 114 | Whenever you push to a branch with this file in it, Github will automatically execute the script using the provided environment variables. 115 | 116 | ## 3. Add your credentials as a Secret in Github 117 | 118 | The last step is to add the `GOOGLE_SEARCH_CONSOLE_JSON_KEY` environment variable as a secret in your Github repo. Otherwise, Google won’t be able to authenticate your request and will return an error. 119 | 120 | To do this, in your terminal window, navigate to the directory where your `keys.json` file is stored, this is the file that contains your Client ID, Private key etc. 121 | 122 | Generate an encoded version of this file by running the following command: 123 | 124 | `cat keys.json | base64 | pbcopy` 125 | 126 | This will copy the file encoding to your clipboard and you will paste it in the next step.
122 | 123 | Next, for your Github Organization navigate to: **Settings > Security > Secrets > Actions **and click on **New Organization Secret** 124 | 125 |  126 | 127 | On this screen, set the name to `GOOGLE_SEARCH_CONSOLE_JSON_KEY`, paste in the encoded file from your clipboard and hit save. 128 | 129 |  130 | 131 | That’s it! Now anytime you publish an update to your website, Github will automatically trigger the Workflow Action that will submit an updated sitemap to Google using your publish scripts. You can check on the Google Search Console to verify the results! 132 | 133 |  134 | 135 | ## Conclusion 136 | 137 | Hopefully that was helpful in getting you set up with automations for your website and to speed up your content’s discovery on organic search. Remember, you can download the codebase on [Fusebit's GitHub](https://github.com/fusebit/google-searchconsole-nodejs/tree/main/githubactions) and easily copy/paste the files you need directly into your codebase. 138 | 139 | Don’t hesitate to reach out if you have any questions, and I’ll be happy to help you get this integration working. You can also reach out to me directly through our [community Slack](https://join.slack.com/t/fusebitio/shared_invite/zt-qe7uidtf-4cs6OgaomFVgAF_fQZubfg), on [Twitter](https://twitter.com/shehzadakbar) and at [shehzad@fusebit.io](mailto:shehzad@fusebit.io). -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | ## Intro 8 | 9 | Google Search Console is a free service offered by Google that helps you monitor, maintain, and troubleshoot your site's presence in Google Search results. You don't have to use the Search Console to be included in Google Search results, but Search Console helps you understand and improve how Google sees your site. 
10 | 11 | For business stakeholders, responding to real-time information on their online properties can have a real impact on their bottom line. Google's Search Console API is an extremely useful tool for webmasters who rely on traffic data and SEO optimizations as a critical part of informing their online strategy. 12 | 13 | The Search Console API offers four key capabilities: 14 | 15 | - [Search Analytics](https://developers.google.com/webmaster-tools/v1/api_reference_index#Search_analytics) - Query traffic data for your site. 16 | - [Sitemaps](https://developers.google.com/webmaster-tools/v1/api_reference_index#Sitemaps) - List all your sitemaps, request information about a specific sitemap, and submit a sitemap to Google. 17 | - [Sites](https://developers.google.com/webmaster-tools/v1/api_reference_index#Sites) - List/add/remove properties from your Search Console account. 18 | - [URL Inspection](https://developers.google.com/webmaster-tools/v1/api_reference_index#Inspection_tools) - Inspect the status of a page in the Google index (equivalent to URL Inspection in Search Console) 19 | 20 | In this post, we'll use [Google's Node.js SDK](https://github.com/googleapis/google-api-nodejs-client) to connect to the Search Console API. With the use of code examples, we'll walk through a very common use case: Submitting an updated Sitemap to Google for indexing and monitoring the impact on your site's online presence. 21 | 22 | ## Configure Access Credentials 23 | 24 | Before you can get started with code, you will need to make sure that you enable the Search Console API for your website, or property, and set up a Service Account with the right access credentials to send requests using the Google API. 25 | 26 | 1. Enable the API in the [Google Cloud Console](https://console.developers.google.com/apis/api/searchconsole.googleapis.com) 27 | 2. Create a Service Account in the [Google Developers Console](https://console.cloud.google.com/iam-admin/serviceaccounts) 28 | 3. 
For this newly created Service Account, navigate to 'Keys', create a new key, and save the JSON file somewhere secure. Your Node.js application will use these credentials to access the API. 29 | 4. Add the email address of this newly created Service Account as an owner in the [Google Search Console](https://search.google.com/search-console/users) 30 | 31 | Note: Only a **verified owner** of the property can perform this step. 32 | 33 | ## Get Started 34 | 35 | Now that you've configured the proper access credentials, let's get started with the fun part - the code. In this first part, you'll need to set up the auth and also make sure that your credentials are properly configured for your Search Console property. 36 | 37 | In your terminal, start up a new node project and install the required packages: 38 | 39 | `npm init` 40 | 41 | `npm install googleapis` 42 | 43 | `npm install google-auth-library` 44 | 45 | In this same directory, you will also need to place the `keys.json` file that you had created earlier. Once you've got that set up, create a new `index.js` file and paste in the following code: 46 | 47 | ```javascript 48 | const {google} = require('googleapis'); 49 | const {JWT} = require('google-auth-library'); 50 | const searchconsole = google.searchconsole('v1'); 51 | const keys = require('PATH_TO_KEYS.JSON_FILE') 52 | const client = new JWT({ 53 | email: keys.client_email, 54 | key: keys.private_key, 55 | scopes: ['https://www.googleapis.com/auth/webmasters','https://www.googleapis.com/auth/webmasters.readonly'], 56 | }); 57 | google.options({auth: client}); 58 | // Check which properties are accessible (run this `await` inside an async function, as in the full index.js) 59 | const resSiteList = await searchconsole.sites.list({}); 60 | console.log(resSiteList.data); 61 | ``` 62 | 63 | If you don't see your property in the list of sites returned, check to see if you've added the Service Account to the right property in your Google Search Console.
64 | 65 | Once you've got that working, we can immediately send requests and queries to the Search Console. 66 | 67 | ## Submit a New URL to Google for Indexing 68 | 69 | 70 | Whether it's a new product page or a new blog post, you want to make them easily searchable as quickly as possible. You can automate this step by submitting the updated sitemap to the Sitemaps endpoint as an automated step in your CI/CD workflow. 71 | 72 | ```javascript 73 | const res = await searchconsole.sitemaps.submit({ 74 | // The URL of the actual sitemap. For example: `https://fusebit.io/sitemap.xml`. 75 | feedpath: 'placeholder-value', 76 | // The site's URL, including protocol. For example: `https://fusebit.io/`. 77 | siteUrl: 'placeholder-value', 78 | }); 79 | console.log(res.data); 80 | 81 | ``` 82 | 83 | ## Check to see if it has been indexed 84 | 85 | Once you've submitted a new URL to the indexing API, the actual crawling can take anywhere from a few days to a few weeks. To avoid having to manually check, you can set up a scheduled job to check the status of your newly submitted URLs and see if they've been indexed using the URL Inspection endpoint. 86 | 87 | ```javascript 88 | const resInspectURL = await searchconsole.urlInspection.index.inspect({ 89 | // Request body metadata 90 | requestBody: { 91 | // request body parameters 92 | // { 93 | "inspectionUrl": "https://fusebit.io/blog/nodejs-https-imports/", 94 | "languageCode": "en-US", 95 | "siteUrl": "https://fusebit.io/" 96 | // } 97 | }, 98 | }); 99 | console.log(resInspectURL.data); 100 | ``` 101 | 102 | [Learn more about Google's re-indexing process](https://developers.google.com/search/docs/advanced/crawling/ask-google-to-recrawl) for Sitemaps. 103 | 104 | ## Check to see a change in clicks/impressions 105 | 106 | Now that your newly published site has been indexed, you may want to see how impressions or clicks are changing over time for your recent posts.
You can do this by leveraging the Search Analytics endpoint and specifying the URL and date range. 107 | 108 | ```javascript 109 | // Search Analytics on your Website 110 | const resSearchAnalytics = await searchconsole.searchanalytics.query({ 111 | // The site's URL, including protocol. For example: `http://www.example.com/`. 112 | siteUrl: 'https://fusebit.io/', 113 | 114 | requestBody: { 115 | "endDate": "2022-03-08", 116 | "startDate": "2022-03-01", 117 | "dimensions": ["date"] 118 | }, 119 | }); 120 | 121 | console.log(resSearchAnalytics.data.rows); 122 | ``` 123 | 124 | For the example above, in the response body you will receive a `rows` object with four items for each data point: 125 | 126 | - **Clicks** - How often someone clicked a link from Google to your site 127 | - **Impressions** - How often someone saw a link to your site on Google 128 | - **CTR** - The calculation of (clicks ÷ impressions) 129 | - **Position** - A relative ranking of the position of your link on Google, where 1 is the topmost position, 2 is the next position, and so on 130 | 131 | [Learn more](https://support.google.com/webmasters/answer/7042828?hl=en) about how these are calculated 132 | 133 | 134 | ### Bonus: See the "average position" of specific keywords 135 | 136 | In the above example, we showed you how to see aggregated data for your website, however, you can add different dimensions to break down your results to get a deeper understanding. For example, if the new product page you just published has certain keywords you were targeting for SEO, you can send a daily report of how those search terms are performing directly to your team's Slack channel. 137 | 138 | ```javascript 139 | // Search Analytics on your Website 140 | const resSearchAnalytics = await searchconsole.searchanalytics.query({ 141 | // The site's URL, including protocol. For example: `http://www.example.com/`. 
142 | siteUrl: 'https://fusebit.io/', 143 | 144 | requestBody: { 145 | "dimensionFilterGroups": [{ "filters": [{ "dimension": "query", "operator": "contains", "expression": "node" }] }], 146 | "dimensions": ["query"], 147 | "endDate": "2022-03-08", 148 | "startDate": "2022-03-01", 149 | }, 150 | }); 151 | 152 | console.log(resSearchAnalytics.data.rows); 153 | ``` 154 | 155 | In the example above, we use `dimensionFilterGroups` with a `filters` entry to specify that we want to see how the keyword 'node' impacts our search positions and then break the results down based on the individual queries. 156 | 157 | [Learn more](https://developers.google.com/webmaster-tools/v1/searchanalytics/query#dimensionFilterGroups.filters) about the different dimensions and filters. 158 | 159 | ## Conclusion 160 | 161 | Hopefully, you’ll find the above code and implementation details helpful. While using it, please note that Google’s API limits you to 2,000 queries a day and 600 queries per minute, which is probably something you’ll never cross. 162 | 163 | Don’t hesitate to reach out if you have any questions, and we’ll be happy to help push you through. 164 | 165 | You can find me on the [Fusebit Discord](https://discord.gg/SN4rhhCH), our [community Slack](https://join.slack.com/t/fusebitio/shared_invite/zt-qe7uidtf-4cs6OgaomFVgAF_fQZubfg), and at [shehzad@fusebit.io](mailto:shehzad@fusebit.io). --------------------------------------------------------------------------------