├── .gitignore ├── Exam-Guide ├── Section_3: Storing the data │ └── README.md ├── Section_2:Ingesting and processing the data │ └── README.md ├── Section_4: Preparing and using data for analysis │ └── README.md ├── Section_5: Maintaining and automating data workloads │ └── README.md ├── Section_1:Designing data processing systems │ └── README.md └── README.md ├── Hands-On ├── app_enginee │ ├── app.yaml │ ├── server.js │ ├── package.json │ └── README.md ├── cloud_run │ ├── jobs │ │ ├── index.js │ │ ├── Dockerfile │ │ ├── package.json │ │ └── README.md │ └── services │ │ ├── package.json │ │ ├── index.js │ │ ├── Dockerfile │ │ ├── README.md │ │ └── package-lock.json ├── bigquery │ ├── package.json │ ├── index.js │ └── package-lock.json ├── cloud_function │ ├── gcs-trigger │ │ ├── package.json │ │ ├── index.js │ │ └── README.md │ ├── http-trigger │ │ ├── package.json │ │ ├── index.js │ │ └── README.md │ └── pubsub-trigger │ │ ├── package.json │ │ ├── index.js │ │ └── README.md ├── dockers │ ├── app.js │ ├── package.json │ ├── Dockerfile │ └── README.md ├── service_account │ ├── main.tf │ └── variables.tf ├── cloud_storage │ ├── variables.tf │ └── main.tf └── virtual_machine │ ├── variables.tf │ └── main.tf ├── pde.png ├── Learning ├── Section2_GCP_Fundamental │ ├── images │ │ └── region-zone.png │ └── README.md ├── Section1_Data_Engineering_Concepts │ ├── images │ │ ├── data-lifecycle-1.webp │ │ ├── example-datastructure.png │ │ └── example-semidatastructure.png │ └── README.md ├── Section7_CloudBlockStorage&FileStore │ └── README.md ├── Section3_GCP_Basic_Services │ ├── cloud-run │ │ └── README.md │ ├── README.md │ ├── virtual_machine │ │ └── README.md │ ├── cloud-sql │ │ └── README.md │ ├── app-engine │ │ └── README.md │ ├── cloud-function │ │ └── README.md │ └── IAM │ │ └── README.md ├── Section4_Storage_Product │ └── README.md ├── Section_10_GoogleCloudSQL │ └── README.md ├── Section8_DatabaseConcept │ └── README.md ├── Section6_Data_Transfer_Service │ └── README.md ├── Section9_GCP_Database_Product │ └── README.md └── Section5_Google_Cloud_Storage │ └── README.md ├── Articles └── README.md ├── Test ├── README.md └── Test1.md ├── README.md └── LICENCE /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules -------------------------------------------------------------------------------- /Exam-Guide/Section_3: Storing the data/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Hands-On/app_enginee/app.yaml: -------------------------------------------------------------------------------- 1 | 2 | runtime: nodejs -------------------------------------------------------------------------------- /Exam-Guide/Section_2:Ingesting and processing the data/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Hands-On/cloud_run/jobs/index.js: -------------------------------------------------------------------------------- 1 | console.log('Hello, World!'); 2 | -------------------------------------------------------------------------------- /Exam-Guide/Section_4: Preparing and using data for analysis/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Exam-Guide/Section_5: 
Maintaining and automating data workloads/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pde.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuanai-vireox/gcp-professional-data-engineer/HEAD/pde.png -------------------------------------------------------------------------------- /Exam-Guide/Section_1:Designing data processing systems/README.md: -------------------------------------------------------------------------------- 1 | # Designing data processing systems (~22% of the exam) 2 | -------------------------------------------------------------------------------- /Hands-On/cloud_run/jobs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | 3 | # Set the working directory 4 | WORKDIR /usr/src/app 5 | COPY . . 6 | # Command to run the application 7 | CMD ["node", "index.js"] -------------------------------------------------------------------------------- /Learning/Section2_GCP_Fundamental/images/region-zone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuanai-vireox/gcp-professional-data-engineer/HEAD/Learning/Section2_GCP_Fundamental/images/region-zone.png -------------------------------------------------------------------------------- /Articles/README.md: -------------------------------------------------------------------------------- 1 | 2 | [How I cleared the GCP Professional Data Engineer Exam in 3 weeks!](https://medium.com/@nithishreddy95/how-i-cleared-the-gcp-professional-data-engineer-exam-in-3-weeks-37ad8844a8fb) -------------------------------------------------------------------------------- /Learning/Section1_Data_Engineering_Concepts/images/data-lifecycle-1.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuanai-vireox/gcp-professional-data-engineer/HEAD/Learning/Section1_Data_Engineering_Concepts/images/data-lifecycle-1.webp -------------------------------------------------------------------------------- /Learning/Section1_Data_Engineering_Concepts/images/example-datastructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuanai-vireox/gcp-professional-data-engineer/HEAD/Learning/Section1_Data_Engineering_Concepts/images/example-datastructure.png -------------------------------------------------------------------------------- /Learning/Section1_Data_Engineering_Concepts/images/example-semidatastructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuanai-vireox/gcp-professional-data-engineer/HEAD/Learning/Section1_Data_Engineering_Concepts/images/example-semidatastructure.png -------------------------------------------------------------------------------- /Hands-On/cloud_run/jobs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "jobs", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC" 11 | } 12 | -------------------------------------------------------------------------------- /Hands-On/cloud_run/services/package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "cloud_run", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "express": "^4.18.2" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /Hands-On/bigquery/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bigquery", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "@google-cloud/bigquery": "^7.3.0" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /Hands-On/cloud_function/gcs-trigger/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gcs-trigger", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "@google-cloud/functions-framework": "^3.3.0" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /Hands-On/dockers/app.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const app = express(); 3 | const port = 3000; 4 | 5 | // Define a route to handle GET requests to the root URL 6 | app.get('/', (req, res) => { 7 | res.send('Hello, World!'); 8 | }); 9 | 10 | // Start the server 11 | app.listen(port, () => { 12 | console.log(`Example app listening at http://localhost:${port}`); 13 | }); -------------------------------------------------------------------------------- /Hands-On/app_enginee/server.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const app = express(); 3 | const port = 3000; 4 | 5 | // Define a route to handle GET requests to the root URL 6 | app.get('/', (req, res) => { 7 | res.send('Hello, World!'); 8 | }); 9 | 10 | // Start the server 11 | app.listen(port, () => { 12 | console.log(`Example app listening at http://localhost:${port}`); 13 | }); -------------------------------------------------------------------------------- /Hands-On/cloud_function/http-trigger/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cloud_function", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "dependencies": { 10 | "@google-cloud/functions-framework": "^3.3.0" 11 | }, 12 | "author": "", 13 | "license": "ISC" 14 | } 15 | -------------------------------------------------------------------------------- /Hands-On/cloud_function/pubsub-trigger/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "pubsub-trigger", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | 
"@google-cloud/functions-framework": "^3.3.0" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /Hands-On/cloud_run/services/index.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const app = express(); 3 | const port = 3000; 4 | 5 | // Define a route to handle GET requests to the root URL 6 | app.get('/', (req, res) => { 7 | res.send('Hello, World!'); 8 | }); 9 | 10 | // Start the server 11 | app.listen(port, () => { 12 | console.log(`Example app listening at http://localhost:${port}`); 13 | }); -------------------------------------------------------------------------------- /Hands-On/cloud_function/gcs-trigger/index.js: -------------------------------------------------------------------------------- 1 | const { Storage } = require('@google-cloud/storage'); 2 | const storage = new Storage(); 3 | 4 | exports.processFile = (event, context) => { 5 | const file = event; 6 | console.log(`Processing file: ${file.name}`); 7 | 8 | // Custom logic to handle the file 9 | 10 | // Acknowledge the event 11 | console.log(`File processed: ${file.name}`); 12 | }; -------------------------------------------------------------------------------- /Hands-On/dockers/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dockers", 3 | "version": "1.0.0", 4 | "description": "To build a Docker image and push it to Google Container Registry (GCR), here's a basic guide that you can include in a README file:", 5 | "main": "app.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC" 11 | } 12 | -------------------------------------------------------------------------------- /Hands-On/service_account/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | google = { 4 | source = "hashicorp/google" 5 | version = "5.8.0" 6 | } 7 | } 8 | } 9 | 10 | provider "google" { 11 | # Configuration options 12 | } 13 | resource "google_service_account" "service_account" { 14 | account_id = var.account_id 15 | display_name = var.display_name 16 | project = var.project_id 17 | } -------------------------------------------------------------------------------- /Hands-On/service_account/variables.tf: -------------------------------------------------------------------------------- 1 | variable "account_id" { 2 | description = "The account id that is used to generate the service account email address and a stable unique id" 3 | } 4 | variable "display_name" { 5 | description = "The display name for the service account. Can be updated without creating a new resource" 6 | } 7 | variable "project_id" { 8 | description = "The project ID to host the service account in GCP" 9 | } -------------------------------------------------------------------------------- /Hands-On/cloud_function/http-trigger/index.js: -------------------------------------------------------------------------------- 1 | // example of a cloud function - http trigger 2 | // https://firebase.google.com/docs/functions/http-events 3 | 4 | const functions = require('@google-cloud/functions-framework'); 5 | 6 | // Register an HTTP function with the Functions Framework that will be executed 7 | // when you make an HTTP request to the deployed function's endpoint. 
8 | functions.http('helloGET', (req, res) => { 9 | res.send('Hello World!'); 10 | }); -------------------------------------------------------------------------------- /Hands-On/cloud_run/services/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | 3 | # Set the working directory 4 | WORKDIR /usr/src/app 5 | 6 | # Copy package.json and package-lock.json 7 | COPY package*.json ./ 8 | 9 | # Install app dependencies 10 | RUN npm install 11 | 12 | # Copy the application code 13 | COPY . . 14 | 15 | # Set the port 16 | ENV PORT=3000 17 | 18 | # Expose the port 19 | EXPOSE 3000 20 | 21 | # Command to run the application 22 | CMD ["node", "index.js"] -------------------------------------------------------------------------------- /Hands-On/dockers/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Node.js runtime as the base image 2 | FROM node:14 3 | 4 | # Set the working directory 5 | WORKDIR /usr/src/app 6 | 7 | # Copy package.json and package-lock.json 8 | COPY package*.json ./ 9 | 10 | # Install app dependencies 11 | RUN npm install 12 | 13 | # Copy the application code 14 | COPY . . 15 | 16 | ENV PORT=8080 17 | 18 | # Expose the port 19 | EXPOSE 8080 20 | 21 | # Command to run the application 22 | CMD ["node", "app.js"] -------------------------------------------------------------------------------- /Hands-On/app_enginee/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "app_enginee", 3 | "version": "1.0.0", 4 | "description": "Sample Node.js app deployed to Google App Engine with the gcloud command-line tool", 5 | "main": "server.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1", 8 | "start": "node server.js" 9 | }, 10 | "engines": { 11 | "node": "10.x.x" 12 | }, 13 | "author": "", 14 | "license": "ISC" 15 | } 16 | -------------------------------------------------------------------------------- /Hands-On/cloud_function/pubsub-trigger/index.js: -------------------------------------------------------------------------------- 1 | // Background function triggered by messages published to a Pub/Sub topic. 2 | // event.data holds the base64-encoded message payload. 3 | exports.subscribe = (event, context) => { 4 | const pubsubMessage = event.data; 5 | const data = JSON.parse(Buffer.from(pubsubMessage, 'base64').toString()); 6 | 7 | console.log(data); 8 | 9 | // Custom logic to handle the Pub/Sub message 10 | 11 | // No explicit ack is needed: Cloud Functions acknowledges the message 12 | // automatically when the function completes successfully. 13 | }; -------------------------------------------------------------------------------- /Hands-On/cloud_storage/variables.tf: -------------------------------------------------------------------------------- 1 | variable "bucket_name" { 2 | type = string 3 | description = "The name of the bucket to create" 4 | } 5 | variable "region" { 6 | type = string 7 | description = "The region to create the bucket in" 8 | } 9 | variable "project_id" { 10 | type = string 11 | description = "The project ID to create the bucket in" 12 | } 13 | variable "service_account_name" { 14 | type = string 15 | description = "The name of the service account to create" 16 | } 17 | variable "location" { 18 | type = string 19 | description = "The location to create the bucket in" }
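20 | 21 | # A minimal terraform.tfvars sketch for these variables. The values below are 22 | # hypothetical placeholders, not defaults shipped with this repo: 23 | # 24 | # bucket_name = "my-example-bucket" 25 | # region = "us-central1" 26 | # project_id = "my-gcp-project" 27 | # service_account_name = "storage-sa" 28 | # location = "US"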
-------------------------------------------------------------------------------- /Test/README.md: -------------------------------------------------------------------------------- 1 | ## Test format 2 | - The exam consists of 50 questions that must be answered in 2 hours. 3 | - The questions are either multiple choice (pick one correct answer) or multiple answer (pick M of N possible answers). Questions can be marked for review later. 4 | - The multiple-choice questions are "classic" in the sense that two of the answers can be eliminated immediately. Some detail in the question will bias the choice between the remaining answers. 5 | - The exam is taken on a computer and you will not have access to pen and paper. 6 | - The screen is split in half: the left side contains the questions, while the right side contains the case studies. 7 | - The two case studies published online were the ones used in the exam. The exam is set up so that all of the questions pertaining to a particular case study appear together. 8 | -------------------------------------------------------------------------------- /Hands-On/cloud_storage/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | google = { 4 | source = "hashicorp/google" 5 | version = "5.9.0" 6 | } 7 | } 8 | } 9 | 10 | provider "google" { 11 | # Configuration options 12 | project = var.project_id 13 | region = var.region 14 | } 15 | resource "google_storage_bucket" "my_bucket" { 16 | name = var.bucket_name # Update with your desired bucket name 17 | location = var.location # Update with your desired location 18 | force_destroy = true # Allows Terraform to delete the bucket even if it still contains objects 19 | 20 | versioning { 21 | enabled = true # Enable versioning for the bucket 22 | } 23 | 24 | lifecycle_rule { 25 | condition { 26 | age = 30 # Move objects to Nearline storage class after 30 days 27 | } 28 | action { 29 | type = "SetStorageClass" 30 | storage_class = "NEARLINE" 31 | } 32 | } 33 | } -------------------------------------------------------------------------------- /Hands-On/virtual_machine/variables.tf: -------------------------------------------------------------------------------- 1 | variable "account_id" { 2 | description = "The account ID that is used to generate the service account email address and a stable unique ID" 3 | } 4 | variable "display_name" { 5 | description = "The display name for the service account.
Can be updated without creating a new resource" 6 | } 7 | variable "project_id" { 8 | description = "The project ID to host the service account in GCP" 9 | } 10 | variable "instance_name" { 11 | type = string 12 | description = "The name of the instance" 13 | } 14 | variable "zone" { 15 | type = string 16 | description = "The zone of the instance" 17 | } 18 | variable "machine_type" { 19 | type = string 20 | description = "The machine type of the instance" 21 | } 22 | variable "boot_disk" { 23 | type = string 24 | description = "The boot disk of the instance" 25 | } 26 | variable "network" { 27 | type = string 28 | description = "The network of the instance" 29 | } 30 | 31 | variable "region" { 32 | type = string 33 | description = "The region of the instance" 34 | } -------------------------------------------------------------------------------- /Hands-On/virtual_machine/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | google = { 4 | source = "hashicorp/google" 5 | version = "5.8.0" 6 | } 7 | } 8 | } 9 | 10 | provider "google" { 11 | # Configuration options 12 | project = var.project_id 13 | region = var.region 14 | } 15 | 16 | resource "google_service_account" "default" { 17 | account_id = var.account_id 18 | display_name = var.display_name 19 | } 20 | 21 | resource "google_compute_instance" "default" { 22 | name = var.instance_name 23 | machine_type = var.machine_type 24 | zone = var.zone 25 | 26 | tags = ["foo", "bar"] 27 | 28 | boot_disk { 29 | initialize_params { 30 | image = "debian-cloud/debian-11" 31 | labels = { 32 | my_label = "value" 33 | } 34 | } 35 | } 36 | 37 | // Local SSD disk 38 | scratch_disk { 39 | interface = "NVME" 40 | } 41 | 42 | network_interface { 43 | network = "default" 44 | 45 | access_config { 46 | // Ephemeral public IP 47 | } 48 | } 49 | 50 | metadata = { 51 | foo = "bar" 52 | } 53 | 54 | metadata_startup_script = "echo hi > /test.txt" 55 | 56 | service_account { 57 | # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. 58 | email = google_service_account.default.email 59 | scopes = ["cloud-platform"] 60 | } 61 | } -------------------------------------------------------------------------------- /Learning/Section7_CloudBlockStorage&FileStore/README.md: -------------------------------------------------------------------------------- 1 | In Google Cloud Platform (GCP), for block storage, you can use Google Cloud's Persistent Disks, and for file storage, you can use Google Cloud's Filestore service. Below is an overview of each service: 2 | 3 | ### Google Cloud Persistent Disks (Block Storage) 4 | 5 | Google Cloud Persistent Disks provide block storage for your virtual machine instances. Persistent Disks are durable and reliable storage options that can be attached to virtual machine instances to store data. There are two types of Persistent Disks: Standard Persistent Disks and SSD Persistent Disks, each optimized for different use cases. 6 | 7 | You can create and manage Persistent Disks through the Google Cloud Console, the `gcloud` command-line tool, or the Google Cloud API. 8 | 9 | ### Google Cloud Filestore (File Storage) 10 | 11 | Google Cloud Filestore provides a fully managed network-attached storage (NAS) service for applications that require a filesystem interface and a shared filesystem for data. 
It is typically used when multiple clients, such as Compute Engine VMs or GKE pods, need low-latency shared access to the same files. 12 | 13 | Filestore exposes file shares over NFS and is offered in several service tiers (such as Basic HDD, Basic SSD, and Enterprise) that trade off performance, capacity, and availability. It targets Linux NFS workloads; for Windows (SMB/CIFS) file shares, a partner offering such as NetApp Cloud Volumes is typically used instead. 14 | 15 | You can create and manage Filestore instances through the Google Cloud Console, the `gcloud` command-line tool, or the Google Cloud API. 16 | 17 | These services are designed to meet various storage needs in the Google Cloud environment, providing scalable and reliable storage solutions for your applications. -------------------------------------------------------------------------------- /Learning/Section3_GCP_Basic_Services/cloud-run/README.md: -------------------------------------------------------------------------------- 1 | Google Cloud Run is a fully managed compute platform that allows you to run stateless containers that are invocable via HTTP requests. Here's an overview of how to deploy a container to Google Cloud Run: 2 | 3 | 1. **Containerize Your Application**: 4 | - Containerize your application using Docker. Create a `Dockerfile` in the root of your application to define how your application should be packaged into a container. 5 | 6 | 2. **Build and Push the Docker Image**: 7 | - Build the Docker image and push it to Google Container Registry (GCR) using the following commands: 8 | ```bash 9 | # Build the Docker image 10 | docker build -t gcr.io/[PROJECT_ID]/[IMAGE_NAME] . 11 | 12 | # Push the Docker image to GCR 13 | docker push gcr.io/[PROJECT_ID]/[IMAGE_NAME] 14 | ``` 15 | 16 | 3. **Deploy to Cloud Run**: 17 | - Use the `gcloud run deploy` command to deploy your container to Google Cloud Run: 18 | ```bash 19 | gcloud run deploy --image gcr.io/[PROJECT_ID]/[IMAGE_NAME] --platform managed 20 | ``` 21 | 22 | 4. **Access Your Application**: 23 | - Once the deployment is complete, you will receive a URL that can be used to access your application running on Cloud Run. 24 | 25 | ### Additional Considerations: 26 | - **Environment Variables**: You can set environment variables for your container using the `--set-env-vars` flag during deployment. 27 | - **Concurrency**: Cloud Run automatically scales your container up and down based on traffic. You can configure the maximum number of concurrent requests per instance using the `--concurrency` flag. 28 | 29 | By following these steps, you can deploy your stateless container to Google Cloud Run, allowing you to run and scale your applications automatically in a serverless environment. -------------------------------------------------------------------------------- /Hands-On/cloud_function/gcs-trigger/README.md: -------------------------------------------------------------------------------- 1 | To create a Google Cloud Function triggered by Google Cloud Storage using Node.js, follow these steps: 2 | 3 | 1. **Set Up Your Development Environment**: 4 | - Ensure you have Node.js and npm installed on your local development environment. 5 | 6 | 2. **Initialize a New Node.js Project**: 7 | - Create a new directory for your project and run `npm init -y` to initialize a new Node.js project. This will create a `package.json` file. 8 | 9 | 3.
**Install the Functions Framework**: 10 | - Install the `@google-cloud/functions-framework` package, which is the Functions Framework for Node.js, by running the following command: 11 | ```bash 12 | npm install @google-cloud/functions-framework 13 | ``` 14 | 15 | 4. **Create Your Cloud Function**: 16 | - Create a new JavaScript file (e.g., `index.js`) and define your Cloud Function. Here's a basic example for a Cloud Function triggered by changes in a Cloud Storage bucket: 17 | ```javascript 18 | exports.processFile = (event, context) => { 19 | const file = event; 20 | console.log(`Processing file: ${file.name}`); 21 | 22 | // Custom logic to handle the file 23 | 24 | // Cloud Functions acknowledges the event automatically on successful return 25 | console.log(`File processed: ${file.name}`); 26 | }; 27 | ``` 28 | 29 | 5. **Deploy the Function to GCP**: 30 | - Deploy the function to Google Cloud by using the `gcloud` command-line tool. Ensure that you have authenticated your gcloud CLI and set the project: 31 | ```bash 32 | gcloud functions deploy processFile --runtime=nodejs14 --trigger-resource=[YOUR_BUCKET_NAME] --trigger-event=google.storage.object.finalize 33 | ``` 34 | 35 | 6. **Test the Deployed Function**: 36 | - Once the function is deployed, you can upload a file to [YOUR_BUCKET_NAME] and verify in the function's logs that it ran. -------------------------------------------------------------------------------- /Hands-On/cloud_run/jobs/README.md: -------------------------------------------------------------------------------- 1 | To deploy a Node.js application to Google Cloud Run, you can follow these general steps: 2 | 3 | 1. **Set Up Your Node.js Application**: 4 | - Create a Node.js application or use an existing one that you want to deploy to Google Cloud Run. 5 | 6 | 2. **Dockerize Your Application**: 7 | - Create a `Dockerfile` in the root of your Node.js application to containerize the application. Here's a basic example for a Node.js application: 8 | ```Dockerfile 9 | # Use the official Node.js image 10 | FROM node:14 11 | 12 | # Set the working directory 13 | WORKDIR /usr/src/app 14 | 15 | # Copy package.json and package-lock.json 16 | COPY package*.json ./ 17 | 18 | # Install app dependencies 19 | RUN npm install 20 | 21 | # Copy the application code 22 | COPY . . 23 | 24 | # Command to run the application 25 | CMD ["node", "index.js"] 26 | ``` 27 | 28 | 3. **Build and Push the Docker Image**: 29 | - Build the Docker image using the `docker build` command and then push the image to Google Container Registry (GCR). 30 | ```bash 31 | # Build the Docker image 32 | docker build -t gcr.io/[PROJECT_ID]/[IMAGE_NAME] . 33 | 34 | # Push the Docker image to GCR 35 | docker push gcr.io/[PROJECT_ID]/[IMAGE_NAME] 36 | ``` 37 | 38 | 4. **Deploy to Cloud Run**: 39 | - Deploy the container image to Google Cloud Run using the `gcloud run deploy` command. 40 | ```bash 41 | gcloud run deploy --image gcr.io/[PROJECT_ID]/[IMAGE_NAME] --platform managed 42 | ``` 43 | 44 | 5. **Access Your Application**: 45 | - Once the deployment is complete, you will receive a URL that can be used to access your deployed Node.js application. 46 | 47 | By following these steps, you can containerize and deploy a Node.js application to Google Cloud Run, allowing you to run your application in a fully managed serverless environment.
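48 | 49 | Note that this folder's `index.js` is a run-to-completion script rather than an HTTP server, which maps to a Cloud Run *job* rather than a service (the `gcloud run deploy` command above creates a service). A minimal sketch of the job-specific commands, assuming the image has already been pushed to GCR; the job name and region are illustrative placeholders: 50 | ```bash 51 | # Create a Cloud Run job from the pushed image 52 | gcloud run jobs create my-job \ 53 | --image gcr.io/[PROJECT_ID]/[IMAGE_NAME] \ 54 | --region us-central1 55 | 56 | # Execute the job and wait for it to complete 57 | gcloud run jobs execute my-job --region us-central1 --wait 58 | ```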
-------------------------------------------------------------------------------- /Hands-On/app_enginee/README.md: -------------------------------------------------------------------------------- 1 | To deploy an application to Google Cloud Platform's (GCP) App Engine, you can use the `gcloud` command-line tool. Below are the basic steps to deploy your application: 2 | 3 | ### Prerequisites 4 | 1. **Install `gcloud` SDK**: If you haven't installed `gcloud` SDK, you can download it from the [Google Cloud SDK page](https://cloud.google.com/sdk/docs/install). 5 | 6 | 2. **Authenticate and Set Project**: Run `gcloud auth login` to authenticate and select the project where you want to deploy the application: `gcloud config set project PROJECT_ID`. 7 | 8 | ### Basic Deployment Steps 9 | 1. **Navigate to Your Application Directory**: Open a terminal or command prompt and navigate to the directory where your application files are located. 10 | 11 | 2. **Deploy Your Application**: Run the following command to deploy your application to App Engine: 12 | ```bash 13 | gcloud app deploy 14 | ``` 15 | 16 | 3. **Review and Confirm**: When prompted, review the changes that will be deployed and confirm the deployment. 17 | 18 | 4. **Access the Deployed Application**: After the deployment is complete, you can access the deployed application using the URL provided in the output of the `gcloud app deploy` command. 19 | 20 | ### Example 21 | Suppose you have a Node.js application in a directory named `my-app`. To deploy this application to App Engine, navigate to the `my-app` directory and run the deployment command: 22 | ```bash 23 | cd my-app 24 | gcloud app deploy 25 | ``` 26 | 27 | This command will deploy your application to App Engine based on the settings in the application's configuration files. 28 | 29 | ### Additional Considerations 30 | - Ensure your application's code and configuration files are set up correctly for deployment to App Engine. 31 | - Review the logs and deployment details to verify the success of the deployment. 32 | 33 | By following these steps, you can deploy your application to Google Cloud Platform's App Engine using the `gcloud` command-line tool. -------------------------------------------------------------------------------- /Hands-On/cloud_run/services/README.md: -------------------------------------------------------------------------------- 1 | To deploy a Node.js application to Google Cloud Run, you can follow these general steps: 2 | 3 | 1. **Set Up Your Node.js Application**: 4 | - Create a Node.js application or use an existing one that you want to deploy to Google Cloud Run. 5 | 6 | 2. **Dockerize Your Application**: 7 | - Create a `Dockerfile` in the root of your Node.js application to containerize the application. Here's a basic example for a Node.js application: 8 | ```Dockerfile 9 | # Use the official Node.js image 10 | FROM node:14 11 | 12 | # Set the working directory 13 | WORKDIR /usr/src/app 14 | 15 | # Copy package.json and package-lock.json 16 | COPY package*.json ./ 17 | 18 | # Install app dependencies 19 | RUN npm install 20 | 21 | # Copy the application code 22 | COPY . . 23 | 24 | # Set the port 25 | ENV PORT=3000 26 | 27 | # Expose the port 28 | EXPOSE 3000 29 | 30 | # Command to run the application 31 | CMD ["node", "index.js"] 32 | ``` 33 | 34 | 3. **Build and Push the Docker Image**: 35 | - Build the Docker image using the `docker build` command and then push the image to Google Container Registry (GCR). 
36 | ```bash 37 | # Build the Docker image 38 | docker build -t gcr.io/[PROJECT_ID]/[IMAGE_NAME] . 39 | 40 | # Push the Docker image to GCR 41 | docker push gcr.io/[PROJECT_ID]/[IMAGE_NAME] 42 | ``` 43 | 44 | 4. **Deploy to Cloud Run**: 45 | - Deploy the container image to Google Cloud Run using the `gcloud run deploy` command. 46 | ```bash 47 | gcloud run deploy --image gcr.io/[PROJECT_ID]/[IMAGE_NAME] --platform managed 48 | ``` 49 | 50 | 5. **Access Your Application**: 51 | - Once the deployment is complete, you will receive a URL that can be used to access your deployed Node.js application. 52 | 53 | By following these steps, you can containerize and deploy a Node.js application to Google Cloud Run, allowing you to run your application in a fully managed serverless environment. -------------------------------------------------------------------------------- /Learning/Section3_GCP_Basic_Services/README.md: -------------------------------------------------------------------------------- 1 | # GCP Services 2 | 3 | ## Introduction 4 | Google Cloud Platform (GCP) offers several basic infrastructure services to support computing, networking, and storage needs. Here are some key services in this category: 5 | 6 | 1. **Compute Services**: 7 | - **Compute Engine**: Provides scalable virtual machines that run on Google's infrastructure. 8 | - **App Engine**: A fully managed platform for building and deploying web applications and APIs. 9 | - **Kubernetes Engine (GKE)**: Managed Kubernetes service for orchestrating containerized applications. 10 | 11 | 2. **Storage Services**: 12 | - **Cloud Storage**: Object storage that allows you to store and access data on Google's infrastructure. 13 | - **Cloud SQL**: Fully managed relational database service for MySQL, PostgreSQL, and SQL Server. 14 | - **Cloud Bigtable**: NoSQL wide-column database for real-time analytics. 15 | 16 | 3. **Networking**: 17 | - **Virtual Private Cloud (VPC)**: Provides networking functionality for GCP resources, allowing you to create and manage virtual networks. 18 | - **Cloud Load Balancing**: Distributes incoming traffic across multiple instances to ensure application responsiveness and availability. 19 | - **Cloud CDN**: Content Delivery Network for delivering content to users with low latency and high data transfer speeds. 20 | 21 | 4. **Identity and Access Management (IAM)**: 22 | - **Cloud IAM**: Allows you to manage access control by defining who (identity) has what access (role) for which resource. 23 | 24 | 5. **Developer Tools**: 25 | - **Cloud Build**: Provides a fully managed continuous integration and continuous delivery (CI/CD) platform that runs on Google Cloud. 26 | - **Cloud Source Repositories**: Hosted private Git repositories for storing, managing, and tracking changes to code. 27 | 28 | These basic infrastructure services form the foundation for building and deploying applications on Google Cloud Platform, providing essential computing, storage, networking, and development tools to support a wide range of use cases. 29 | -------------------------------------------------------------------------------- /Hands-On/cloud_function/http-trigger/README.md: -------------------------------------------------------------------------------- 1 | To create an HTTP-triggered Cloud Function using Node.js in Google Cloud Platform (GCP), you can follow the steps below: 2 | 3 | 1. **Set Up Your Development Environment**: 4 | - Ensure you have Node.js and npm installed on your local development environment. 5 | 6 | 2. 
**Initialize a New Node.js Project**: 7 | - Create a new directory for your project and run `npm init -y` to initialize a new Node.js project. This will create a `package.json` file. 8 | 9 | 3. **Install the Functions Framework**: 10 | - Install the `@google-cloud/functions-framework` package, which is the Functions Framework for Node.js, by running the following command: 11 | ```bash 12 | npm install @google-cloud/functions-framework 13 | ``` 14 | 15 | 4. **Create the Cloud Function**: 16 | - Create a new JavaScript file (e.g., `index.js`) and define your HTTP-triggered Cloud Function. Here's a simple example that responds with a "Hello, World!" message: 17 | ```javascript 18 | const functions = require('@google-cloud/functions-framework'); 19 | functions.http('helloGET', (req, res) => { 20 | res.send('Hello World!'); 21 | }); 22 | ``` 23 | 24 | 5. **Run the Function Locally**: 25 | - You can run the function locally for testing using the Functions Framework by running the following command (the target must match the registered function name, `helloGET`): 26 | ```bash 27 | npx @google-cloud/functions-framework --target=helloGET 28 | ``` 29 | 30 | 6. **Deploy the Function to GCP**: 31 | - Once you have tested the function locally, you can deploy it to Google Cloud by using the `gcloud` command-line tool. Ensure that you have authenticated your gcloud CLI and set the project: 32 | ```bash 33 | gcloud functions deploy helloGET --runtime=nodejs14 --trigger-http 34 | ``` 35 | 36 | 7. **Test the Deployed Function**: 37 | - Once the function is deployed, you can test it by sending an HTTP request to the provided URL. 38 | 39 | By following these steps, you can create an HTTP-triggered Cloud Function using Node.js and deploy it to Google Cloud Platform. This will allow you to handle HTTP requests and create serverless API endpoints or webhooks. -------------------------------------------------------------------------------- /Hands-On/dockers/README.md: -------------------------------------------------------------------------------- 1 | To build a Docker image and push it to Google Container Registry (GCR), here's a basic guide: 2 | 3 | ### Building and Pushing a Docker Image to GCR 4 | 5 | 1. **Prerequisites**: 6 | - Ensure that you have Docker installed locally and that you are authenticated to Google Cloud using the `gcloud` command-line tool. 7 | 8 | 2. **Dockerfile**: 9 | - Create a `Dockerfile` in the root directory of your project. This file defines the steps needed to build your Docker image. 10 | 11 | Example `Dockerfile`: 12 | ```Dockerfile 13 | # Use an official Node.js runtime as the base image 14 | FROM node:14 15 | 16 | # Set the working directory 17 | WORKDIR /usr/src/app 18 | 19 | # Copy package.json and package-lock.json 20 | COPY package*.json ./ 21 | 22 | # Install app dependencies 23 | RUN npm install 24 | 25 | # Copy the application code 26 | COPY . . 27 | 28 | ENV PORT=8080 29 | 30 | # Expose the port 31 | EXPOSE 8080 32 | 33 | # Command to run the application 34 | CMD ["node", "app.js"] 35 | ``` 36 | 37 | 3. **Build the Docker Image**: 38 | - Open a terminal and navigate to the directory containing your `Dockerfile`. Run the following command to build the Docker image: 39 | ```bash 40 | docker build -t gcr.io/[PROJECT_ID]/[IMAGE_NAME] . 41 | ``` 42 | 43 | 4. **Push the Docker Image to GCR**: 44 | - After the image is built, push it to Google Container Registry using the following command: 45 | ```bash 46 | docker push gcr.io/[PROJECT_ID]/[IMAGE_NAME] 47 | ``` 48 | 49 | 5.
**Deploy the Docker Image**: 50 | - Use the pushed image to deploy your application to Google Cloud services, such as Google Kubernetes Engine (GKE) or Google Cloud Run. 51 | 52 | 6. **Accessing the Image**: 53 | - Once the image is pushed to GCR, it can be accessed and used in various Google Cloud services that support containerized applications. 54 | 55 | By including these steps in the README file, you provide a clear guide for building and pushing a Docker image to Google Container Registry, allowing others to understand the process for deploying containerized applications on Google Cloud Platform. -------------------------------------------------------------------------------- /Learning/Section3_GCP_Basic_Services/virtual_machine/README.md: -------------------------------------------------------------------------------- 1 | # Virtual Machine 2 | 3 | To provision a virtual machine (VM) in Google Cloud Platform (GCP), you can use the Google Cloud Console or the `gcloud` command-line tool. Here's a basic example using the `gcloud` tool to create a VM instance: 4 | 5 | ### Using `gcloud` Command-line Tool: 6 | 7 | 1. **Set up gcloud SDK**: If you haven't installed `gcloud` SDK, you can download it from the [Google Cloud SDK page](https://cloud.google.com/sdk/docs/install). 8 | 9 | 2. **Authenticate and Set Project**: Run `gcloud auth login` to authenticate and select the project where you want to create the VM: `gcloud config set project PROJECT_ID`. 10 | 11 | 3. **Create a VM Instance**: Use the `gcloud compute instances create` command to create a VM. Replace `INSTANCE_NAME`, `MACHINE_TYPE`, `IMAGE`, and `ZONE` with your desired values. 12 | 13 | ```bash 14 | gcloud compute instances create INSTANCE_NAME \ 15 | --machine-type MACHINE_TYPE \ 16 | --image IMAGE \ 17 | --zone ZONE 18 | ``` 19 | 20 | For example: 21 | ```bash 22 | gcloud compute instances create my-vm \ 23 | --machine-type n1-standard-1 \ 24 | --image-family debian-9 \ 25 | --image-project debian-cloud \ 26 | --zone us-central1-a 27 | ``` 28 | 29 | This command creates a VM instance named `my-vm` with machine type `n1-standard-1`, using the Debian 9 image in the `us-central1-a` zone. 30 | 31 | 4. **Accessing the VM**: After the instance is created, you can SSH into the VM using the provided external IP address. 32 | 33 | ### Using Google Cloud Console: 34 | 35 | 1. **Navigate to Compute Engine**: In the Google Cloud Console, go to "Compute Engine" from the left-hand navigation menu. 36 | 37 | 2. **Create Instance**: Click on "Create" to create a new instance, then fill in the details such as instance name, machine type, boot disk, etc. 38 | 39 | 3. **Networking and Firewall**: Set up networking and firewall rules as per your requirements. 40 | 41 | 4. **Create**: Click "Create" to provision the VM instance. 42 | 43 | These are the basic steps to provision a VM in Google Cloud Platform. Adjust the parameters such as machine type, image, zone, and additional settings based on your specific requirements. -------------------------------------------------------------------------------- /Learning/Section1_Data_Engineering_Concepts/README.md: -------------------------------------------------------------------------------- 1 | ## Data Engineering Overview 2 | 3 | 1. Data pipeline 4 | 2. 
How data flows 5 | 6 | ```mermaid 7 | graph LR 8 | A[Ingestion] --> B(Storage) --> C(Process and analyze) --> D(Explore and visualize) 9 | 10 | ``` 11 | ![data](./images/data-lifecycle-1.webp) 12 | 13 | ### Ingest 14 | - Gather data from multiple sources 15 | - Data gathered from applications 16 | - event logs, clickstream data, e-commerce transactions, etc. 17 | 18 | - Streaming ingest 19 | - Pub/Sub 20 | 21 | - Batch ingest 22 | - Transfer services 23 | - Cloud Storage (GCS) via `gsutil` 24 | ### Store 25 | ### Process and analyze 26 | 1. Cloud Dataproc 27 | - Existing Hadoop/Spark applications 28 | - Machine learning and data science ecosystem 29 | - Tunable cluster parameters 30 | 2. Cloud Dataflow 31 | - New data processing pipelines 32 | - Unified streaming and batch 33 | - Fully managed, no ops 34 | 3. Cloud Dataprep 35 | - UI-driven data preparation 36 | - Scales on demand 37 | - Fully managed, no ops 38 | ### Types of Data by Structure 39 | 1. Structured: 40 | - Tabular 41 | - Represented by rows and columns 42 | - SQL can be used to interact with the data 43 | - Fixed schema: each row has the same number of columns 44 | - Relational databases hold structured data 45 | - MySQL, MS SQL Server, PostgreSQL 46 | - In GCP: BigQuery, Cloud SQL 47 | 48 | `Example:` 49 | ![Alt text](./images/example-datastructure.png) 50 | 2. Semi-structured 51 | - Records need not all have the same set of properties 52 | - No fixed schema 53 | - Flexible structure 54 | - NoSQL kind of data 55 | - Stores data as key-value pairs or documents (e.g., JSON) 56 | - Databases: MongoDB, Cassandra, Neo4j, Redis 57 | - In GCP: Bigtable, Memorystore, Datastore 58 | 59 | `Example:` 60 | 61 | ![Alt text](./images//example-semidatastructure.png) 62 | 63 | 3. Unstructured 64 | - No predefined structure in the data 65 | - Images 66 | - Natural language text 67 | - In GCP: Google Cloud Storage 68 | 69 | ## Batch Data and Streaming 70 | 1. Batch data: 71 | - Defined start and end of the data; data size is known 72 | - High volumes of data processed at periodic intervals 73 | - Processing can take a long time 74 | - Example: payment processing 75 | 2. Streaming: 76 | - Unbounded; no defined end 77 | - Data is processed as it arrives 78 | - Size of the data is unknown 79 | - Light per-record processing, typically milliseconds 80 | - Example: stock data processing -------------------------------------------------------------------------------- /Hands-On/cloud_function/pubsub-trigger/README.md: -------------------------------------------------------------------------------- 1 | To create a Google Cloud Function triggered by Google Cloud Pub/Sub using Node.js, follow these steps: 2 | 3 | 1. **Set Up Your Development Environment**: 4 | - Ensure you have Node.js and npm installed on your local development environment. 5 | 6 | 2. **Initialize a New Node.js Project**: 7 | - Create a new directory for your project and run `npm init -y` to initialize a new Node.js project. This will create a `package.json` file. 8 | 9 | 3. **Install the Functions Framework**: 10 | - Install the `@google-cloud/functions-framework` package, which is the Functions Framework for Node.js, by running the following command: 11 | ```bash 12 | npm install @google-cloud/functions-framework 13 | ``` 14 | 15 | 4. **Create Your Cloud Function**: 16 | - Create a new JavaScript file (e.g., `index.js`) and define your Cloud Function.
Here's a basic example for a Pub/Sub-triggered Cloud Function: 17 | ```javascript 18 | exports.subscribe = (event, context) => { 19 | const pubsubMessage = event.data; 20 | const data = JSON.parse(Buffer.from(pubsubMessage, 'base64').toString()); 21 | 22 | console.log(data); 23 | 24 | // Custom logic to handle the Pub/Sub message 25 | 26 | // No explicit ack is needed: Cloud Functions acknowledges the message 27 | // automatically when the function completes successfully. 28 | }; 29 | ``` 30 | 31 | 5. **Run the Function Locally**: 32 | - You can run the function locally for testing using the Functions Framework by running the following command: 33 | ```bash 34 | npx @google-cloud/functions-framework --target=subscribe --signature-type=event 35 | ``` 36 | 37 | 6. **Deploy the Function to GCP**: 38 | - Once you have tested the function locally, you can deploy it to Google Cloud by using the `gcloud` command-line tool. Ensure that you have authenticated your gcloud CLI and set the project: 39 | ```bash 40 | gcloud functions deploy subscribe --runtime=nodejs14 --trigger-topic=[YOUR_TOPIC_NAME] 41 | ``` 42 | 43 | 7. **Test the Deployed Function**: 44 | - Once the function is deployed, you can publish a message to the specified Pub/Sub topic to test the function in the GCP environment. 45 | 46 | By following these steps, you can create a Cloud Function triggered by Google Cloud Pub/Sub using Node.js and deploy it to Google Cloud Platform. This will allow you to process Pub/Sub messages in a serverless environment. -------------------------------------------------------------------------------- /Learning/Section4_Storage_Product/README.md: -------------------------------------------------------------------------------- 1 | Google Cloud Platform (GCP) offers various storage products to cater to different use cases and requirements. Here are some of the key storage products provided by GCP: 2 | 3 | ### Cloud Storage 4 | - **Use Case**: Cloud Storage is a general-purpose object storage service designed for durability, availability, and scalability. 5 | - **Features**: It offers features such as multi-regional and regional storage, object versioning, lifecycle management, and integration with other GCP services. 6 | - **Typical Use**: Storing and serving website content, storing backups, archiving data, and serving as a data lake. 7 | 8 | ### Persistent Disk 9 | - **Use Case**: Persistent Disk provides block storage for VM instances, offering both standard and SSD options. 10 | - **Features**: It offers durable and high-performance block storage for VMs and can be resized and attached to VM instances. 11 | - **Typical Use**: Running databases, file systems, and other applications that require high-performance block storage. 12 | 13 | ### Cloud Filestore 14 | - **Use Case**: Cloud Filestore provides a fully managed NFS file system service for applications that require a filesystem interface and a shared file system. 15 | - **Features**: It offers high-performance file storage for applications running on Google Cloud Platform. 16 | - **Typical Use**: Storing application and user data, managing content for media and entertainment, and managing data for analytics workloads. 17 | 18 | ### Cloud Storage for Firebase 19 | - **Use Case**: This product is designed for mobile and web application development, providing a simple and easy-to-use object storage solution for Firebase applications.
20 | - **Features**: It integrates seamlessly with Firebase and provides secure and scalable storage for user-generated content. 21 | - **Typical Use**: Storing user-generated files, images, and other data for mobile and web applications. 22 | 23 | ### Cloud Bigtable 24 | - **Use Case**: Cloud Bigtable is a fully managed NoSQL wide-column database for large analytical and operational workloads. 25 | - **Features**: It offers high throughput and low-latency access to data, making it suitable for time-series data, financial data, IoT, and machine learning applications. 26 | - **Typical Use**: Analyzing large datasets, storing time-series data, and powering machine learning and analytics workloads. 27 | 28 | Each of these storage products caters to different requirements and workloads, offering a variety of features and capabilities to address diverse data storage needs within the Google Cloud Platform ecosystem. 29 | 30 | -------------------------------------------------------------------------------- /Learning/Section3_GCP_Basic_Services/cloud-sql/README.md: -------------------------------------------------------------------------------- 1 | Google Cloud SQL is a fully managed database service that makes it easy to set up, maintain, manage, and administer relational databases on Google Cloud Platform. It provides support for popular database engines such as MySQL, PostgreSQL, and SQL Server. Here's an overview of Google Cloud SQL: 2 | 3 | ### Key Features: 4 | 5 | 1. **Fully Managed Service**: Google Cloud SQL is a fully managed service, handling database management tasks such as backups, replication, patch management, and updates. 6 | 7 | 2. **High Availability**: It offers high availability through automatic failover and backups, ensuring that your databases remain accessible and your data is protected. 8 | 9 | 3. **Scalability**: Cloud SQL allows for both vertical and horizontal scaling, enabling you to resize your database instances and handle increased workloads. 10 | 11 | 4. **Security and Compliance**: It provides built-in security features such as data encryption, role-based access control, and compliance with industry standards. 12 | 13 | 5. **Integration with GCP**: Cloud SQL integrates seamlessly with other GCP services, allowing you to connect your applications and services to your databases. 14 | 15 | ### Use Cases: 16 | 17 | 1. **Web Applications**: Cloud SQL is commonly used to power web applications, providing a reliable and scalable database backend for applications hosted on GCP. 18 | 19 | 2. **Analytics and Reporting**: It is suitable for storing and managing data for analytics and reporting workloads, supporting complex queries and data analysis. 20 | 21 | 3. **E-commerce and CMS**: Cloud SQL is often used to handle e-commerce databases, content management systems, and other applications that require a robust and scalable database solution. 22 | 23 | 4. **Development and Testing**: It is ideal for development and testing environments, allowing developers to quickly spin up database instances without the overhead of managing infrastructure. 24 | 25 | ### Supported Database Engines: 26 | 27 | - **MySQL**: Cloud SQL supports MySQL 5.6, 5.7, and 8.0, offering features such as replication, automatic backups, and point-in-time recovery. 28 | - **PostgreSQL**: It supports PostgreSQL 9.6, 10, 11, and 12, providing features such as high availability, read replicas, and automated patching. 
29 | - **SQL Server**: Cloud SQL for SQL Server offers managed SQL Server instances, supporting features such as automated backups, high availability, and encryption. 30 | 31 | Google Cloud SQL is a versatile and reliable solution for managing relational databases in the cloud, offering features that cater to a wide range of use cases and industries. -------------------------------------------------------------------------------- /Learning/Section_10_GoogleCloudSQL/README.md: -------------------------------------------------------------------------------- 1 | # Google Cloud SQL 2 | 3 | As a Google Cloud Platform (GCP) data engineer professional, having a solid understanding of Google Cloud SQL is crucial. Here are some key areas you should focus on to effectively use Cloud SQL in your data engineering projects: 4 | 5 | 1. **Database Engines**: Understand the differences between the supported database engines (MySQL, PostgreSQL, and SQL Server) and know when to use each based on project requirements. 6 | 7 | 2. **Instance Management**: 8 | - Learn how to create, configure, and manage Cloud SQL instances. 9 | - Understand how to resize instances and storage to handle changing workloads. 10 | - Familiarize yourself with setting up high availability and read replicas for load balancing and fault tolerance. 11 | 12 | 3. **Backup and Restore**: 13 | - Know how to configure automated backups and perform manual backups. 14 | - Understand the process of restoring databases from backups in case of data loss or corruption. 15 | 16 | 4. **Security**: 17 | - Understand how to secure Cloud SQL instances using IAM roles and permissions. 18 | - Familiarize yourself with network security options, such as using private IPs and configuring authorized networks. 19 | - Learn about encryption options for data at rest and in transit. 20 | 21 | 5. **Performance Optimization**: 22 | - Learn how to monitor performance using Cloud Monitoring and Cloud Logging. 23 | - Understand how to use query optimization techniques and indexing to improve database performance. 24 | 25 | 6. **Data Migration**: 26 | - Know how to migrate existing databases to Cloud SQL using tools like Database Migration Service or third-party solutions. 27 | - Understand the considerations and best practices for minimizing downtime during migration. 28 | 29 | 7. **Integration with Other GCP Services**: 30 | - Understand how Cloud SQL integrates with other GCP services like BigQuery, Dataflow, and Kubernetes Engine for data processing and analytics. 31 | - Familiarize yourself with using Cloud SQL as a data source for applications running on Compute Engine or App Engine. 32 | 33 | 8. **Troubleshooting**: 34 | - Develop skills in diagnosing and resolving common issues related to connectivity, performance, and configuration. 35 | - Familiarize yourself with Cloud SQL logs and monitoring tools to identify and address potential problems. 36 | 37 | 9. **Compliance and Best Practices**: 38 | - Understand compliance requirements and ensure that your Cloud SQL implementation adheres to industry standards and best practices. 39 | 40 | 10. **Cost Management**: 41 | - Learn how to estimate and manage costs associated with Cloud SQL usage, including understanding pricing models and optimizing resource usage to reduce expenses. 42 | 43 | By mastering these areas, you'll be well-equipped to leverage Google Cloud SQL effectively as a GCP data engineer professional. 
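44 | 45 | As a quick hands-on reference, here is a minimal `gcloud` sketch for provisioning a PostgreSQL instance. The instance name, database, user, tier, and region below are illustrative placeholders, not values from this repo: 46 | ```bash 47 | # Create a Cloud SQL for PostgreSQL instance 48 | gcloud sql instances create my-postgres \ 49 | --database-version=POSTGRES_14 \ 50 | --tier=db-custom-2-7680 \ 51 | --region=us-central1 52 | 53 | # Create an application database and user on the instance 54 | gcloud sql databases create appdb --instance=my-postgres 55 | gcloud sql users create appuser --instance=my-postgres --password=[PASSWORD] 56 | ```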
-------------------------------------------------------------------------------- /Hands-On/bigquery/index.js: -------------------------------------------------------------------------------- 1 | const schema = [ 2 | { 3 | name: 'id', 4 | type: 'INTEGER' 5 | }, 6 | { 7 | name: 'name', 8 | type: 'STRING' 9 | }, 10 | { 11 | name: 'details', 12 | type: 'RECORD', 13 | fields: [ 14 | { 15 | name: 'age', 16 | type: 'INTEGER' 17 | }, 18 | { 19 | name: 'address', 20 | type: 'RECORD', 21 | fields: [ 22 | { 23 | name: 'city', 24 | type: 'STRING' 25 | }, 26 | { 27 | name: 'zip', 28 | type: 'STRING' 29 | } 30 | ] 31 | } 32 | ] 33 | } 34 | ]; 35 | const rows = { 36 | "id": 123, 37 | "name": "John Doe", 38 | "details": { 39 | "age": 30, 40 | "address": { 41 | "city": "New York", 42 | "zip": "10001" 43 | } 44 | } 45 | }; 46 | // Import the @google-cloud/bigquery library 47 | const { BigQuery } = require('@google-cloud/bigquery'); 48 | 49 | // Create a new BigQuery client (set projectId before running) 50 | const bigquery = new BigQuery({projectId: ""}); 51 | 52 | // Define the dataset and table names 53 | const datasetId = 'tuan_test'; 54 | const tableId = 'test_table_struct_schema'; 55 | // Create the table with the nested (RECORD) schema defined above 56 | async function createTable() { 57 | const [table] = await bigquery 58 | .dataset(datasetId) 59 | .createTable(tableId, { 60 | schema: schema 61 | }); 62 | return table; 63 | } 64 | async function updateTable() { 65 | const [table] = await bigquery 66 | .dataset(datasetId) 67 | .table(tableId) 68 | .get(); 69 | // Note: BigQuery schema updates are additive-only, so removing a field like this is typically rejected by the API. 70 | const newSchema = table.metadata.schema.fields.filter(field => field.name !== 'name'); 71 | await bigquery 72 | .dataset(datasetId) 73 | .table(tableId) 74 | .setMetadata({ 75 | schema: newSchema 76 | }); 77 | return table; 78 | } 79 | // Insert the data into the specified table 80 | async function insertNestedData() { 81 | try { 82 | const dataset = bigquery.dataset(datasetId); 83 | const table = dataset.table(tableId); 84 | 85 | // Insert the rows into the table 86 | await table.insert(rows); 87 | console.log('Data inserted successfully.'); 88 | } catch (error) { 89 | console.error(error); 90 | console.error('Error inserting data'); 91 | } 92 | } 93 | 94 | // Reference: the expected schema after updateTable() removes the 'name' field 95 | 96 | const newSchema = [ 97 | { 98 | name: 'id', 99 | type: 'INTEGER' 100 | }, 101 | { 102 | name: 'details', 103 | type: 'RECORD', 104 | fields: [ 105 | { 106 | name: 'age', 107 | type: 'INTEGER' 108 | }, 109 | { 110 | name: 'address', 111 | type: 'RECORD', 112 | fields: [ 113 | { 114 | name: 'city', 115 | type: 'STRING' 116 | }, 117 | { 118 | name: 'zip', 119 | type: 'STRING' 120 | } 121 | ] 122 | } 123 | ] 124 | } 125 | ]; 126 | updateTable(); // update the table schema (drops the 'name' field); call createTable() and insertNestedData() first as needed 127 | -------------------------------------------------------------------------------- /Learning/Section8_DatabaseConcept/README.md: -------------------------------------------------------------------------------- 1 | ### Section 8: Database Concepts 2 | 3 | #### 1. **Types of Databases** 4 | - **Relational Databases**: 5 | - Google Cloud SQL 6 | - Cloud Spanner 7 | - **NoSQL Databases**: 8 | - Firestore 9 | - Cloud Bigtable 10 | 11 | #### 2. **Database Design** 12 | - **Schema Design**: 13 | - Normalization and Denormalization 14 | - Primary and Foreign Keys 15 | - Indexing 16 | - **Data Modeling**: 17 | - Entity-Relationship Diagrams (ERDs) 18 | - Star and Snowflake Schemas (for Data Warehousing) 19 | 20 | #### 3.
**Database Management** 21 | - **Backup and Restore**: 22 | - Automated backups 23 | - Point-in-time recovery 24 | - **Replication and High Availability**: 25 | - Multi-regional replication 26 | - Failover strategies 27 | - **Scaling**: 28 | - Horizontal vs. Vertical Scaling 29 | - Sharding 30 | 31 | #### 4. **Querying and Performance Optimization** 32 | - **SQL Queries**: 33 | - Joins, Subqueries, and Aggregations 34 | - **Performance Tuning**: 35 | - Query Optimization 36 | - Indexing strategies 37 | - Caching mechanisms 38 | 39 | #### 5. **Data Security and Compliance** 40 | - **Encryption**: 41 | - At-rest and In-transit encryption 42 | - **Access Control**: 43 | - IAM roles and permissions 44 | - **Compliance**: 45 | - GDPR, HIPAA, and other regulatory requirements 46 | 47 | #### 6. **Transactional and Analytical Processing** 48 | - **OLTP (Online Transaction Processing)**: 49 | - Characteristics and use-cases 50 | - Suitable GCP services (e.g., Cloud SQL, Cloud Spanner) 51 | - **OLAP (Online Analytical Processing)**: 52 | - Characteristics and use-cases 53 | - Suitable GCP services (e.g., BigQuery) 54 | 55 | #### 7. **Data Migration** 56 | - **Migration Strategies**: 57 | - Lift and Shift 58 | - Hybrid and Multi-cloud strategies 59 | - **Tools and Services**: 60 | - Database Migration Service 61 | - Data Transfer Service 62 | 63 | #### 8. **Advanced Database Concepts** 64 | - **Big Data**: 65 | - Handling large-scale data with BigQuery 66 | - **Streaming Data**: 67 | - Real-time data processing with Pub/Sub and Dataflow 68 | 69 | ### Recommended Resources: 70 | 1. **Google Cloud Documentation**: 71 | - Cloud SQL: [Cloud SQL Documentation](https://cloud.google.com/sql/docs) 72 | - Cloud Spanner: [Cloud Spanner Documentation](https://cloud.google.com/spanner/docs) 73 | - BigQuery: [BigQuery Documentation](https://cloud.google.com/bigquery/docs) 74 | - Firestore: [Firestore Documentation](https://cloud.google.com/firestore/docs) 75 | - Bigtable: [Bigtable Documentation](https://cloud.google.com/bigtable/docs) 76 | 77 | 2. **Coursera and Qwiklabs**: 78 | - Google Cloud courses on Coursera 79 | - Hands-on labs on Qwiklabs 80 | 81 | 3. **Books and Guides**: 82 | - "Designing Data-Intensive Applications" by Martin Kleppmann 83 | - "Google Cloud Platform for Architects" by Vitthal Srinivasan 84 | 85 | 4. **Practice Exams**: 86 | - Use practice exams to test your knowledge and identify areas for improvement. 87 | 88 | Understanding these concepts is crucial for the Google Cloud Professional Data Engineer exam, as they form the foundation for designing and managing data solutions on GCP. Good luck with your studies! -------------------------------------------------------------------------------- /Learning/Section6_Data_Transfer_Service/README.md: -------------------------------------------------------------------------------- 1 | # Data Transfer Service 2 | 3 | ## Data Migration Service 4 | The Data Migration Service in Google Cloud Platform (GCP) is a fully managed service that enables you to migrate and sync data between databases, storage, and applications. It simplifies the process of migrating your data to GCP, allowing you to efficiently manage, monitor, and track your data migration tasks. 5 | 6 | Key features of the Google Cloud Data Migration Service include: 7 | 8 | 1. **Ease of Use**: Data Migration Service provides a user-friendly interface and simplifies the migration process, reducing the complexity and time required for data migration tasks. 9 | 10 | 2. 
**Managed Service**: It is a fully managed service, which means that Google Cloud handles the underlying infrastructure, monitoring, and maintenance of the migration service, allowing you to focus on your data migration tasks. 11 | 12 | 3. **Support for Various Source Databases**: The service supports migration from a variety of source databases, including MySQL, PostgreSQL, SQL Server, and Oracle. 13 | 14 | 4. **High Compatibility**: It offers high compatibility with GCP's database services, making it easy to migrate data to Google Cloud SQL or Google Cloud Spanner. 15 | 16 | 5. **Data Sync Capability**: In addition to data migration, the service also provides data sync capabilities, allowing you to keep the source and destination databases in sync after the initial migration. 17 | 18 | 6. **Monitoring and Management**: Data Migration Service provides monitoring and management tools that allow you to track the progress of your data migration tasks, providing insights into the migration process. 19 | 20 | 7. **Security and Compliance**: The service is designed with security and compliance in mind, ensuring that your data is migrated in a secure and compliant manner. 21 | 22 | Overall, the Data Migration Service in Google Cloud Platform offers a comprehensive solution for migrating and syncing data, simplifying the process and providing a managed and secure environment for your data migration tasks. 23 | 24 | In Google Cloud Platform (GCP), the Data Transfer Service provides a managed solution for transferring data from various sources into GCP storage services. It simplifies the process of migrating data from different platforms, enabling organizations to efficiently manage, monitor, and track data migration tasks. The Data Transfer Service encompasses several data transfer solutions, each tailored to specific data migration needs. Some of the key offerings include: 25 | 26 | 1. **Transfer Appliance**: Google provides a physical device, known as the Transfer Appliance, which allows organizations to transfer large volumes of data to Google Cloud. Users can easily transfer data from their on-premises data centers to GCP by physically shipping the Transfer Appliance. 27 | 28 | 2. **Online Transfer**: The Online Transfer service enables data transfer from various cloud providers, such as Amazon S3 and Azure Blob Storage, to Google Cloud Storage. It also supports data transfer from on-premises data centers to GCP. 29 | 30 | 3. **Transfer Service for SaaS**: This service allows for scheduled, fully managed data transfers from supported SaaS applications to Google Cloud Storage. It simplifies the process of migrating data from SaaS applications into GCP. 31 | 32 | 4. **Transfer Service for on-premises data**: This service facilitates the transfer of data from on-premises data centers to Google Cloud Storage. It provides a managed solution for data migration and synchronization. 33 | 34 | 5. **Transfer Service for Cloud Storage**: This service enables data transfer from other cloud storage providers to Google Cloud Storage, making it easier to migrate data from various cloud platforms to GCP. 35 | 36 | Each of these offerings is designed to simplify and streamline the process of transferring data into Google Cloud Platform, providing a managed, secure, and efficient solution for data migration needs. 
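As a concrete, hedged illustration of these services in code, the sketch below creates a simple bucket-to-bucket transfer job, assuming the `@google-cloud/storage-transfer` Node.js client; the project and bucket names are placeholders, and the exact request shape should be checked against the current client documentation:

```js
// Minimal sketch: a bucket-to-bucket job with the Storage Transfer Service
// Node.js client. Assumes `npm install @google-cloud/storage-transfer`.
const {StorageTransferServiceClient} = require('@google-cloud/storage-transfer');

async function createTransferJob() {
  const client = new StorageTransferServiceClient();

  const [transferJob] = await client.createTransferJob({
    transferJob: {
      projectId: 'my-project', // placeholder
      description: 'One-off copy between buckets',
      status: 'ENABLED',
      transferSpec: {
        gcsDataSource: {bucketName: 'source-bucket'},    // placeholder
        gcsDataSink: {bucketName: 'destination-bucket'}, // placeholder
      },
      schedule: {
        // Identical start and end dates run the job once.
        scheduleStartDate: {year: 2024, month: 1, day: 1},
        scheduleEndDate: {year: 2024, month: 1, day: 1},
      },
    },
  });

  console.log('Created transfer job:', transferJob.name);
}

createTransferJob().catch(console.error);
```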
-------------------------------------------------------------------------------- /Learning/Section3_GCP_Basic_Services/app-engine/README.md: -------------------------------------------------------------------------------- 1 | ### Google Cloud Platform (GCP) App Engine 2 | 3 | To deploy an application to Google Cloud Platform's (GCP) App Engine, you can use the `gcloud` command-line tool or create a configuration file for App Engine. Here, I'll provide an example of deploying a simple web application to App Engine using a configuration file. 4 | 5 | ### Example: Deploying a Simple Web Application to App Engine 6 | 7 | #### 1. Create a simple web application 8 | Create a simple web application in a directory. The application can be in any language supported by App Engine (such as Python, Java, Go, or Node.js). 9 | 10 | #### 2. Create an `app.yaml` configuration file 11 | Create an `app.yaml` file in the root directory of your application. This file contains configuration settings for App Engine. Here's an example for a basic Python web application: 12 | 13 | ```yaml 14 | runtime: python27 15 | api_version: 1 16 | threadsafe: true 17 | 18 | handlers: 19 | - url: /.* 20 | script: main.app 21 | ``` 22 | 23 | #### 3. Deploy the application 24 | Use the `gcloud` command-line tool to deploy the application to App Engine. Navigate to the root directory of your application and run the following command: 25 | 26 | ```bash 27 | gcloud app deploy 28 | ``` 29 | 30 | This command deploys your application to App Engine based on the settings in the `app.yaml` file. 31 | 32 | #### 4. Access the deployed application 33 | After the deployment is complete, you can access the deployed application using the URL provided in the output of the `gcloud app deploy` command. 34 | 35 | ### Additional Considerations 36 | - Ensure you have the necessary permissions to deploy applications to App Engine in your GCP project. 37 | - You can customize the `app.yaml` file to configure various settings such as scaling, environment variables, and runtime settings based on the requirements of your application. 38 | 39 | By following these steps, you can deploy a simple web application to Google Cloud Platform's App Engine using a configuration file and the `gcloud` command-line tool. 40 | 41 | 42 | #### Feature 43 | Google Cloud Platform's App Engine is a fully managed platform that enables developers to build and deploy applications without managing the infrastructure. It supports multiple programming languages and provides built-in services for monitoring, logging, and scaling. 44 | 45 | #### Use Case 46 | App Engine is suitable for various use cases, including: 47 | 1. **Web Application Hosting**: It's ideal for hosting web applications, APIs, and backend services without worrying about infrastructure management. 48 | 2. **Microservices**: App Engine supports microservices architecture, allowing developers to deploy and manage individual services independently. 49 | 3. **Serverless Applications**: It's a good fit for building serverless applications, where the focus is on writing code without the need to manage servers or containers. 50 | 51 | #### Usage 52 | Developers can use App Engine for: 53 | - **Automated Scaling**: App Engine automatically scales based on the traffic your application receives, ensuring that it can handle varying load levels. 54 | - **Built-in Services**: It provides access to built-in services such as Datastore (NoSQL database), Cloud Storage, and more, reducing the need for external integrations. 
55 | - **Continuous Deployment**: App Engine supports continuous deployment, allowing developers to deploy new code versions without downtime. 56 | 57 | #### Example Use Case 58 | A startup company is developing a new web application and wants to focus on application development without managing infrastructure. They choose to use App Engine to host their web application, allowing them to scale seamlessly as their user base grows. Additionally, they leverage built-in GCP services such as Cloud Storage and Datastore for managing application data and user files. 59 | 60 | In summary, GCP's App Engine is well-suited for hosting web applications, APIs, and microservices, providing automated scaling, built-in services, and seamless deployment capabilities for developers and organizations. -------------------------------------------------------------------------------- /Learning/Section3_GCP_Basic_Services/cloud-function/README.md: -------------------------------------------------------------------------------- 1 | # GCP - Cloud Function 2 | Google Cloud Functions is a serverless compute service that allows you to run event-driven code without having to manage the underlying infrastructure. Here's an overview of Google Cloud Functions: 3 | 4 | ### Key Features: 5 | 1. **Event-driven**: Cloud Functions are triggered by events such as changes in data, messages on a pub/sub topic, or HTTP requests. 6 | 7 | 2. **Pay-as-you-go**: You are billed only for the time your code is running and the resources it consumes. 8 | 9 | 3. **Support for Multiple Languages**: Cloud Functions supports several programming languages, including Node.js, Python, Go, Java, and .NET. 10 | 11 | 4. **Integration**: Cloud Functions seamlessly integrates with other GCP services, allowing for easy event-driven automation. 12 | 13 | ### Use Cases: 14 | 1. **Data Processing and ETL**: Trigger data processing tasks in response to changes in Cloud Storage, Cloud Pub/Sub, or other data sources. 15 | 16 | 2. **Real-time Stream Processing**: Process and analyze real-time data as it arrives from IoT devices, clickstreams, or other sources. 17 | 18 | 3. **Webhooks and API Endpoints**: Create lightweight APIs or handle webhooks to respond to HTTP requests. 19 | 20 | 4. **Automation and Orchestration**: Automate tasks and integrate with other GCP services to build serverless workflows. 21 | 22 | ### Benefits: 23 | 1. **Simplicity**: Cloud Functions abstracts away infrastructure management, allowing developers to focus on writing code. 24 | 25 | 2. **Scalability**: Functions are automatically scaled based on the load, ensuring that they can handle varying workloads. 26 | 27 | 3. **Event-driven Model**: Cloud Functions enable a reactive programming model, responding to events in real time. 28 | 29 | 4. **Fast Deployment**: Deploy code quickly and easily, with automatic scaling and load balancing handled by the platform. 30 | 31 | ### Considerations: 32 | 1. **Stateless**: Cloud Functions are stateless by design, and their execution environment is ephemeral. 33 | 34 | 2. **Execution Time Limits**: Functions have a maximum execution time limit, and long-running processes are not suited for this model. 35 | 36 | 3. **External Dependencies**: Care should be taken when using external dependencies, as they can impact cold start times and performance. 37 | 38 | 4. **Monitoring and Logging**: It's important to implement robust monitoring and logging to gain visibility into function execution and performance. 
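Before looking at the trigger types in the next section, here is a minimal sketch of the two most common function shapes in Node.js; the function names and messages are illustrative, and deployment would use `gcloud functions deploy` with the matching trigger flag:

```js
// Minimal sketch: an HTTP-triggered Cloud Function (Node.js runtime).
// The exported name must match the entry point used at deploy time.
exports.helloHttp = (req, res) => {
  const name = (req.query && req.query.name) || 'World';
  res.status(200).send(`Hello, ${name}!`);
};

// Minimal sketch: a Pub/Sub-triggered background function.
// The message payload arrives base64-encoded in message.data.
exports.helloPubSub = (message, context) => {
  const payload = message.data
    ? Buffer.from(message.data, 'base64').toString()
    : 'no payload';
  console.log(`Received Pub/Sub message: ${payload}`);
};
```

An HTTP function of this shape could be deployed with, for example, `gcloud functions deploy helloHttp --runtime nodejs18 --trigger-http`, leaving scaling and load balancing to the platform as described above.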
39 | 40 | ## Triggers 41 | In Google Cloud Functions, triggers are the mechanisms that initiate the execution of a function. Cloud Functions supports various triggers that can initiate the execution of your function based on different events or actions within Google Cloud Platform. Here are some common triggers used in Cloud Functions: 42 | 43 | 1. **HTTP Trigger**: 44 | - Cloud Functions can be triggered by HTTP requests. These functions are invoked when an HTTP request is sent to a specific URL. 45 | 46 | 2. **Cloud Storage Trigger**: 47 | - Functions can be triggered by events in Cloud Storage such as object creation, deletion, archiving, or metadata updates. 48 | 49 | 3. **Cloud Pub/Sub Trigger**: 50 | - Cloud Functions can be triggered by messages published to a Pub/Sub topic. The function is invoked when a new message is published to the specified topic. 51 | 52 | 4. **Cloud Firestore Trigger**: 53 | - Functions can be triggered by changes to a Cloud Firestore database. These functions are invoked when a document is created, updated, or deleted in a specified collection. 54 | 55 | 5. **Cloud Scheduler Trigger**: 56 | - Cloud Functions can be triggered by Cloud Scheduler, allowing you to schedule the execution of a function at defined intervals. 57 | 58 | 6. **Firebase Trigger**: 59 | - For Firebase-related events, Cloud Functions can be triggered by changes in Firebase products such as Realtime Database, Authentication, or Cloud Messaging. 60 | 61 | 7. **Cloud Logging Trigger**: 62 | - Functions can be triggered by log entries written to Cloud Logging. The function is invoked when a log entry matches a specified filter. 63 | 64 | 8. **Direct Invocation**: 65 | - Functions can also be directly invoked using the `gcloud` command-line tool, the Cloud Console, or the Cloud Functions API. 66 | 67 | By leveraging these triggers, you can build serverless applications that respond to various events and actions within Google Cloud Platform, Firebase, and other integrated services. Each trigger type allows you to create event-driven, scalable, and cost-effective applications without the need to manage the underlying infrastructure. 68 | Google Cloud Functions provide a powerful and flexible platform for building event-driven applications and serverless architectures, allowing developers to focus on writing code and creating value without the overhead of managing infrastructure. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

# Google Cloud Platform - Professional Data Engineer

2 | 3 | 4 | A Professional Data Engineer makes data usable and valuable for others by collecting, transforming, and publishing data. This individual evaluates and selects products and services to meet business and regulatory requirements. A Professional Data Engineer creates and manages robust data processing systems. This includes the ability to design, build, deploy, monitor, maintain, and secure data processing workloads. 5 | 6 | The Professional Data Engineer exam assesses your ability to: 7 | 8 | - Design data processing systems 9 | - Ingest and process data 10 | - Store the data 11 | - Prepare and use data for analysis 12 | - Maintain and automate data workloads 13 | - Design for reliability 14 | - Design for security and compliance 15 | - Analyze data and enable machine learning 16 | 17 | 18 | 19 | ## Study Approach for GCP Professional Data Engineer Certification 20 | 21 | Earning the GCP Professional Data Engineer (PDE) certification validates your skills in designing, building, and managing data processing systems on Google Cloud. Here's a study approach to help you prepare: 22 | 23 | **1. Assess Your Background:** 24 | 25 | * **Start by understanding the exam requirements and recommended experience (3+ years in data engineering, 1+ year with Google Cloud).** 26 | * **Evaluate your existing knowledge in data engineering and cloud fundamentals.** Do you have a solid understanding of data pipelines, data warehousing, data storage, and data analysis? 27 | * **Identify any knowledge gaps and prioritize areas that require more focus.** 28 | 29 | **2. Follow Official Resources:** 30 | 31 | * **Start with Google's official resources:** 32 | * **Professional Data Engineer Certification Exam Guide:** [https://cloud.google.com/learn/certification/data-engineer](https://cloud.google.com/learn/certification/data-engineer) 33 | * **Data Engineer Learning Path:** [https://www.cloudskillsboost.google/paths/16](https://www.cloudskillsboost.google/paths/16) 34 | * **Sample questions and practice tests:** [https://www.testpreptraining.com/certified-professional-data-engineer-practice-exam](https://www.testpreptraining.com/certified-professional-data-engineer-practice-exam) 35 | * **Complete the Google Cloud Platform Fundamentals for Data Engineers course:** This free course provides a solid foundation in GCP concepts and services relevant to the exam. 36 | 37 | **3. Expand Your Knowledge:** 38 | 39 | * **Enroll in online courses or training programs:** Several reputable platforms offer comprehensive courses specifically designed for the GCP PDE exam. Consider platforms like A Cloud Guru, Udemy, Coursera, or Pluralsight. 40 | * **Read books and articles written by Google Cloud experts:** These resources offer deeper insights into specific topics and real-world case studies. Examples include "Designing Data-Intensive Applications" by Martin Kleppmann and "GCP Data Engineering Cookbook" by Arnaud Mancho. 41 | * **Follow Google Cloud blogs and communities:** Stay updated on the latest developments and get insights from industry experts. Resources include the Google Cloud Blog, the Google Cloud Community forum, and the Google Data Cloud YouTube channel. 42 | 43 | **4. Hands-On Experience:** 44 | 45 | * **Set up a Google Cloud account and explore the various services.** Experiment with data ingestion, processing, storage, and analysis tools to solidify your understanding.
46 | * **Complete labs and hands-on exercises:** Several resources provide hands-on practice with GCP services relevant to the exam. Examples include the Qwiklabs platform and the Google Cloud Skills Boost program. 47 | * **Work on personal projects or contribute to open-source projects:** These activities provide a valuable opportunity to apply your knowledge and gain practical experience. 48 | 49 | **5. Practice and Assessment:** 50 | 51 | * **Take mock exams and practice quizzes:** These resources help you assess your understanding and identify areas that need improvement. 52 | * **Form a study group with other data engineers preparing for the exam:** Discuss concepts, share resources, and motivate each other. 53 | * **Review the official practice questions and sample answers:** Focus on understanding the reasoning behind the correct answers. 54 | 55 | **6. Time Management and Planning:** 56 | 57 | * **Create a study schedule and dedicate time each day or week to preparing.** 58 | * **Set realistic goals and track your progress.** 59 | * **Prioritize topics based on your strengths and weaknesses.** 60 | * **Don't hesitate to seek help from experts or online communities when you get stuck.** 61 | 62 | **Additional Tips:** 63 | 64 | * **Focus on understanding the underlying concepts, not just memorizing facts.** 65 | * **Practice answering scenario-based questions, which are heavily emphasized in the exam.** 66 | * **Take care of yourself and manage stress effectively.** 67 | * **Stay motivated and believe in yourself!** 68 | 69 | By following this comprehensive study approach and dedicating consistent effort, you'll be well-prepared to pass the GCP Professional Data Engineer certification exam and achieve your career goals. 70 | 71 | ## Acquire Hands-On Experience 72 | 73 | | No | Resource | Lab | 74 | |---|---|---| 75 | | 1 | Cloud Function | - | 76 | | 2 | Cloud Run | - | 77 | | 3 | Service Account | - | 78 | 79 | ![Alt text](pde.png) 80 | 81 | ## References 82 | - [Jorwalk - Data Engineering GCP](https://github.com/jorwalk/data-engineering-gcp) 83 | - [Google Bard AI](https://bard.google.com/) -------------------------------------------------------------------------------- /Test/Test1.md: -------------------------------------------------------------------------------- 1 | ## Knowledge Area: Machine learning 2 | 1. You are training a deep learning model for a classification task. The precision and recall of the model are quite low. What could you do to improve the precision and recall scores? 3 | - [ ] Use L2 regularization 4 | - [x] Use more training instances 5 | - [ ] Use dropout 6 | - [ ] Use L1 regularization 7 | 8 | ### Explanation: 9 | `The correct answer is to use more training instances. This is an example of underfitting. The other options are all regularizations used in cases of overfitting. See https://machinelearningmastery.com/overfitting-and-underfitting-with-machine-learning-algorithms/` 10 | 11 | 2. A team of analysts is building machine learning models. They want to use managed services when possible but they would also like the ability to customize and tune their models. In particular, they want to be able to tune hyperparameters themselves. What managed AI service would you recommend they use? 12 | - [ ] BigQuery ML 13 | - [x] Vertex AI custom training 14 | - [ ] Vertex AI AutoML training 15 | - [ ] Cloud TPUs 16 | ### Explanation: 17 | `Vertex AI custom training allows for tuning hyperparameters. Vertex AI AutoML training tunes hyperparameters for you.
BigQuery ML does not allow for hyperparameter tuning. Cloud TPUs are accelerators you can use to train large deep learning models. See https://cloud.google.com/vertex-ai/docs/start/introduction-unified-platform` 18 | 19 | 3. You are developing a deep learning model and have training data with a large number of features. You are not sure which features are important. You'd like to use a regularization technique that will drive the parameters for the least important features toward zero. What regularization technique would you use? 20 | - [ ] L2 or Ridge Regression 21 | - [x] L1 or Lasso Regression 22 | - [ ] Backpropagation 23 | - [ ] Dropout 24 | ### Explanation: 25 | `L1 or Lasso Regression adds an absolute value of magnitude penalty which drives the parameters (or coefficients) of the least useful features toward zero. L2 or Ridge Regression adds a squared magnitude penalty that penalizes large parameters. Dropout is another form of regularization that ignores some features at some steps of the training process. Backpropagation is an algorithm for assigning error penalties to nodes in a neural network. See https://cloud.google.com/bigquery-ml/docs/preventing-overfitting and https://machinelearningmastery.com/overfitting-and-underfitting-with-machine-learning-algorithms/` 26 | 27 | ## Knowledge Area: Data Management 28 | 1. You are in the process of creating lifecycle policies to manage objects stored in Cloud Storage. Which of the following are lifecycle conditions you can use in your policies? (Choose 3) 29 | - [x] Is Live 30 | - [ ] File Size 31 | - [ ] File Type 32 | - [x] Age 33 | - [x] Matches Storage Class 34 | ### Explanation 35 | `The correct answers are age, matches storage class, and is live. File type and file size are not conditions available in lifecycle management policies. See https://cloud.google.com/storage/docs/lifecycle` 36 | 2. A data warehouse team is concerned that some data sources may have poor quality controls. They do not want to bring incorrect or invalid data into the data warehouse. What could they do to understand the scope of the problem before starting to write ETL code? 37 | - [x] Perform a data quality assessment on the source data as it is extracted from the source system. This should include checks for ranges of values in each attribute, the distribution of values in each attribute, counts of invalid and missing values, and other checks on the source data. 38 | - [ ] Have administrators of the source systems produce a data quality verification before exporting the data 39 | - [ ] Load all source data into a data lake and then load it into the data warehouse 40 | - [ ] Load the data into the data warehouse and log any records that fail integrity or consistency checks 41 | 42 | ### Explanation 43 | `The correct answer is performing a data quality assessment on data extracted from the source system. Loading data from a data lake to a data warehouse will not provide an assessment of the range of the problem. Loading data into the data warehouse and logging failed checks is less efficient because it will provide log messages but not aggregate statistics on the full scope of the problem. The source systems may not have the ability to perform data quality assessments and if they do, you may get different kinds of reports from different systems. By performing a data quality assessment on extracted data you can produce a consistent set of reports for all data sources.
See https://cloud.google.com/blog/products/data-analytics/principles-and-best-practices-for-data-governance-in-the-cloud` 44 | 45 | 46 | ## Knowledge Area: Database 47 | A developer is deploying a Cloud SQL database to production and wants to follow Google Cloud recommended best practices. What should the developer use for authentication? 48 | - [x] Cloud SQL Proxy 49 | - [ ] Strong encryption 50 | - [ ] IAM 51 | - [ ] Cloud Identity 52 | ### Explanation 53 | `Cloud SQL Auth proxy is the recommended way to connect to Cloud SQL. Cloud Identity is an Identity as a Service offering provided by Google Cloud. IAM is the Identity and Access Management service for managing identities and their authorizations. Strong encryption is used to protect the confidentiality and integrity of data, not to perform authentication. See https://cloud.google.com/sql/docs/mysql/sql-proxy` 54 | 55 | 56 | 57 | 58 | ## Knowledge Area: Data Pipelines 59 | 60 | ## Knowledge Area: Access Control 61 | 62 | ## Knowledge Area: Data Analysis 63 | 64 | ## Knowledge Area: Compliance 65 | 66 | ## Knowledge Area: Monitoring And Logging -------------------------------------------------------------------------------- /Learning/Section9_GCP_Database_Product/README.md: -------------------------------------------------------------------------------- 1 | # GCP DATABASE PRODUCT 2 | 3 | Google Cloud Platform (GCP) offers a variety of database products to meet different needs, ranging from relational databases to NoSQL databases, and from managed services to fully serverless options. Below is an overview of the main GCP database products: 4 | 5 | ### Relational Databases 6 | 7 | 1. **Google Cloud SQL** 8 | - **Description**: Fully managed relational database service for MySQL, PostgreSQL, and SQL Server. 9 | - **Use Cases**: Traditional web and mobile applications, business applications. 10 | - **Key Features**: Automated backups, high availability, read replicas, and integrated monitoring. 11 | 12 | 2. **Cloud Spanner** 13 | - **Description**: Globally distributed, horizontally scalable, strongly consistent relational database. 14 | - **Use Cases**: Large-scale applications requiring high availability and strong consistency, such as financial services, gaming, and retail. 15 | - **Key Features**: Global distribution, horizontal scalability, strong consistency, automatic sharding, and high availability. 16 | 17 | ### NoSQL Databases 18 | 19 | 3. **Firestore (Cloud Firestore)** 20 | - **Description**: NoSQL document database built for automatic scaling, high performance, and ease of application development. 21 | - **Use Cases**: Real-time applications, mobile and web applications, and serverless applications. 22 | - **Key Features**: Real-time synchronization, offline support, ACID transactions, and flexible data model. 23 | 24 | 4. **Cloud Bigtable** 25 | - **Description**: High-throughput, low-latency NoSQL database ideal for large analytical and operational workloads. 26 | - **Use Cases**: IoT data, time-series data, financial data, and large-scale analytical workloads. 27 | - **Key Features**: Massive scalability, low latency, seamless integration with Hadoop and BigQuery. 28 | 29 | ### Data Warehousing 30 | 31 | 5. **BigQuery** 32 | - **Description**: Fully managed, serverless, highly scalable, and cost-effective multi-cloud data warehouse. 33 | - **Use Cases**: Data analytics, business intelligence, machine learning, and big data processing.
34 | - **Key Features**: SQL queries, real-time analytics, built-in machine learning, integration with other GCP services, and pay-as-you-go pricing. 35 | 36 | ### In-Memory Databases 37 | 38 | 6. **Cloud Memorystore** 39 | - **Description**: Fully managed in-memory data store service for Redis and Memcached. 40 | - **Use Cases**: Caching, session management, real-time analytics, and leaderboard generation. 41 | - **Key Features**: Fully managed, high availability, automatic scaling, and integration with GCP services. 42 | 43 | ### Graph Databases 44 | 45 | 7. **Neo4j on GCP** 46 | - **Description**: Graph database service available through the GCP Marketplace, allowing for the creation and querying of graph data structures. 47 | - **Use Cases**: Social networks, fraud detection, recommendation engines, and network analysis. 48 | - **Key Features**: Graph data modeling, Cypher query language, and integration with GCP services. 49 | 50 | ### Key-Value Databases 51 | 52 | 8. **Cloud Datastore** 53 | - **Description**: Scalable, fully managed NoSQL database for web and mobile applications. 54 | - **Use Cases**: Web and mobile backends, user profiles, and product catalogs. 55 | - **Key Features**: ACID transactions, high availability, automatic scaling, and flexible data model. 56 | 57 | ### Time-Series Databases 58 | 59 | 9. **Cloud InfluxDB** 60 | - **Description**: Time-series database service available through the GCP Marketplace, designed for high-performance handling of time-series data. 61 | - **Use Cases**: IoT data, monitoring, and real-time analytics. 62 | - **Key Features**: High write and query performance, time-series optimized storage, and integration with GCP services. 63 | 64 | ### Hybrid and Multi-Cloud Databases 65 | 66 | 10. **AlloyDB for PostgreSQL** 67 | - **Description**: Fully managed PostgreSQL-compatible database service designed for advanced analytics and transactional workloads. 68 | - **Use Cases**: Hybrid cloud environments, advanced analytics, and transactional applications. 69 | - **Key Features**: High performance, advanced analytics capabilities, and compatibility with existing PostgreSQL applications. 70 | 71 | ### Specialized Databases 72 | 73 | 11. **Cloud SQL for MySQL/PostgreSQL/SQL Server** 74 | - **Description**: Managed database service for MySQL, PostgreSQL, and SQL Server. 75 | - **Use Cases**: Traditional relational database applications, business applications, and web applications. 76 | - **Key Features**: Automated backups, high availability, read replicas, and integrated monitoring. 77 | 78 | ### Data Migration and Integration 79 | 80 | 12. **Database Migration Service** 81 | - **Description**: Service to migrate databases to Cloud SQL with minimal downtime. 82 | - **Use Cases**: Migrating on-premises databases to the cloud, cross-cloud migrations. 83 | - **Key Features**: Minimal downtime, secure migrations, and support for MySQL and PostgreSQL. 84 | 85 | ### Summary 86 | 87 | GCP offers a comprehensive suite of database products to address a wide range of use cases, from transactional and analytical workloads to real-time and large-scale data processing. Each product is designed to provide high performance, scalability, and integration with other GCP services, making it easier to build, manage, and scale your applications. 88 | 89 | **Recommended Learning Resources**: 90 | 91 | - **Google Cloud Documentation**: In-depth guides and tutorials for each database product. 
92 | - **Coursera and Qwiklabs**: Online courses and hands-on labs for practical experience. 93 | - **Google Cloud Training**: Official training programs and certifications. 94 | - **Books and Guides**: "Designing Data-Intensive Applications" by Martin Kleppmann and other relevant literature. 95 | 96 | By exploring these resources and gaining hands-on experience with GCP's database products, you will be well-prepared to leverage these tools for your projects and excel in the Google Cloud Professional Data Engineer certification exam. -------------------------------------------------------------------------------- /Learning/Section2_GCP_Fundamental/README.md: -------------------------------------------------------------------------------- 1 | # GCP Fundamental 2 | ## GCP Regions vs Zones 3 | In Google Cloud Platform (GCP), regions and zones are important concepts when it comes to deploying and managing resources. 4 | 5 | ### Region 6 | - A region is a specific geographical location where you can run your resources. It is made up of one or more zones. 7 | - Google Cloud Platform has multiple regions across the world to provide flexibility and options for deploying applications and services closer to your users. 8 | - Each region is independent, and resources in one region are isolated from resources in other regions, providing fault tolerance and high availability. 9 | 10 | ### Zone 11 | - A zone is an isolated location within a region. Each zone is designed to be independent of other zones in terms of power, cooling, networking, and connectivity. 12 | - By deploying resources across multiple zones within a region, you can help protect your applications and data from data center failures. 13 | - Zonal resources such as virtual machine instances, disks, and Kubernetes clusters are typically tied to a specific zone. 14 | 15 | ### Key Points 16 | - Regions provide a way to isolate workloads and provide redundancy, while zones provide distinct, independent environments within a region. 17 | - When deploying applications or services, you should consider distributing resources across multiple zones within a region to enhance availability and fault tolerance. 18 | - GCP's global network connects all regions and zones, allowing for reliable and low-latency connectivity across the platform. 19 | 20 | In summary, regions and zones in GCP play a crucial role in ensuring high availability, fault tolerance, and geographic distribution of resources, allowing you to design and deploy applications that are resilient and can meet the needs of users across the globe. 21 | 22 | `Example` 23 | ![Alt text](./images/region-zone.png) 24 | 25 | 26 | ## Note about GCP Account 27 | Here are some important points to note about a Google Cloud Platform (GCP) account: 28 | 29 | 1. **Resource Management**: A GCP account provides access to a wide range of cloud services and resources, such as virtual machines, databases, storage, and networking capabilities. 30 | 31 | 2. **Billing and Cost Management**: With a GCP account, you can manage billing settings, view usage reports, and set up budgets and alerts to monitor and control costs associated with your usage of GCP services. 32 | 33 | 3. **Identity and Access Management (IAM)**: GCP accounts are associated with IAM roles, allowing you to control and manage permissions for users, groups, and service accounts, ensuring secure access to resources. 34 | 35 | 4. **Organization Hierarchy**: GCP accounts can be organized into a hierarchical structure using GCP Organizations.
This allows for centralized management of policies, permissions, and resources across an organization. 36 | 37 | 5. **API Access and Service Usage**: GCP accounts enable access to a wide range of APIs and services, including tools for development, data analysis, machine learning, and more. 38 | 39 | 6. **Support and Service Level Agreements (SLAs)**: Depending on the type of GCP account, you may have access to support options and SLAs that provide commitments for service availability and performance. 40 | 41 | 7. **Integration with Other Google Services**: GCP accounts are often integrated with other Google services such as Gmail, Google Workspace, and Google Analytics, providing a comprehensive suite of cloud and productivity tools. 42 | 43 | It's important to note that GCP accounts come in different types, such as personal accounts, enterprise accounts, and accounts associated with organizations. Each type of account has its own set of features, capabilities, and management options based on the specific needs of the user or organization. 44 | ## GCP Service 45 | Google Cloud Platform (GCP) offers a wide range of services and products to support cloud computing, storage, machine learning, big data, and more. Here are some key GCP services: 46 | 47 | 1. **Compute**: 48 | - Compute Engine: Provides virtual machines (VMs) for running applications. 49 | - Kubernetes Engine: Offers managed Kubernetes clusters for containerized applications. 50 | - App Engine: Platform as a Service (PaaS) for building and deploying applications. 51 | 52 | 2. **Storage**: 53 | - Cloud Storage: Object storage for storing and accessing data. 54 | - Cloud SQL: Fully managed relational databases. 55 | - Cloud Bigtable: NoSQL wide-column database for large analytical and operational workloads. 56 | 57 | 3. **Networking**: 58 | - Virtual Private Cloud (VPC): Provides networking functionality for GCP resources. 59 | - Cloud Load Balancing: Distributes incoming traffic across multiple instances. 60 | - Cloud CDN: Content Delivery Network for delivering content to users. 61 | 62 | 4. **Big Data and Machine Learning**: 63 | - BigQuery: Serverless, highly scalable enterprise data warehouse for analytics. 64 | - Dataflow: Fully managed stream and batch data processing. 65 | - AI Platform: Managed services for building, testing, and deploying machine learning models. 66 | 67 | 5. **Databases**: 68 | - Cloud Spanner: Horizontally scalable, strongly consistent, relational database service. 69 | - Firebase Realtime Database: NoSQL cloud database for mobile and web applications. 70 | - Firestore: Flexible, scalable database for mobile, web, and server development. 71 | 72 | 6. **Identity and Security**: 73 | - Identity and Access Management (IAM): Provides centralized access management for GCP resources. 74 | - Cloud Identity: Manages users, devices, and apps from a single console. 75 | - Security Command Center: Provides security and risk insights for GCP resources. 76 | 77 | 7. **Developer Tools**: 78 | - Cloud Build: Continuously build, test, and deploy software. 79 | - Cloud Source Repositories: Hosted private Git repositories. 80 | - Firebase: Platform for building and operating mobile and web applications. 81 | 82 | 8. **Management Tools**: 83 | - Stackdriver: Monitoring, logging, and diagnostics for applications on GCP. 84 | - Deployment Manager: Infrastructure as Code service for managing GCP resources. 85 | - Cloud Console: Web-based user interface to manage GCP resources. 
86 | 87 | These are just a few examples of the extensive suite of services and products offered by Google Cloud Platform to support a wide range of use cases, from basic infrastructure needs to advanced data analytics and machine learning applications. -------------------------------------------------------------------------------- /Learning/Section5_Google_Cloud_Storage/README.md: -------------------------------------------------------------------------------- 1 | # Google Cloud Storage 2 | 3 | ## Storage Location 4 | Here's a table representing the requirements, recommended bucket locations, and workload examples: 5 | 6 | | Requirements | Recommended Bucket Location | Workload Examples | 7 | |---------------------------------------|------------------------------|---------------------------------------------| 8 | | Optimized latency and bandwidth | Regional | Analytics, Backup and archive | 9 | | Lowest data storage cost | Regional | Analytics, Backup and archive | 10 | | Cross-zone redundancy | Regional | Analytics, Backup and archive | 11 | | Optimized latency and bandwidth | Dual-region | Analytics, Backup and archive, Disaster recovery | 12 | | Cross-region redundancy | Dual-region | Analytics, Backup and archive, Disaster recovery | 13 | | Cross-geography data access | Multi-region | Content serving | 14 | | Highest availability | Multi-region | Content serving | 15 | 16 | This table outlines the requirements, the recommended bucket locations, and typical workload examples associated with each requirement. 17 | ## Storage Class 18 | Here's the provided information organized in a table format: 19 | 20 | | Storage Class | Name for APIs and CLIs | Minimum Storage Duration | Retrieval Fees | Typical Monthly Availability | 21 | |-----------------|-------------------------|--------------------------|-----------------|------------------------------| 22 | | Standard storage | STANDARD | None | None | >99.99% in multi-regions and dual-regions, 99.99% in regions | 23 | | Nearline storage | NEARLINE | 30 days | Yes | 99.95% in multi-regions and dual-regions, 99.9% in regions | 24 | | Coldline storage | COLDLINE | 90 days | Yes | 99.95% in multi-regions and dual-regions, 99.9% in regions | 25 | | Archive storage | ARCHIVE | 365 days | Yes | 99.95% in multi-regions and dual-regions, 99.9% in regions | 26 | ## Storage Options 27 | Here's a table summarizing the storage types, their descriptions, and the best use cases for each: 28 | 29 | | Storage Type | Description | Best For | 30 | |--------------------|-----------------------------------------------------------------------------------------------|---------------------------------------------| 31 | | Standard Storage | Storage for frequently accessed ("hot") data, and/or data stored for brief periods of time. | Websites, streaming videos, mobile apps | 32 | | Nearline Storage | Low-cost, highly durable storage for infrequently accessed data, accessed less than once every 30 days. | Data storage for 30-day access patterns | 33 | | Coldline Storage | Very low-cost, highly durable storage for infrequently accessed data, accessed less than once every 90 days. | Data storage for 90-day access patterns | 34 | | Archival Storage | Lowest cost, highly durable storage for data archiving, online backup, and disaster recovery. | Long-term data archiving and disaster recovery purposes, data accessed less than once a year.
| 35 | 36 | ## Securing Data with Encryption 37 | When working with Google Cloud Storage (GCS) and encryption, you can ensure the security of your data using several methods: 38 | 39 | 1. **Server-Side Encryption (SSE)**: 40 | - GCS provides default encryption for all data at rest. 41 | - Data is automatically encrypted before it is written to disk and decrypted after it is read from disk. 42 | 43 | 2. **Customer-Supplied Encryption Keys (CSEK)**: 44 | - You can manage your own encryption keys and provide them when you upload or download data to or from GCS. 45 | - GCS uses these keys to encrypt and decrypt your data. 46 | 47 | 3. **Customer-Managed Encryption Keys (CMEK)**: 48 | - With CMEK, you can use Cloud Key Management Service (KMS) to manage and control your encryption keys. 49 | - GCS uses these keys to encrypt and decrypt your data at rest. 50 | 51 | 4. **Client-Side Encryption**: 52 | - You can encrypt your data locally before sending it to GCS and decrypt it after retrieval. 53 | - This provides an additional layer of protection, as the data is already encrypted before reaching the storage service. 54 | 55 | ## Object Lifecycle Management 56 | Object Lifecycle Management in Google Cloud Storage (GCS) allows you to automate the management of your objects over their lifetime. You can define lifecycle management rules to automatically transition or delete objects based on their age or storage class. 57 | 58 | Key features of GCS Object Lifecycle Management include: 59 | 60 | 1. **Transition Actions**: 61 | - Automatically transition objects to a different storage class after a specified duration. For example, you can move objects from Standard to Nearline or Coldline storage classes to optimize storage costs. 62 | 63 | 2. **Delete Actions**: 64 | - Automatically delete objects after a specified duration. This can help to enforce data retention policies and ensure compliance with data privacy regulations. 65 | 66 | 3. **Conditions**: 67 | - Define rules based on object age or "created before" date to trigger transitions or deletions. 68 | 69 | 4. **Rules**: 70 | - Rules can be set at the bucket level to apply to all objects in the bucket, or at the object level to apply to specific objects. 71 | 72 | 5. **Storage Class Transitions**: 73 | - Object Lifecycle Management supports transitioning objects between different storage classes, enabling cost optimization based on access patterns. 74 | 75 | By using Object Lifecycle Management in GCS, you can effectively manage your storage costs, automate data retention policies, and ensure that objects are stored in the most cost-effective and operationally efficient manner throughout their lifecycle. 76 | 77 | ## Google Cloud Storage Pricing 78 | Cloud Storage pricing is based on the following components: 79 | 80 | **Data storage:** the amount of data stored in your buckets. Storage rates vary depending on the storage class of your data and location of your buckets. 81 | **Data processing:** the processing done by Cloud Storage, which includes operations charges, any applicable retrieval fees, and inter-region replication. 82 | **Network usage:** the amount of data read from or moved between your buckets.
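To illustrate the Object Lifecycle Management section above in code, here is a minimal sketch using the `@google-cloud/storage` Node.js client to add a storage-class transition rule and a deletion rule to a bucket (the bucket name is a placeholder, and the rule shape assumes the current `addLifecycleRule` API):

```js
// Minimal sketch: Object Lifecycle Management rules via @google-cloud/storage.
// Assumes `npm install @google-cloud/storage`; bucket name is a placeholder.
const {Storage} = require('@google-cloud/storage');
const storage = new Storage();

async function configureLifecycle() {
  const bucket = storage.bucket('my-example-bucket'); // placeholder

  // Transition objects older than 30 days to Nearline storage.
  await bucket.addLifecycleRule({
    action: {type: 'SetStorageClass', storageClass: 'NEARLINE'},
    condition: {age: 30},
  });

  // Delete objects older than 365 days.
  await bucket.addLifecycleRule({
    action: {type: 'Delete'},
    condition: {age: 365},
  });

  // Print the resulting lifecycle configuration.
  const [metadata] = await bucket.getMetadata();
  console.log(JSON.stringify(metadata.lifecycle, null, 2));
}

configureLifecycle().catch(console.error);
```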
-------------------------------------------------------------------------------- /Learning/Section3_GCP_Basic_Services/IAM/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## IAM 3 | IAM (Identity and Access Management) in Google Cloud Platform (GCP) is a robust service that allows you to manage access control by defining who (identity) has what access (role) for which resource. Here are some key aspects of IAM in GCP: 4 | 5 | 1. **Principals**: 6 | - IAM allows you to manage access for various types of identities, including individual users, groups, service accounts, and Google Groups. 7 | 8 | 2. **Roles**: 9 | - GCP provides predefined roles that encompass a set of permissions for specific GCP resources. These roles can be assigned to users or groups to grant them the necessary permissions. 10 | 11 | 3. **Custom Roles**: 12 | - In addition to predefined roles, IAM also supports creating custom roles that allow you to define granular permissions for specific use cases. 13 | 14 | 4. **Resource Hierarchy**: 15 | - IAM policies can be applied at different levels of the GCP resource hierarchy, such as the organization, folder, or project level, enabling fine-grained control over resource access. 16 | 17 | 5. **Permissions**: 18 | - Permissions in GCP represent a particular capability, such as reading or writing data to a storage bucket, creating virtual machines, or managing IAM policies. 19 | 20 | 6. **Policy Binding**: 21 | - IAM policies are attached to GCP resources using policy bindings, which specify the members who have the defined roles for the resource. 22 | 23 | 7. **Access Control Lists (ACLs)**: 24 | - IAM allows you to set up access control lists to specify who has access to specific resources and what level of access they have. 25 | 26 | 8. **Audit Logging**: 27 | - GCP provides detailed audit logs for IAM activities, allowing you to monitor who has accessed your resources and what actions they have performed. 28 | 29 | IAM in GCP provides a flexible and secure way to manage access to GCP resources, ensuring that only authorized users and services have the necessary permissions to perform specific actions within the cloud environment. 30 | ## IAM - Identity 31 | In Google Cloud Platform (GCP), IAM (Identity and Access Management) supports various types of identities for managing access to resources. These identities play a crucial role in defining who has access to which resources and what actions they can perform. Here are the key types of identities in GCP IAM: 32 | 33 | 1. **Google Account**: 34 | - A Google Account represents a personal Google identity and is typically associated with an individual user. It is used for accessing various Google services, including GCP. 35 | 36 | 2. **Service Account**: 37 | - Service accounts are used by applications, virtual machines (VMs), and other software to access GCP resources. They are not associated with a specific user but are intended for programmatic access. 38 | 39 | 3. **Google Group**: 40 | - Google Groups allow you to manage a collection of Google Accounts and Service Accounts. You can grant roles to Google Groups, making it easier to manage access for multiple users at once. 41 | 42 | 4. **G Suite Domain**: 43 | - G Suite domains represent an organization's domain in the G Suite environment. G Suite users can be granted access to GCP resources through their G Suite identity. 44 | 45 | 5.
**Cloud Identity Domain**: 46 | - Similar to G Suite domains, Cloud Identity domains represent an organization's domain in the Cloud Identity environment. Cloud Identity users can be granted access to GCP resources through their Cloud Identity identity. 47 | 48 | 6. **IAM Policy Member**: 49 | - IAM policy members can be individual users, service accounts, Google Groups, G Suite domains, Cloud Identity domains, or the public. These are entities to which roles are assigned within IAM policies. 50 | 51 | Each of these identity types can be granted roles at different levels of the GCP resource hierarchy, allowing you to control access to various resources within your GCP environment. Understanding and managing these identities is essential for implementing a secure and well-organized access control strategy within GCP. 52 | 53 | ## IAM - Role and Permissions 54 | 55 | In Google Cloud Platform (GCP) IAM (Identity and Access Management), roles and permissions play a crucial role in controlling access to GCP resources. Here's an overview of roles and permissions in GCP IAM: 56 | 57 | ### Roles: 58 | 1. **Predefined Roles**: 59 | - GCP provides a set of predefined roles, such as Owner, Editor, and Viewer, that encompass a set of permissions for specific GCP resources. These roles are managed by Google and cover common use cases. 60 | 61 | 2. **Custom Roles**: 62 | - GCP IAM allows you to create custom roles to define granular permissions tailored to your specific use cases. Custom roles enable you to control access at a more detailed level. 63 | 64 | ### Permissions: 65 | 1. **Definition**: 66 | - Permissions in GCP represent a specific capability, such as reading or writing data to a storage bucket, creating virtual machines, or managing IAM policies. 67 | 68 | 2. **Granularity**: 69 | - GCP permissions are granular, allowing you to specify fine-grained access control for individual resources. Each GCP API defines its set of permissions, and roles are associated with these permissions. 70 | 71 | 3. **Assignment**: 72 | - Roles are assigned to identities (such as users, groups, and service accounts) and determine what actions can be performed on specific resources. 73 | 74 | ### Role Hierarchy: 75 | 1. **Hierarchy Levels**: 76 | - IAM roles can be applied at different levels of the GCP resource hierarchy, such as the organization, folder, or project level. This allows for fine-grained control over resource access. 77 | 78 | 2. **Inheritance**: 79 | - Roles can be inherited through the resource hierarchy, allowing for consistent access control across an organization's GCP resources. 80 | 81 | ### Best Practices: 82 | 1. **Least Privilege**: 83 | - Follow the principle of least privilege when assigning roles and permissions, granting users only the access they need to perform their job functions. 84 | 85 | 2. **Regular Review**: 86 | - Periodically review roles and permissions to ensure they align with the current needs of your organization and to revoke unnecessary access. 87 | 88 | 3. **Auditing and Monitoring**: 89 | - Use IAM audit logs to track changes to roles and permissions and to monitor access to sensitive resources. 90 | 91 | Understanding roles and permissions in GCP IAM is essential for effectively managing access control, ensuring security, and maintaining compliance within your GCP environment. 
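To ground these concepts in code, here is a minimal sketch of the common get-modify-set pattern for IAM policies, shown on a Cloud Storage bucket with the `@google-cloud/storage` Node.js client (the bucket name and member identity are placeholders; the same pattern applies to other GCP resources):

```js
// Minimal sketch: grant a predefined role on a bucket via IAM policy bindings.
// Assumes `npm install @google-cloud/storage`; names below are placeholders.
const {Storage} = require('@google-cloud/storage');
const storage = new Storage();

async function grantObjectViewer() {
  const bucket = storage.bucket('my-example-bucket'); // placeholder

  // Read the current policy (version 3 supports conditional bindings).
  const [policy] = await bucket.iam.getPolicy({requestedPolicyVersion: 3});

  // Append a binding: the member receives a predefined role on this bucket.
  policy.bindings.push({
    role: 'roles/storage.objectViewer',
    members: ['user:analyst@example.com'], // placeholder identity
  });

  await bucket.iam.setPolicy(policy);
  console.log('Granted roles/storage.objectViewer on the bucket.');
}

grantObjectViewer().catch(console.error);
```

Following the least-privilege guidance above, prefer narrowly scoped predefined or custom roles over broad primitive roles such as Editor or Owner.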
92 | -------------------------------------------------------------------------------- /Exam-Guide/README.md: -------------------------------------------------------------------------------- 1 | ## Exam Guide Overview 2 | Section 1: Designing data processing systems (~22% of the exam) 3 | 4 | 1.1 Designing for security and compliance. Considerations include: 5 | 6 | ● Identity and Access Management (e.g., Cloud IAM and organization policies) 7 | 8 | ● Data security (encryption and key management) 9 | 10 | ● Privacy (e.g., personally identifiable information, and Cloud Data Loss Prevention API) 11 | 12 | ● Regional considerations (data sovereignty) for data access and storage 13 | 14 | ● Legal and regulatory compliance 15 | 16 | 1.2 Designing for reliability and fidelity. Considerations include: 17 | 18 | ● Preparing and cleaning data (e.g., Dataprep, Dataflow, and Cloud Data Fusion) 19 | 20 | ● Monitoring and orchestration of data pipelines 21 | 22 | ● Disaster recovery and fault tolerance 23 | 24 | ● Making decisions related to ACID (atomicity, consistency, isolation, and durability) compliance and availability 25 | 26 | ● Data validation 27 | 28 | 1.3 Designing for flexibility and portability. Considerations include: 29 | 30 | ● Mapping current and future business requirements to the architecture 31 | 32 | ● Designing for data and application portability (e.g., multi-cloud and data residency requirements) 33 | 34 | ● Data staging, cataloging, and discovery (data governance) 35 | 36 | 1.4 Designing data migrations. Considerations include: 37 | 38 | ● Analyzing current stakeholder needs, users, processes, and technologies and creating a plan to get to desired state 39 | 40 | ● Planning migration to Google Cloud (e.g., BigQuery Data Transfer Service, Database Migration Service, Transfer Appliance, Google Cloud networking, Datastream) 41 | 42 | ● Designing the migration validation strategy 43 | 44 | ● Designing the project, dataset, and table architecture to ensure proper data governance 45 | 46 | Section 2: Ingesting and processing the data (~25% of the exam) 47 | 48 | 2.1 Planning the data pipelines. Considerations include: 49 | 50 | ● Defining data sources and sinks 51 | 52 | ● Defining data transformation logic 53 | 54 | ● Networking fundamentals 55 | 56 | ● Data encryption 57 | 58 | 2.2 Building the pipelines. Considerations include: 59 | 60 | ● Data cleansing 61 | 62 | ● Identifying the services (e.g., Dataflow, Apache Beam, Dataproc, Cloud Data Fusion, BigQuery, Pub/Sub, Apache Spark, Hadoop ecosystem, and Apache Kafka) 63 | 64 | ● Transformations 65 | 66 | ○ Batch 67 | 68 | ○ Streaming (e.g., windowing, late arriving data) 69 | 70 | ○ Language 71 | 72 | ○ Ad hoc data ingestion (one-time or automated pipeline) 73 | 74 | ● Data acquisition and import 75 | 76 | ● Integrating with new data sources 77 | 78 | 2.3 Deploying and operationalizing the pipelines. Considerations include: 79 | 80 | ● Job automation and orchestration (e.g., Cloud Composer and Workflows) 81 | 82 | ● CI/CD (Continuous Integration and Continuous Deployment) 83 | 84 | Section 3: Storing the data (~20% of the exam) 85 | 86 | 3.1 Selecting storage systems. Considerations include: 87 | 88 | ● Analyzing data access patterns 89 | 90 | ● Choosing managed services (e.g., Bigtable, Cloud Spanner, Cloud SQL, Cloud Storage, Firestore, Memorystore) 91 | 92 | ● Planning for storage costs and performance 93 | 94 | ● Lifecycle management of data 95 | 96 | 3.2 Planning for using a data warehouse. 
Considerations include: 97 | 98 | ● Designing the data model 99 | 100 | ● Deciding the degree of data normalization 101 | 102 | ● Mapping business requirements 103 | 104 | ● Defining architecture to support data access patterns 105 | 106 | 3.3 Using a data lake. Considerations include: 107 | 108 | ● Managing the lake (configuring data discovery, access, and cost controls) 109 | 110 | ● Processing data 111 | 112 | ● Monitoring the data lake 113 | 114 | 3.4 Designing for a data mesh. Considerations include: 115 | 116 | ● Building a data mesh based on requirements by using Google Cloud tools (e.g., Dataplex, Data Catalog, BigQuery, Cloud Storage) 117 | 118 | ● Segmenting data for distributed team usage 119 | 120 | ● Building a federated governance model for distributed data systems 121 | 122 | Section 4: Preparing and using data for analysis (~15% of the exam) 123 | 124 | 4.1 Preparing data for visualization. Considerations include: 125 | 126 | ● Connecting to tools 127 | 128 | ● Precalculating fields 129 | 130 | ● BigQuery materialized views (view logic) 131 | 132 | ● Determining granularity of time data 133 | 134 | ● Troubleshooting poorly performing queries 135 | 136 | ● Identity and Access Management (IAM) and Cloud Data Loss Prevention (Cloud DLP) 137 | 138 | 4.2 Sharing data. Considerations include: 139 | 140 | ● Defining rules to share data 141 | 142 | ● Publishing datasets 143 | 144 | ● Publishing reports and visualizations 145 | 146 | ● Analytics Hub 147 | 148 | 4.3 Exploring and analyzing data. Considerations include: 149 | 150 | ● Preparing data for feature engineering (training and serving machine learning models) 151 | 152 | ● Conducting data discovery 153 | 154 | Section 5: Maintaining and automating data workloads (~18% of the exam) 155 | 156 | 5.1 Optimizing resources. Considerations include: 157 | 158 | ● Minimizing costs per required business need for data 159 | 160 | ● Ensuring that enough resources are available for business-critical data processes 161 | 162 | ● Deciding between persistent or job-based data clusters (e.g., Dataproc) 163 | 164 | 5.2 Designing automation and repeatability. Considerations include: 165 | 166 | ● Creating directed acyclic graphs (DAGs) for Cloud Composer 167 | 168 | ● Scheduling jobs in a repeatable way 169 | 170 | 5.3 Organizing workloads based on business requirements. Considerations include: 171 | 172 | ● Flex, on-demand, and flat-rate slot pricing (index on flexibility or fixed capacity) 173 | 174 | ● Interactive or batch query jobs 175 | 176 | 5.4 Monitoring and troubleshooting processes. Considerations include: 177 | 178 | ● Observability of data processes (e.g., Cloud Monitoring, Cloud Logging, BigQuery admin panel) 179 | 180 | ● Monitoring planned usage 181 | 182 | ● Troubleshooting error messages, billing issues, and quotas 183 | 184 | ● Managing workloads, such as jobs, queries, and compute capacity (reservations) 185 | 186 | 5.5 Maintaining awareness of failures and mitigating impact. 
Considerations include: 187 | 188 | ● Designing systems for fault tolerance and managing restarts 189 | 190 | ● Running jobs in multiple regions or zones 191 | 192 | ● Preparing for data corruption and missing data 193 | 194 | ● Data replication and failover (e.g., Cloud SQL, Redis clusters) 195 | 196 | -------------------------------------------------------------------------------- /LICENCE: -------------------------------------------------------------------------------- 1 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 2 | Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. 3 | 4 | Using Creative Commons Public Licenses 5 | 6 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. 7 | 8 | Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors. 9 | 10 | Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public. 11 | 12 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License 13 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License ("Public License"). 
To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 14 | 15 | Section 1 – Definitions. 16 | a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 17 | 18 | b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 19 | 20 | c. BY-NC-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License. 21 | 22 | d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 23 | 24 | e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 25 | 26 | f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 27 | 28 | g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution, NonCommercial, and ShareAlike. 29 | 30 | h. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 31 | 32 | i. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 33 | 34 | j. Licensor means the individual(s) or entity(ies) granting rights under this Public License. 35 | 36 | k. NonCommercial means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. 37 | 38 | l. 
Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 39 | 40 | m. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 41 | 42 | n. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. 43 | 44 | Section 2 – Scope. 45 | a. License grant. 46 | 47 | Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 48 | 49 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and 50 | 51 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 52 | 53 | Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 54 | 55 | Term. The term of this Public License is specified in Section 6(a). 56 | 57 | Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 58 | 59 | Downstream recipients. 60 | 61 | A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 62 | 63 | B. Additional offer from the Licensor – Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply. 64 | 65 | C. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 66 | 67 | No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 68 | 69 | b. Other rights. 
70 | 71 | Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 72 | 73 | Patent and trademark rights are not licensed under this Public License. 74 | 75 | To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. 76 | 77 | Section 3 – License Conditions. 78 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 79 | 80 | a. Attribution. 81 | 82 | If You Share the Licensed Material (including in modified form), You must: 83 | 84 | A. retain the following if it is supplied by the Licensor with the Licensed Material: 85 | 86 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 87 | 88 | ii. a copyright notice; 89 | 90 | iii. a notice that refers to this Public License; 91 | 92 | iv. a notice that refers to the disclaimer of warranties; 93 | 94 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 95 | 96 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 97 | 98 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 99 | 100 | You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 101 | 102 | If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 103 | 104 | b. ShareAlike. 105 | 106 | In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 107 | 108 | The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-NC-SA Compatible License. 109 | 110 | You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 111 | 112 | You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. 113 | 114 | Section 4 – Sui Generis Database Rights. 115 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 116 | 117 | a. 
for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; 118 | 119 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and 120 | 121 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 122 | 123 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 124 | 125 | Section 5 – Disclaimer of Warranties and Limitation of Liability. 126 | a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You. 127 | 128 | b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You. 129 | 130 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 131 | 132 | Section 6 – Term and Termination. 133 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 134 | 135 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 136 | 137 | automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 138 | 139 | upon express reinstatement by the Licensor. 140 | 141 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 142 | 143 | c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 144 | 145 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 146 | 147 | Section 7 – Other Terms and Conditions. 148 | a. 
The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 149 | 150 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 151 | 152 | Section 8 – Interpretation. 153 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 154 | 155 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 156 | 157 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 158 | 159 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 160 | 161 | Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. 
162 | 163 | Creative Commons may be contacted at creativecommons.org -------------------------------------------------------------------------------- /Hands-On/bigquery/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bigquery", 3 | "version": "1.0.0", 4 | "lockfileVersion": 3, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "bigquery", 9 | "version": "1.0.0", 10 | "license": "ISC", 11 | "dependencies": { 12 | "@google-cloud/bigquery": "^7.3.0" 13 | } 14 | }, 15 | "node_modules/@google-cloud/bigquery": { 16 | "version": "7.3.0", 17 | "resolved": "https://registry.npmjs.org/@google-cloud/bigquery/-/bigquery-7.3.0.tgz", 18 | "integrity": "sha512-a/qQBOMlSRIVm262Kp3+Zg2+Et2fl+AwQv8mpVtvrpyk+wYQaY3dTK3U+RE6aWCiq/R1xo1vIXIYCRAQAoBsfw==", 19 | "dependencies": { 20 | "@google-cloud/common": "^5.0.0", 21 | "@google-cloud/paginator": "^5.0.0", 22 | "@google-cloud/precise-date": "^4.0.0", 23 | "@google-cloud/promisify": "^4.0.0", 24 | "arrify": "^2.0.1", 25 | "big.js": "^6.0.0", 26 | "duplexify": "^4.0.0", 27 | "extend": "^3.0.2", 28 | "is": "^3.3.0", 29 | "stream-events": "^1.0.5", 30 | "uuid": "^9.0.0" 31 | }, 32 | "engines": { 33 | "node": ">=14.0.0" 34 | } 35 | }, 36 | "node_modules/@google-cloud/common": { 37 | "version": "5.0.1", 38 | "resolved": "https://registry.npmjs.org/@google-cloud/common/-/common-5.0.1.tgz", 39 | "integrity": "sha512-7NBC5vD0au75nkctVs2vEGpdUPFs1BaHTMpeI+RVEgQSMe5/wEU6dx9p0fmZA0bj4HgdpobMKeegOcLUiEoxng==", 40 | "dependencies": { 41 | "@google-cloud/projectify": "^4.0.0", 42 | "@google-cloud/promisify": "^4.0.0", 43 | "arrify": "^2.0.1", 44 | "duplexify": "^4.1.1", 45 | "ent": "^2.2.0", 46 | "extend": "^3.0.2", 47 | "google-auth-library": "^9.0.0", 48 | "retry-request": "^7.0.0", 49 | "teeny-request": "^9.0.0" 50 | }, 51 | "engines": { 52 | "node": ">=14.0.0" 53 | } 54 | }, 55 | "node_modules/@google-cloud/paginator": { 56 | "version": "5.0.0", 57 | "resolved": "https://registry.npmjs.org/@google-cloud/paginator/-/paginator-5.0.0.tgz", 58 | "integrity": "sha512-87aeg6QQcEPxGCOthnpUjvw4xAZ57G7pL8FS0C4e/81fr3FjkpUpibf1s2v5XGyGhUVGF4Jfg7yEcxqn2iUw1w==", 59 | "dependencies": { 60 | "arrify": "^2.0.0", 61 | "extend": "^3.0.2" 62 | }, 63 | "engines": { 64 | "node": ">=14.0.0" 65 | } 66 | }, 67 | "node_modules/@google-cloud/precise-date": { 68 | "version": "4.0.0", 69 | "resolved": "https://registry.npmjs.org/@google-cloud/precise-date/-/precise-date-4.0.0.tgz", 70 | "integrity": "sha512-1TUx3KdaU3cN7nfCdNf+UVqA/PSX29Cjcox3fZZBtINlRrXVTmUkQnCKv2MbBUbCopbK4olAT1IHl76uZyCiVA==", 71 | "engines": { 72 | "node": ">=14.0.0" 73 | } 74 | }, 75 | "node_modules/@google-cloud/projectify": { 76 | "version": "4.0.0", 77 | "resolved": "https://registry.npmjs.org/@google-cloud/projectify/-/projectify-4.0.0.tgz", 78 | "integrity": "sha512-MmaX6HeSvyPbWGwFq7mXdo0uQZLGBYCwziiLIGq5JVX+/bdI3SAq6bP98trV5eTWfLuvsMcIC1YJOF2vfteLFA==", 79 | "engines": { 80 | "node": ">=14.0.0" 81 | } 82 | }, 83 | "node_modules/@google-cloud/promisify": { 84 | "version": "4.0.0", 85 | "resolved": "https://registry.npmjs.org/@google-cloud/promisify/-/promisify-4.0.0.tgz", 86 | "integrity": "sha512-Orxzlfb9c67A15cq2JQEyVc7wEsmFBmHjZWZYQMUyJ1qivXyMwdyNOs9odi79hze+2zqdTtu1E19IM/FtqZ10g==", 87 | "engines": { 88 | "node": ">=14" 89 | } 90 | }, 91 | "node_modules/@tootallnate/once": { 92 | "version": "2.0.0", 93 | "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", 94 | "integrity": 
"sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", 95 | "engines": { 96 | "node": ">= 10" 97 | } 98 | }, 99 | "node_modules/@types/caseless": { 100 | "version": "0.12.5", 101 | "resolved": "https://registry.npmjs.org/@types/caseless/-/caseless-0.12.5.tgz", 102 | "integrity": "sha512-hWtVTC2q7hc7xZ/RLbxapMvDMgUnDvKvMOpKal4DrMyfGBUfB1oKaZlIRr6mJL+If3bAP6sV/QneGzF6tJjZDg==" 103 | }, 104 | "node_modules/@types/node": { 105 | "version": "20.10.4", 106 | "resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.4.tgz", 107 | "integrity": "sha512-D08YG6rr8X90YB56tSIuBaddy/UXAA9RKJoFvrsnogAum/0pmjkgi4+2nx96A330FmioegBWmEYQ+syqCFaveg==", 108 | "dependencies": { 109 | "undici-types": "~5.26.4" 110 | } 111 | }, 112 | "node_modules/@types/request": { 113 | "version": "2.48.12", 114 | "resolved": "https://registry.npmjs.org/@types/request/-/request-2.48.12.tgz", 115 | "integrity": "sha512-G3sY+NpsA9jnwm0ixhAFQSJ3Q9JkpLZpJbI3GMv0mIAT0y3mRabYeINzal5WOChIiaTEGQYlHOKgkaM9EisWHw==", 116 | "dependencies": { 117 | "@types/caseless": "*", 118 | "@types/node": "*", 119 | "@types/tough-cookie": "*", 120 | "form-data": "^2.5.0" 121 | } 122 | }, 123 | "node_modules/@types/tough-cookie": { 124 | "version": "4.0.5", 125 | "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.5.tgz", 126 | "integrity": "sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==" 127 | }, 128 | "node_modules/agent-base": { 129 | "version": "7.1.0", 130 | "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.0.tgz", 131 | "integrity": "sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==", 132 | "dependencies": { 133 | "debug": "^4.3.4" 134 | }, 135 | "engines": { 136 | "node": ">= 14" 137 | } 138 | }, 139 | "node_modules/arrify": { 140 | "version": "2.0.1", 141 | "resolved": "https://registry.npmjs.org/arrify/-/arrify-2.0.1.tgz", 142 | "integrity": "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==", 143 | "engines": { 144 | "node": ">=8" 145 | } 146 | }, 147 | "node_modules/asynckit": { 148 | "version": "0.4.0", 149 | "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", 150 | "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" 151 | }, 152 | "node_modules/base64-js": { 153 | "version": "1.5.1", 154 | "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", 155 | "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", 156 | "funding": [ 157 | { 158 | "type": "github", 159 | "url": "https://github.com/sponsors/feross" 160 | }, 161 | { 162 | "type": "patreon", 163 | "url": "https://www.patreon.com/feross" 164 | }, 165 | { 166 | "type": "consulting", 167 | "url": "https://feross.org/support" 168 | } 169 | ] 170 | }, 171 | "node_modules/big.js": { 172 | "version": "6.2.1", 173 | "resolved": "https://registry.npmjs.org/big.js/-/big.js-6.2.1.tgz", 174 | "integrity": "sha512-bCtHMwL9LeDIozFn+oNhhFoq+yQ3BNdnsLSASUxLciOb1vgvpHsIO1dsENiGMgbb4SkP5TrzWzRiLddn8ahVOQ==", 175 | "engines": { 176 | "node": "*" 177 | }, 178 | "funding": { 179 | "type": "opencollective", 180 | "url": "https://opencollective.com/bigjs" 181 | } 182 | }, 183 | "node_modules/bignumber.js": { 184 | "version": "9.1.2", 185 | "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.1.2.tgz", 186 | 
"integrity": "sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug==", 187 | "engines": { 188 | "node": "*" 189 | } 190 | }, 191 | "node_modules/buffer-equal-constant-time": { 192 | "version": "1.0.1", 193 | "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", 194 | "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==" 195 | }, 196 | "node_modules/combined-stream": { 197 | "version": "1.0.8", 198 | "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", 199 | "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", 200 | "dependencies": { 201 | "delayed-stream": "~1.0.0" 202 | }, 203 | "engines": { 204 | "node": ">= 0.8" 205 | } 206 | }, 207 | "node_modules/debug": { 208 | "version": "4.3.4", 209 | "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", 210 | "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", 211 | "dependencies": { 212 | "ms": "2.1.2" 213 | }, 214 | "engines": { 215 | "node": ">=6.0" 216 | }, 217 | "peerDependenciesMeta": { 218 | "supports-color": { 219 | "optional": true 220 | } 221 | } 222 | }, 223 | "node_modules/delayed-stream": { 224 | "version": "1.0.0", 225 | "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", 226 | "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", 227 | "engines": { 228 | "node": ">=0.4.0" 229 | } 230 | }, 231 | "node_modules/duplexify": { 232 | "version": "4.1.2", 233 | "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-4.1.2.tgz", 234 | "integrity": "sha512-fz3OjcNCHmRP12MJoZMPglx8m4rrFP8rovnk4vT8Fs+aonZoCwGg10dSsQsfP/E62eZcPTMSMP6686fu9Qlqtw==", 235 | "dependencies": { 236 | "end-of-stream": "^1.4.1", 237 | "inherits": "^2.0.3", 238 | "readable-stream": "^3.1.1", 239 | "stream-shift": "^1.0.0" 240 | } 241 | }, 242 | "node_modules/ecdsa-sig-formatter": { 243 | "version": "1.0.11", 244 | "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", 245 | "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", 246 | "dependencies": { 247 | "safe-buffer": "^5.0.1" 248 | } 249 | }, 250 | "node_modules/end-of-stream": { 251 | "version": "1.4.4", 252 | "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", 253 | "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", 254 | "dependencies": { 255 | "once": "^1.4.0" 256 | } 257 | }, 258 | "node_modules/ent": { 259 | "version": "2.2.0", 260 | "resolved": "https://registry.npmjs.org/ent/-/ent-2.2.0.tgz", 261 | "integrity": "sha512-GHrMyVZQWvTIdDtpiEXdHZnFQKzeO09apj8Cbl4pKWy4i0Oprcq17usfDt5aO63swf0JOeMWjWQE/LzgSRuWpA==" 262 | }, 263 | "node_modules/extend": { 264 | "version": "3.0.2", 265 | "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", 266 | "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" 267 | }, 268 | "node_modules/form-data": { 269 | "version": "2.5.1", 270 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz", 271 | "integrity": 
"sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==", 272 | "dependencies": { 273 | "asynckit": "^0.4.0", 274 | "combined-stream": "^1.0.6", 275 | "mime-types": "^2.1.12" 276 | }, 277 | "engines": { 278 | "node": ">= 0.12" 279 | } 280 | }, 281 | "node_modules/gaxios": { 282 | "version": "6.1.1", 283 | "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-6.1.1.tgz", 284 | "integrity": "sha512-bw8smrX+XlAoo9o1JAksBwX+hi/RG15J+NTSxmNPIclKC3ZVK6C2afwY8OSdRvOK0+ZLecUJYtj2MmjOt3Dm0w==", 285 | "dependencies": { 286 | "extend": "^3.0.2", 287 | "https-proxy-agent": "^7.0.1", 288 | "is-stream": "^2.0.0", 289 | "node-fetch": "^2.6.9" 290 | }, 291 | "engines": { 292 | "node": ">=14" 293 | } 294 | }, 295 | "node_modules/gcp-metadata": { 296 | "version": "6.1.0", 297 | "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-6.1.0.tgz", 298 | "integrity": "sha512-Jh/AIwwgaxan+7ZUUmRLCjtchyDiqh4KjBJ5tW3plBZb5iL/BPcso8A5DlzeD9qlw0duCamnNdpFjxwaT0KyKg==", 299 | "dependencies": { 300 | "gaxios": "^6.0.0", 301 | "json-bigint": "^1.0.0" 302 | }, 303 | "engines": { 304 | "node": ">=14" 305 | } 306 | }, 307 | "node_modules/google-auth-library": { 308 | "version": "9.4.1", 309 | "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-9.4.1.tgz", 310 | "integrity": "sha512-Chs7cuzDuav8W/BXOoRgSXw4u0zxYtuqAHETDR5Q6dG1RwNwz7NUKjsDDHAsBV3KkiiJBtJqjbzy1XU1L41w1g==", 311 | "dependencies": { 312 | "base64-js": "^1.3.0", 313 | "ecdsa-sig-formatter": "^1.0.11", 314 | "gaxios": "^6.1.1", 315 | "gcp-metadata": "^6.1.0", 316 | "gtoken": "^7.0.0", 317 | "jws": "^4.0.0" 318 | }, 319 | "engines": { 320 | "node": ">=14" 321 | } 322 | }, 323 | "node_modules/gtoken": { 324 | "version": "7.0.1", 325 | "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-7.0.1.tgz", 326 | "integrity": "sha512-KcFVtoP1CVFtQu0aSk3AyAt2og66PFhZAlkUOuWKwzMLoulHXG5W5wE5xAnHb+yl3/wEFoqGW7/cDGMU8igDZQ==", 327 | "dependencies": { 328 | "gaxios": "^6.0.0", 329 | "jws": "^4.0.0" 330 | }, 331 | "engines": { 332 | "node": ">=14.0.0" 333 | } 334 | }, 335 | "node_modules/http-proxy-agent": { 336 | "version": "5.0.0", 337 | "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", 338 | "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", 339 | "dependencies": { 340 | "@tootallnate/once": "2", 341 | "agent-base": "6", 342 | "debug": "4" 343 | }, 344 | "engines": { 345 | "node": ">= 6" 346 | } 347 | }, 348 | "node_modules/http-proxy-agent/node_modules/agent-base": { 349 | "version": "6.0.2", 350 | "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", 351 | "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", 352 | "dependencies": { 353 | "debug": "4" 354 | }, 355 | "engines": { 356 | "node": ">= 6.0.0" 357 | } 358 | }, 359 | "node_modules/https-proxy-agent": { 360 | "version": "7.0.2", 361 | "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.2.tgz", 362 | "integrity": "sha512-NmLNjm6ucYwtcUmL7JQC1ZQ57LmHP4lT15FQ8D61nak1rO6DH+fz5qNK2Ap5UN4ZapYICE3/0KodcLYSPsPbaA==", 363 | "dependencies": { 364 | "agent-base": "^7.0.2", 365 | "debug": "4" 366 | }, 367 | "engines": { 368 | "node": ">= 14" 369 | } 370 | }, 371 | "node_modules/inherits": { 372 | "version": "2.0.4", 373 | "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", 374 | "integrity": 
"sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" 375 | }, 376 | "node_modules/is": { 377 | "version": "3.3.0", 378 | "resolved": "https://registry.npmjs.org/is/-/is-3.3.0.tgz", 379 | "integrity": "sha512-nW24QBoPcFGGHJGUwnfpI7Yc5CdqWNdsyHQszVE/z2pKHXzh7FZ5GWhJqSyaQ9wMkQnsTx+kAI8bHlCX4tKdbg==", 380 | "engines": { 381 | "node": "*" 382 | } 383 | }, 384 | "node_modules/is-stream": { 385 | "version": "2.0.1", 386 | "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", 387 | "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", 388 | "engines": { 389 | "node": ">=8" 390 | }, 391 | "funding": { 392 | "url": "https://github.com/sponsors/sindresorhus" 393 | } 394 | }, 395 | "node_modules/json-bigint": { 396 | "version": "1.0.0", 397 | "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", 398 | "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", 399 | "dependencies": { 400 | "bignumber.js": "^9.0.0" 401 | } 402 | }, 403 | "node_modules/jwa": { 404 | "version": "2.0.0", 405 | "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", 406 | "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", 407 | "dependencies": { 408 | "buffer-equal-constant-time": "1.0.1", 409 | "ecdsa-sig-formatter": "1.0.11", 410 | "safe-buffer": "^5.0.1" 411 | } 412 | }, 413 | "node_modules/jws": { 414 | "version": "4.0.0", 415 | "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", 416 | "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", 417 | "dependencies": { 418 | "jwa": "^2.0.0", 419 | "safe-buffer": "^5.0.1" 420 | } 421 | }, 422 | "node_modules/mime-db": { 423 | "version": "1.52.0", 424 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", 425 | "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", 426 | "engines": { 427 | "node": ">= 0.6" 428 | } 429 | }, 430 | "node_modules/mime-types": { 431 | "version": "2.1.35", 432 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", 433 | "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", 434 | "dependencies": { 435 | "mime-db": "1.52.0" 436 | }, 437 | "engines": { 438 | "node": ">= 0.6" 439 | } 440 | }, 441 | "node_modules/ms": { 442 | "version": "2.1.2", 443 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", 444 | "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" 445 | }, 446 | "node_modules/node-fetch": { 447 | "version": "2.7.0", 448 | "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", 449 | "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", 450 | "dependencies": { 451 | "whatwg-url": "^5.0.0" 452 | }, 453 | "engines": { 454 | "node": "4.x || >=6.0.0" 455 | }, 456 | "peerDependencies": { 457 | "encoding": "^0.1.0" 458 | }, 459 | "peerDependenciesMeta": { 460 | "encoding": { 461 | "optional": true 462 | } 463 | } 464 | }, 465 | "node_modules/once": { 466 | "version": "1.4.0", 467 | "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", 468 | "integrity": 
"sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", 469 | "dependencies": { 470 | "wrappy": "1" 471 | } 472 | }, 473 | "node_modules/readable-stream": { 474 | "version": "3.6.2", 475 | "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", 476 | "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", 477 | "dependencies": { 478 | "inherits": "^2.0.3", 479 | "string_decoder": "^1.1.1", 480 | "util-deprecate": "^1.0.1" 481 | }, 482 | "engines": { 483 | "node": ">= 6" 484 | } 485 | }, 486 | "node_modules/retry-request": { 487 | "version": "7.0.1", 488 | "resolved": "https://registry.npmjs.org/retry-request/-/retry-request-7.0.1.tgz", 489 | "integrity": "sha512-ZI6vJp9rfB71mrZpw+n9p/B6HCsd7QJlSEQftZ+xfJzr3cQ9EPGKw1FF0BnViJ0fYREX6FhymBD2CARpmsFciQ==", 490 | "dependencies": { 491 | "@types/request": "^2.48.8", 492 | "debug": "^4.1.1", 493 | "extend": "^3.0.2", 494 | "teeny-request": "^9.0.0" 495 | }, 496 | "engines": { 497 | "node": ">=14" 498 | } 499 | }, 500 | "node_modules/safe-buffer": { 501 | "version": "5.2.1", 502 | "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", 503 | "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", 504 | "funding": [ 505 | { 506 | "type": "github", 507 | "url": "https://github.com/sponsors/feross" 508 | }, 509 | { 510 | "type": "patreon", 511 | "url": "https://www.patreon.com/feross" 512 | }, 513 | { 514 | "type": "consulting", 515 | "url": "https://feross.org/support" 516 | } 517 | ] 518 | }, 519 | "node_modules/stream-events": { 520 | "version": "1.0.5", 521 | "resolved": "https://registry.npmjs.org/stream-events/-/stream-events-1.0.5.tgz", 522 | "integrity": "sha512-E1GUzBSgvct8Jsb3v2X15pjzN1tYebtbLaMg+eBOUOAxgbLoSbT2NS91ckc5lJD1KfLjId+jXJRgo0qnV5Nerg==", 523 | "dependencies": { 524 | "stubs": "^3.0.0" 525 | } 526 | }, 527 | "node_modules/stream-shift": { 528 | "version": "1.0.1", 529 | "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", 530 | "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" 531 | }, 532 | "node_modules/string_decoder": { 533 | "version": "1.3.0", 534 | "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", 535 | "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", 536 | "dependencies": { 537 | "safe-buffer": "~5.2.0" 538 | } 539 | }, 540 | "node_modules/stubs": { 541 | "version": "3.0.0", 542 | "resolved": "https://registry.npmjs.org/stubs/-/stubs-3.0.0.tgz", 543 | "integrity": "sha512-PdHt7hHUJKxvTCgbKX9C1V/ftOcjJQgz8BZwNfV5c4B6dcGqlpelTbJ999jBGZ2jYiPAwcX5dP6oBwVlBlUbxw==" 544 | }, 545 | "node_modules/teeny-request": { 546 | "version": "9.0.0", 547 | "resolved": "https://registry.npmjs.org/teeny-request/-/teeny-request-9.0.0.tgz", 548 | "integrity": "sha512-resvxdc6Mgb7YEThw6G6bExlXKkv6+YbuzGg9xuXxSgxJF7Ozs+o8Y9+2R3sArdWdW8nOokoQb1yrpFB0pQK2g==", 549 | "dependencies": { 550 | "http-proxy-agent": "^5.0.0", 551 | "https-proxy-agent": "^5.0.0", 552 | "node-fetch": "^2.6.9", 553 | "stream-events": "^1.0.5", 554 | "uuid": "^9.0.0" 555 | }, 556 | "engines": { 557 | "node": ">=14" 558 | } 559 | }, 560 | "node_modules/teeny-request/node_modules/agent-base": { 561 | "version": "6.0.2", 562 | "resolved": 
"https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", 563 | "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", 564 | "dependencies": { 565 | "debug": "4" 566 | }, 567 | "engines": { 568 | "node": ">= 6.0.0" 569 | } 570 | }, 571 | "node_modules/teeny-request/node_modules/https-proxy-agent": { 572 | "version": "5.0.1", 573 | "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", 574 | "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", 575 | "dependencies": { 576 | "agent-base": "6", 577 | "debug": "4" 578 | }, 579 | "engines": { 580 | "node": ">= 6" 581 | } 582 | }, 583 | "node_modules/tr46": { 584 | "version": "0.0.3", 585 | "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", 586 | "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" 587 | }, 588 | "node_modules/undici-types": { 589 | "version": "5.26.5", 590 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", 591 | "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" 592 | }, 593 | "node_modules/util-deprecate": { 594 | "version": "1.0.2", 595 | "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", 596 | "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" 597 | }, 598 | "node_modules/uuid": { 599 | "version": "9.0.1", 600 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", 601 | "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", 602 | "funding": [ 603 | "https://github.com/sponsors/broofa", 604 | "https://github.com/sponsors/ctavan" 605 | ], 606 | "bin": { 607 | "uuid": "dist/bin/uuid" 608 | } 609 | }, 610 | "node_modules/webidl-conversions": { 611 | "version": "3.0.1", 612 | "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", 613 | "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" 614 | }, 615 | "node_modules/whatwg-url": { 616 | "version": "5.0.0", 617 | "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", 618 | "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", 619 | "dependencies": { 620 | "tr46": "~0.0.3", 621 | "webidl-conversions": "^3.0.0" 622 | } 623 | }, 624 | "node_modules/wrappy": { 625 | "version": "1.0.2", 626 | "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", 627 | "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" 628 | } 629 | } 630 | } 631 | -------------------------------------------------------------------------------- /Hands-On/cloud_run/services/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cloud_run", 3 | "version": "1.0.0", 4 | "lockfileVersion": 3, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "cloud_run", 9 | "version": "1.0.0", 10 | "license": "ISC", 11 | "dependencies": { 12 | "express": "^4.18.2" 13 | } 14 | }, 15 | "node_modules/accepts": { 16 | "version": "1.3.8", 17 | "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", 18 | "integrity": 
"sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", 19 | "dependencies": { 20 | "mime-types": "~2.1.34", 21 | "negotiator": "0.6.3" 22 | }, 23 | "engines": { 24 | "node": ">= 0.6" 25 | } 26 | }, 27 | "node_modules/array-flatten": { 28 | "version": "1.1.1", 29 | "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", 30 | "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" 31 | }, 32 | "node_modules/body-parser": { 33 | "version": "1.20.1", 34 | "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", 35 | "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", 36 | "dependencies": { 37 | "bytes": "3.1.2", 38 | "content-type": "~1.0.4", 39 | "debug": "2.6.9", 40 | "depd": "2.0.0", 41 | "destroy": "1.2.0", 42 | "http-errors": "2.0.0", 43 | "iconv-lite": "0.4.24", 44 | "on-finished": "2.4.1", 45 | "qs": "6.11.0", 46 | "raw-body": "2.5.1", 47 | "type-is": "~1.6.18", 48 | "unpipe": "1.0.0" 49 | }, 50 | "engines": { 51 | "node": ">= 0.8", 52 | "npm": "1.2.8000 || >= 1.4.16" 53 | } 54 | }, 55 | "node_modules/bytes": { 56 | "version": "3.1.2", 57 | "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", 58 | "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", 59 | "engines": { 60 | "node": ">= 0.8" 61 | } 62 | }, 63 | "node_modules/call-bind": { 64 | "version": "1.0.5", 65 | "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.5.tgz", 66 | "integrity": "sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ==", 67 | "dependencies": { 68 | "function-bind": "^1.1.2", 69 | "get-intrinsic": "^1.2.1", 70 | "set-function-length": "^1.1.1" 71 | }, 72 | "funding": { 73 | "url": "https://github.com/sponsors/ljharb" 74 | } 75 | }, 76 | "node_modules/content-disposition": { 77 | "version": "0.5.4", 78 | "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", 79 | "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", 80 | "dependencies": { 81 | "safe-buffer": "5.2.1" 82 | }, 83 | "engines": { 84 | "node": ">= 0.6" 85 | } 86 | }, 87 | "node_modules/content-type": { 88 | "version": "1.0.5", 89 | "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", 90 | "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", 91 | "engines": { 92 | "node": ">= 0.6" 93 | } 94 | }, 95 | "node_modules/cookie": { 96 | "version": "0.5.0", 97 | "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", 98 | "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", 99 | "engines": { 100 | "node": ">= 0.6" 101 | } 102 | }, 103 | "node_modules/cookie-signature": { 104 | "version": "1.0.6", 105 | "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", 106 | "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" 107 | }, 108 | "node_modules/debug": { 109 | "version": "2.6.9", 110 | "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", 111 | "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", 112 | "dependencies": { 113 | 
"ms": "2.0.0" 114 | } 115 | }, 116 | "node_modules/define-data-property": { 117 | "version": "1.1.1", 118 | "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz", 119 | "integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==", 120 | "dependencies": { 121 | "get-intrinsic": "^1.2.1", 122 | "gopd": "^1.0.1", 123 | "has-property-descriptors": "^1.0.0" 124 | }, 125 | "engines": { 126 | "node": ">= 0.4" 127 | } 128 | }, 129 | "node_modules/depd": { 130 | "version": "2.0.0", 131 | "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", 132 | "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", 133 | "engines": { 134 | "node": ">= 0.8" 135 | } 136 | }, 137 | "node_modules/destroy": { 138 | "version": "1.2.0", 139 | "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", 140 | "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", 141 | "engines": { 142 | "node": ">= 0.8", 143 | "npm": "1.2.8000 || >= 1.4.16" 144 | } 145 | }, 146 | "node_modules/ee-first": { 147 | "version": "1.1.1", 148 | "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", 149 | "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" 150 | }, 151 | "node_modules/encodeurl": { 152 | "version": "1.0.2", 153 | "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", 154 | "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", 155 | "engines": { 156 | "node": ">= 0.8" 157 | } 158 | }, 159 | "node_modules/escape-html": { 160 | "version": "1.0.3", 161 | "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", 162 | "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" 163 | }, 164 | "node_modules/etag": { 165 | "version": "1.8.1", 166 | "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", 167 | "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", 168 | "engines": { 169 | "node": ">= 0.6" 170 | } 171 | }, 172 | "node_modules/express": { 173 | "version": "4.18.2", 174 | "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", 175 | "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", 176 | "dependencies": { 177 | "accepts": "~1.3.8", 178 | "array-flatten": "1.1.1", 179 | "body-parser": "1.20.1", 180 | "content-disposition": "0.5.4", 181 | "content-type": "~1.0.4", 182 | "cookie": "0.5.0", 183 | "cookie-signature": "1.0.6", 184 | "debug": "2.6.9", 185 | "depd": "2.0.0", 186 | "encodeurl": "~1.0.2", 187 | "escape-html": "~1.0.3", 188 | "etag": "~1.8.1", 189 | "finalhandler": "1.2.0", 190 | "fresh": "0.5.2", 191 | "http-errors": "2.0.0", 192 | "merge-descriptors": "1.0.1", 193 | "methods": "~1.1.2", 194 | "on-finished": "2.4.1", 195 | "parseurl": "~1.3.3", 196 | "path-to-regexp": "0.1.7", 197 | "proxy-addr": "~2.0.7", 198 | "qs": "6.11.0", 199 | "range-parser": "~1.2.1", 200 | "safe-buffer": "5.2.1", 201 | "send": "0.18.0", 202 | "serve-static": "1.15.0", 203 | "setprototypeof": "1.2.0", 204 | "statuses": "2.0.1", 205 | "type-is": "~1.6.18", 206 | "utils-merge": "1.0.1", 207 | "vary": "~1.1.2" 208 | }, 209 | "engines": { 210 | "node": ">= 0.10.0" 
211 | } 212 | }, 213 | "node_modules/finalhandler": { 214 | "version": "1.2.0", 215 | "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", 216 | "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", 217 | "dependencies": { 218 | "debug": "2.6.9", 219 | "encodeurl": "~1.0.2", 220 | "escape-html": "~1.0.3", 221 | "on-finished": "2.4.1", 222 | "parseurl": "~1.3.3", 223 | "statuses": "2.0.1", 224 | "unpipe": "~1.0.0" 225 | }, 226 | "engines": { 227 | "node": ">= 0.8" 228 | } 229 | }, 230 | "node_modules/forwarded": { 231 | "version": "0.2.0", 232 | "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", 233 | "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", 234 | "engines": { 235 | "node": ">= 0.6" 236 | } 237 | }, 238 | "node_modules/fresh": { 239 | "version": "0.5.2", 240 | "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", 241 | "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", 242 | "engines": { 243 | "node": ">= 0.6" 244 | } 245 | }, 246 | "node_modules/function-bind": { 247 | "version": "1.1.2", 248 | "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", 249 | "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", 250 | "funding": { 251 | "url": "https://github.com/sponsors/ljharb" 252 | } 253 | }, 254 | "node_modules/get-intrinsic": { 255 | "version": "1.2.2", 256 | "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.2.tgz", 257 | "integrity": "sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==", 258 | "dependencies": { 259 | "function-bind": "^1.1.2", 260 | "has-proto": "^1.0.1", 261 | "has-symbols": "^1.0.3", 262 | "hasown": "^2.0.0" 263 | }, 264 | "funding": { 265 | "url": "https://github.com/sponsors/ljharb" 266 | } 267 | }, 268 | "node_modules/gopd": { 269 | "version": "1.0.1", 270 | "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", 271 | "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", 272 | "dependencies": { 273 | "get-intrinsic": "^1.1.3" 274 | }, 275 | "funding": { 276 | "url": "https://github.com/sponsors/ljharb" 277 | } 278 | }, 279 | "node_modules/has-property-descriptors": { 280 | "version": "1.0.1", 281 | "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz", 282 | "integrity": "sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==", 283 | "dependencies": { 284 | "get-intrinsic": "^1.2.2" 285 | }, 286 | "funding": { 287 | "url": "https://github.com/sponsors/ljharb" 288 | } 289 | }, 290 | "node_modules/has-proto": { 291 | "version": "1.0.1", 292 | "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", 293 | "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", 294 | "engines": { 295 | "node": ">= 0.4" 296 | }, 297 | "funding": { 298 | "url": "https://github.com/sponsors/ljharb" 299 | } 300 | }, 301 | "node_modules/has-symbols": { 302 | "version": "1.0.3", 303 | "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", 304 | "integrity": 
"sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", 305 | "engines": { 306 | "node": ">= 0.4" 307 | }, 308 | "funding": { 309 | "url": "https://github.com/sponsors/ljharb" 310 | } 311 | }, 312 | "node_modules/hasown": { 313 | "version": "2.0.0", 314 | "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", 315 | "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", 316 | "dependencies": { 317 | "function-bind": "^1.1.2" 318 | }, 319 | "engines": { 320 | "node": ">= 0.4" 321 | } 322 | }, 323 | "node_modules/http-errors": { 324 | "version": "2.0.0", 325 | "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", 326 | "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", 327 | "dependencies": { 328 | "depd": "2.0.0", 329 | "inherits": "2.0.4", 330 | "setprototypeof": "1.2.0", 331 | "statuses": "2.0.1", 332 | "toidentifier": "1.0.1" 333 | }, 334 | "engines": { 335 | "node": ">= 0.8" 336 | } 337 | }, 338 | "node_modules/iconv-lite": { 339 | "version": "0.4.24", 340 | "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", 341 | "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", 342 | "dependencies": { 343 | "safer-buffer": ">= 2.1.2 < 3" 344 | }, 345 | "engines": { 346 | "node": ">=0.10.0" 347 | } 348 | }, 349 | "node_modules/inherits": { 350 | "version": "2.0.4", 351 | "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", 352 | "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" 353 | }, 354 | "node_modules/ipaddr.js": { 355 | "version": "1.9.1", 356 | "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", 357 | "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", 358 | "engines": { 359 | "node": ">= 0.10" 360 | } 361 | }, 362 | "node_modules/media-typer": { 363 | "version": "0.3.0", 364 | "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", 365 | "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", 366 | "engines": { 367 | "node": ">= 0.6" 368 | } 369 | }, 370 | "node_modules/merge-descriptors": { 371 | "version": "1.0.1", 372 | "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", 373 | "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" 374 | }, 375 | "node_modules/methods": { 376 | "version": "1.1.2", 377 | "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", 378 | "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", 379 | "engines": { 380 | "node": ">= 0.6" 381 | } 382 | }, 383 | "node_modules/mime": { 384 | "version": "1.6.0", 385 | "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", 386 | "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", 387 | "bin": { 388 | "mime": "cli.js" 389 | }, 390 | "engines": { 391 | "node": ">=4" 392 | } 393 | }, 394 | "node_modules/mime-db": { 395 | "version": "1.52.0", 396 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", 397 | "integrity": 
"sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", 398 | "engines": { 399 | "node": ">= 0.6" 400 | } 401 | }, 402 | "node_modules/mime-types": { 403 | "version": "2.1.35", 404 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", 405 | "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", 406 | "dependencies": { 407 | "mime-db": "1.52.0" 408 | }, 409 | "engines": { 410 | "node": ">= 0.6" 411 | } 412 | }, 413 | "node_modules/ms": { 414 | "version": "2.0.0", 415 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", 416 | "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" 417 | }, 418 | "node_modules/negotiator": { 419 | "version": "0.6.3", 420 | "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", 421 | "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", 422 | "engines": { 423 | "node": ">= 0.6" 424 | } 425 | }, 426 | "node_modules/object-inspect": { 427 | "version": "1.13.1", 428 | "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", 429 | "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", 430 | "funding": { 431 | "url": "https://github.com/sponsors/ljharb" 432 | } 433 | }, 434 | "node_modules/on-finished": { 435 | "version": "2.4.1", 436 | "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", 437 | "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", 438 | "dependencies": { 439 | "ee-first": "1.1.1" 440 | }, 441 | "engines": { 442 | "node": ">= 0.8" 443 | } 444 | }, 445 | "node_modules/parseurl": { 446 | "version": "1.3.3", 447 | "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", 448 | "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", 449 | "engines": { 450 | "node": ">= 0.8" 451 | } 452 | }, 453 | "node_modules/path-to-regexp": { 454 | "version": "0.1.7", 455 | "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", 456 | "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" 457 | }, 458 | "node_modules/proxy-addr": { 459 | "version": "2.0.7", 460 | "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", 461 | "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", 462 | "dependencies": { 463 | "forwarded": "0.2.0", 464 | "ipaddr.js": "1.9.1" 465 | }, 466 | "engines": { 467 | "node": ">= 0.10" 468 | } 469 | }, 470 | "node_modules/qs": { 471 | "version": "6.11.0", 472 | "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", 473 | "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", 474 | "dependencies": { 475 | "side-channel": "^1.0.4" 476 | }, 477 | "engines": { 478 | "node": ">=0.6" 479 | }, 480 | "funding": { 481 | "url": "https://github.com/sponsors/ljharb" 482 | } 483 | }, 484 | "node_modules/range-parser": { 485 | "version": "1.2.1", 486 | "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", 487 | "integrity": 
"sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", 488 | "engines": { 489 | "node": ">= 0.6" 490 | } 491 | }, 492 | "node_modules/raw-body": { 493 | "version": "2.5.1", 494 | "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", 495 | "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", 496 | "dependencies": { 497 | "bytes": "3.1.2", 498 | "http-errors": "2.0.0", 499 | "iconv-lite": "0.4.24", 500 | "unpipe": "1.0.0" 501 | }, 502 | "engines": { 503 | "node": ">= 0.8" 504 | } 505 | }, 506 | "node_modules/safe-buffer": { 507 | "version": "5.2.1", 508 | "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", 509 | "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", 510 | "funding": [ 511 | { 512 | "type": "github", 513 | "url": "https://github.com/sponsors/feross" 514 | }, 515 | { 516 | "type": "patreon", 517 | "url": "https://www.patreon.com/feross" 518 | }, 519 | { 520 | "type": "consulting", 521 | "url": "https://feross.org/support" 522 | } 523 | ] 524 | }, 525 | "node_modules/safer-buffer": { 526 | "version": "2.1.2", 527 | "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", 528 | "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" 529 | }, 530 | "node_modules/send": { 531 | "version": "0.18.0", 532 | "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", 533 | "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", 534 | "dependencies": { 535 | "debug": "2.6.9", 536 | "depd": "2.0.0", 537 | "destroy": "1.2.0", 538 | "encodeurl": "~1.0.2", 539 | "escape-html": "~1.0.3", 540 | "etag": "~1.8.1", 541 | "fresh": "0.5.2", 542 | "http-errors": "2.0.0", 543 | "mime": "1.6.0", 544 | "ms": "2.1.3", 545 | "on-finished": "2.4.1", 546 | "range-parser": "~1.2.1", 547 | "statuses": "2.0.1" 548 | }, 549 | "engines": { 550 | "node": ">= 0.8.0" 551 | } 552 | }, 553 | "node_modules/send/node_modules/ms": { 554 | "version": "2.1.3", 555 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", 556 | "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" 557 | }, 558 | "node_modules/serve-static": { 559 | "version": "1.15.0", 560 | "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", 561 | "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", 562 | "dependencies": { 563 | "encodeurl": "~1.0.2", 564 | "escape-html": "~1.0.3", 565 | "parseurl": "~1.3.3", 566 | "send": "0.18.0" 567 | }, 568 | "engines": { 569 | "node": ">= 0.8.0" 570 | } 571 | }, 572 | "node_modules/set-function-length": { 573 | "version": "1.1.1", 574 | "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.1.1.tgz", 575 | "integrity": "sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ==", 576 | "dependencies": { 577 | "define-data-property": "^1.1.1", 578 | "get-intrinsic": "^1.2.1", 579 | "gopd": "^1.0.1", 580 | "has-property-descriptors": "^1.0.0" 581 | }, 582 | "engines": { 583 | "node": ">= 0.4" 584 | } 585 | }, 586 | "node_modules/setprototypeof": { 587 | "version": "1.2.0", 588 | "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", 589 | 
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" 590 | }, 591 | "node_modules/side-channel": { 592 | "version": "1.0.4", 593 | "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", 594 | "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", 595 | "dependencies": { 596 | "call-bind": "^1.0.0", 597 | "get-intrinsic": "^1.0.2", 598 | "object-inspect": "^1.9.0" 599 | }, 600 | "funding": { 601 | "url": "https://github.com/sponsors/ljharb" 602 | } 603 | }, 604 | "node_modules/statuses": { 605 | "version": "2.0.1", 606 | "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", 607 | "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", 608 | "engines": { 609 | "node": ">= 0.8" 610 | } 611 | }, 612 | "node_modules/toidentifier": { 613 | "version": "1.0.1", 614 | "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", 615 | "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", 616 | "engines": { 617 | "node": ">=0.6" 618 | } 619 | }, 620 | "node_modules/type-is": { 621 | "version": "1.6.18", 622 | "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", 623 | "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", 624 | "dependencies": { 625 | "media-typer": "0.3.0", 626 | "mime-types": "~2.1.24" 627 | }, 628 | "engines": { 629 | "node": ">= 0.6" 630 | } 631 | }, 632 | "node_modules/unpipe": { 633 | "version": "1.0.0", 634 | "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", 635 | "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", 636 | "engines": { 637 | "node": ">= 0.8" 638 | } 639 | }, 640 | "node_modules/utils-merge": { 641 | "version": "1.0.1", 642 | "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", 643 | "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", 644 | "engines": { 645 | "node": ">= 0.4.0" 646 | } 647 | }, 648 | "node_modules/vary": { 649 | "version": "1.1.2", 650 | "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", 651 | "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", 652 | "engines": { 653 | "node": ">= 0.8" 654 | } 655 | } 656 | } 657 | } 658 | --------------------------------------------------------------------------------