├── .dockerignore
├── .gcloudignore
├── .gitattributes
├── .gitignore
├── Dockerfile
├── app.yaml
├── buildContainer.sh
├── index.js
├── k8sapp.yaml
├── loadService.sh
├── loadgenerator
│   ├── .dockerignore
│   ├── Dockerfile
│   ├── buildLoadGeneContainer.sh
│   ├── k8s-loadgen.yaml
│   ├── locustfile.py
│   ├── readme.md
│   └── requirements.txt
├── package.json
├── readme.md
└── rebuildService.sh

--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
Dockerfile
readme.md
node_modules
*.log
cloudbuild.yaml
*.sh
app.yaml
package-lock.json
.gitignore
loadgenerator

--------------------------------------------------------------------------------
/.gcloudignore:
--------------------------------------------------------------------------------
# This file specifies files that are *not* uploaded to Google Cloud Platform
# using gcloud. It follows the same syntax as .gitignore, with the addition of
# "#!include" directives (which insert the entries of the given .gitignore-style
# file at that point).
#
# For more information, run:
#   $ gcloud topic gcloudignore
#
.gcloudignore
# If you would like to upload your .git directory, .gitignore file or files
# from your .gitignore file, remove the corresponding line
# below:
.git
.gitignore
.dockerignore
*.sh
*.md
package-lock.json

# Node.js dependencies:
node_modules/
loadgenerator/

--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
node_modules
*.log
package-lock.json
.vscode
.pytest_cache

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM node:16-bookworm-slim

WORKDIR /usr/src/app

COPY package.json ./

RUN npm install --omit=dev

COPY . .

CMD [ "npm", "start" ]

--------------------------------------------------------------------------------
/app.yaml:
--------------------------------------------------------------------------------
runtime: nodejs22

--------------------------------------------------------------------------------
/buildContainer.sh:
--------------------------------------------------------------------------------
PROJECT_ID=$GOOGLE_CLOUD_PROJECT # Defaults to the active project; replace if needed
LOCATION="us"                    # Artifact Registry location ("us" is a multi-region; a region such as "us-central1" also works)
REPOSITORY="demos"               # Replace with your desired repository name
FULL_REPO="${LOCATION}-docker.pkg.dev/${PROJECT_ID}/${REPOSITORY}"
IMAGE_NAME="hello-operations-demo"
# Check if the repository exists
if ! gcloud artifacts repositories describe "${REPOSITORY}" --location="${LOCATION}" --project="${PROJECT_ID}" &>/dev/null; then
  echo "Repository '$REPOSITORY' does not exist. Creating..."

  # Create the repository
  gcloud artifacts repositories create "${REPOSITORY}" \
    --repository-format=docker \
    --location="${LOCATION}" \
    --project="${PROJECT_ID}"
  echo "Repository '$FULL_REPO' created successfully."
else
  echo "Repository '$FULL_REPO' already exists."
fi

gcloud builds submit --tag $FULL_REPO/$IMAGE_NAME:latest .

--------------------------------------------------------------------------------
/index.js:
--------------------------------------------------------------------------------
//This is a simple NodeJS app designed to explore various Google Cloud
//operations suite products

// ******
// Setting up Cloud Trace
// ******
// If using the older Cloud Trace API
//const tracer = require('@google-cloud/trace-agent').start();

//Using the newer OpenTelemetry API
const opentelemetry = require("@opentelemetry/api");
const { NodeTracerProvider } = require("@opentelemetry/sdk-trace-node");
const { SimpleSpanProcessor, BatchSpanProcessor } = require("@opentelemetry/sdk-trace-base");
const {
  TraceExporter,
} = require("@google-cloud/opentelemetry-cloud-trace-exporter");

// Enable OpenTelemetry exporters to export traces to Google Cloud Trace.
const provider = new NodeTracerProvider();
// Initialize the exporter. When your application is running on Google Cloud,
// you don't need to provide auth credentials or a project id.
const exporter = new TraceExporter();
// Configure the span processor to send spans to the exporter
provider.addSpanProcessor(new BatchSpanProcessor(exporter));
// Register the provider so spans created through the OpenTelemetry API get exported
provider.register();

// ******
// Enable Error Reporting
// ******
// Import the GCP ErrorReporting library
const {ErrorReporting} = require('@google-cloud/error-reporting');
const errors = new ErrorReporting({
  reportMode: 'always', //as opposed to only while in production
  serviceContext: {
    service: 'hello-logging-js',
    version: '1.0',
  }
});

// ******
// Enable the Profiler
// ******
require('@google-cloud/profiler').start({
  serviceContext: {
    service: 'hello-logging-js',
    version: '1.0',
  },
});

//Setup a listener to catch all uncaught exceptions
process.on('uncaughtException', (e) => {
  // Write the error to stderr.
  console.error(e);
  // Report that same error to the Error Reporting service
  errors.report(e);
});

// ******
// Setup a Winston logger adding GCP support
// ******
//Not a best practice, but some coders still prefer
//using logging libraries
const winston = require('winston');
//Here's the GCP addon
const {LoggingWinston} = require('@google-cloud/logging-winston');
const loggingWinston = new LoggingWinston();
const logger = winston.createLogger({
  level: 'info', //default logging level
  transports: [
    // Add GCP Logging
    loggingWinston,
  ],
});

// ******
//Load the express server
// ******
const express = require('express');
const app = express();
app.disable('etag'); //disable ETags so repeat requests return 200s instead of 304s

//Setup some values used later in code
const { v1: uuidv1 } = require('uuid');
const containerID = uuidv1();
const funFactor = Math.floor(Math.random() * 5) + 1; //just for fun

// ******
// The web routes start here
// ******

//A classic Hello World, not using our logger
//but it is doing a classic console.log
app.get('/', (req, res) => {
  console.log('/ version of Hello world received a request');

  const target = process.env.TARGET || 'World';
  res.send(`Hello ${target}!`);
});

//Another classic Hello World, this one using Winston to GCP
app.get('/log', (req, res) => {
  logger.info("/log version of Hello World received a request");
  const target = process.env.TARGET || 'World';
  res.send(`Hello ${target}, from /log!`);
});

//Basic NodeJS route built with the express server
app.get('/score', (req, res) => {

  //Random score; the containerID is a UUID unique to each
  //runtime container (testing was done in Cloud Run).
  //funFactor is a random number 1-5
  let score = Math.floor(Math.random() * 100) + 1;

  console.log(`/score called, score:${score}, containerID:${containerID}, funFactor:${funFactor}`);

  //Basic message back to browser
  res.send(`Your score is a ${score}. Happy?`);
});

//Generates an uncaught exception roughly once every error_rate (default 1000) requests
app.get('/random-error', (req, res) => {
  let error_rate = parseInt(req.query.error_rate) || 1000;
  let errorNum = (Math.floor(Math.random() * error_rate) + 1);
  if (errorNum==1) {
    console.log("Called /random-error, and it's about to error");
    doesNotExist();
  }
  console.log("Called /random-error, and it worked");
  res.send("Worked this time.");
});

//Manually report an error
app.get('/error', (req, res) => {
  try{
    doesNotExist();
  }
  catch(e){
    //This is a log, it will not show in Error Reporting
    console.error("Error processing /error " + e);
    //Let's manually pass it to Error Reporting
    errors.report("Error processing /error " + e);
  }
  res.send("Broken now, come back later.");
});

//Uncaught exception, auto reported
app.get('/uncaught', (req, res) => {
  doesNotExist();
  //won't ever get to:
  res.send("Broken now, come back later.");
});

//Generates a slow request
//If you wanted more trace detail, you could use tracer and create some
//extra spans, like
//const customSpan1 = tracer.createChildSpan({name: 'slowPi'});
//customSpan1.endSpan();
app.get('/slow', (req, res) => {
  let pi1=slowPi();

  let pi2=slowPi2();

  res.send(`Took its time. pi to 1,000 places: ${pi1}, pi to 10,000 places: ${pi2}`);
});

function slowPi(){
  let pi = piCalc(1000n);
  console.log(`How's this pi? ${pi}`);
  return pi;
}

function slowPi2(){
  let pi = piCalc(10000n);
  console.log(`Here's a bigger pi: ${pi}`);
  return pi;
}

//Use one of the many techniques to calculate
//pi to "count" places. This one sums a simple
//series using BigInt fixed-point arithmetic.
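// Roughly how the loop below works (a sketch of the math): starting from 3,
// each pass multiplies the running term x by i/((i+1)*4) and then adds
// x/(i+2) to pi, which sums the series
//   pi = 3 * (1 + sum_{k>=1} (2k-1)!! / ((2k+1) * k! * 8^k))
// i.e. the Maclaurin series for 6*arcsin(1/2). Both x and pi are BigInts
// scaled by 10^(count+20); the 20 guard digits are divided away at the end
// (pi / 10^20), so the returned value holds pi's digits to "count" places.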
function piCalc(count){
  let i = 1n;
  count = count + 20n;
  let x = 3n * (10n ** count);
  let pi = x;
  while (x > 0) {
    x = x * i / ((i + 1n) * 4n);
    pi += x / (i + 2n);
    i += 2n;
  }
  pi = pi / (10n ** 20n);
  console.log(pi);
  return pi;
}

// Expose the environment variables so these can demo ENV variable injection in Cloud Run, GKE, etc.
app.get('/env', (req, res) => {
  console.log(`Getting env vars`);
  res.setHeader('content-type', 'text/plain');
  res.send(JSON.stringify(process.env, null, 4));
});

// Note that express error handling middleware should be attached after all
// the other routes and use() calls.
app.use(errors.express);

const port = process.env.PORT || 8080;
app.listen(port, () => {
  console.log('Hello world listening on port', port);
});

--------------------------------------------------------------------------------
/k8sapp.yaml:
--------------------------------------------------------------------------------
# This file configures the HelloLoggingJS app which serves public web traffic.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-logging-js-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: hello-logging
  template:
    metadata:
      labels:
        app: hello-logging
    spec:
      containers:
      - name: hello-logging-container
        # Replace $PROJECT with your project ID
        image: us-docker.pkg.dev/$PROJECT/demos/hello-operations-demo:latest
        # This app listens on port 8080 for web traffic by default.
        ports:
        - containerPort: 8080
        env:
        - name: PORT
          value: "8080"
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits:
            cpu: 200m
            memory: 256Mi
---

# The hello-logging service provides a load-balancing proxy over the hello-logging
# pods. By specifying the type as a 'LoadBalancer', Kubernetes Engine will
# create an external HTTP load balancer.
apiVersion: v1
kind: Service
metadata:
  name: hello-logging-service
spec:
  type: LoadBalancer
  selector:
    app: hello-logging
  ports:
  - port: 80
    targetPort: 8080

--------------------------------------------------------------------------------
/loadService.sh:
--------------------------------------------------------------------------------
#Don't leave the square brackets when updating the following
URL=[URL here]
#Do a sanity check. If the above URL is dropped into the below
#command, does the resulting URL make sense? Work?
#Have too many slashes?
ab -n 100000 -c 100 $URL/

--------------------------------------------------------------------------------
/loadgenerator/.dockerignore:
--------------------------------------------------------------------------------
Dockerfile
buildLoadGeneContainer.sh
*.yaml

--------------------------------------------------------------------------------
/loadgenerator/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.12.3-slim@sha256:2be8daddbb82756f7d1f2c7ece706aadcb284bf6ab6d769ea695cc3ed6016743 AS base

FROM base AS builder

COPY requirements.txt .
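
# A note on the two-stage layout: the pip install below places the dependencies
# under /install in this builder stage, and the final stage copies that tree
# into /usr/local (see COPY --from=builder further down), so only the installed
# packages are carried into the runtime image.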

RUN pip install --prefix="/install" -r requirements.txt

FROM base

WORKDIR /loadgen

COPY --from=builder /install /usr/local

# Add application code.
COPY locustfile.py .

# enable gevent support in debugger
ENV GEVENT_SUPPORT=True

ENTRYPOINT locust --headless --users "${USERS:-10}" -H "${FRONTEND_ADDR}" 2>&1

--------------------------------------------------------------------------------
/loadgenerator/buildLoadGeneContainer.sh:
--------------------------------------------------------------------------------
PROJECT_ID=$GOOGLE_CLOUD_PROJECT # Defaults to the active project; replace if needed
LOCATION="us"                    # Artifact Registry location ("us" is a multi-region; a region such as "us-central1" also works)
REPOSITORY="demos"               # Replace with your desired repository name
FULL_REPO="${LOCATION}-docker.pkg.dev/${PROJECT_ID}/${REPOSITORY}"
IMAGE_NAME="load-generator"
# Check if the repository exists
if ! gcloud artifacts repositories describe "${REPOSITORY}" --location="${LOCATION}" --project="${PROJECT_ID}" &>/dev/null; then
  echo "Repository '$REPOSITORY' does not exist. Creating..."

  # Create the repository
  gcloud artifacts repositories create "${REPOSITORY}" \
    --repository-format=docker \
    --location="${LOCATION}" \
    --project="${PROJECT_ID}"
  echo "Repository '$FULL_REPO' created successfully."
else
  echo "Repository '$FULL_REPO' already exists."
fi

gcloud builds submit --tag $FULL_REPO/$IMAGE_NAME:latest .

--------------------------------------------------------------------------------
/loadgenerator/k8s-loadgen.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: loadgenerator-hw
spec:
  selector:
    matchLabels:
      app: loadgenerator-hw
  replicas: 1
  template:
    metadata:
      labels:
        app: loadgenerator-hw
      annotations:
        sidecar.istio.io/rewriteAppHTTPProbers: "true"
    spec:
      serviceAccountName: default
      terminationGracePeriodSeconds: 5
      restartPolicy: Always
      containers:
      - name: main
        # Replace below with the path to your load generator container
        image: us-docker.pkg.dev/${PROJECT}/demos/load-generator:latest
        env:
        - name: FRONTEND_ADDR
          value: "http://hello-logging-service:80"
        - name: APP_ROUTE
          value: "/random-error"
        # Current configuration will generate 1 RPS per user, per pod
        - name: USERS
          value: "50"
        resources:
          requests:
            cpu: 300m
            memory: 256Mi
          limits:
            cpu: 500m
            memory: 512Mi

--------------------------------------------------------------------------------
/loadgenerator/locustfile.py:
--------------------------------------------------------------------------------
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
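
# In short: each simulated user repeatedly GETs the path given in the APP_ROUTE
# environment variable against the host locust is pointed at via -H
# (FRONTEND_ADDR in the container entrypoint and Kubernetes manifest). With a
# constant one-second wait between tasks, each user produces roughly one
# request per second.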

import random
from locust import FastHttpUser, TaskSet, between, constant
import datetime
import os

def index(l):
    l.client.get(os.getenv('APP_ROUTE'))


class UserBehavior(TaskSet):

    def on_start(self):
        index(self)

    tasks = {index: 1}

class WebsiteUser(FastHttpUser):
    tasks = [UserBehavior]
    wait_time = constant(1)

--------------------------------------------------------------------------------
/loadgenerator/readme.md:
--------------------------------------------------------------------------------
# Load Generator

This is a basic containerized application that uses Locust to generate load on the main Hello Logging Node.js app's /random-error route, by default. It assumes that you have already deployed HelloLoggingJS to the same Kubernetes cluster where you will deploy the load generator. With the default settings (50 users, 1 replica), it will generate a starting load of approximately 50 requests per second.

## Building and deploying the load generator to Artifact Registry

The easiest way to build the load generation container is to use the supplied `buildLoadGeneContainer.sh` script. The script uses Cloud Build to build the image, creates the `demos` Artifact Registry repository if it doesn't already exist, and pushes the image tagged `latest`.

1. Run `buildLoadGeneContainer.sh`.

``` bash
sh buildLoadGeneContainer.sh
```

2. At the end of the output, Cloud Build will display the path to the newly built container. It should look like: "us-docker.pkg.dev/some-project-name/demos/load-generator:latest". Copy the path.

3. Open the `k8s-loadgen.yaml` file for editing. Update the container image path using the value you just copied.

4. Deploy the load generator to Kubernetes.

``` bash
kubectl apply -f k8s-loadgen.yaml
```

## Customizing load generation

By default, the load generator throws load on the `/random-error` path of the HelloLoggingJS application. In the `k8s-loadgen.yaml` file, you can change the `APP_ROUTE` env variable to redirect traffic to a different route. You can also change the simulated number of `USERS`, and the `replicas`, to increase or decrease the load. Each user generates roughly one request per second. If you like, you can also update the `FRONTEND_ADDR` to throw load on a totally different application. A couple of example commands follow.
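
If you prefer not to edit the manifest, the same knobs can be adjusted on the running Deployment; the commands below are illustrative and use the `loadgenerator-hw` name from `k8s-loadgen.yaml`:

``` bash
# Point the load at a different route and double the simulated users
kubectl set env deployment/loadgenerator-hw APP_ROUTE=/slow USERS=100

# Add a second load-generating pod
kubectl scale deployment/loadgenerator-hw --replicas=2
```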

--------------------------------------------------------------------------------
/loadgenerator/requirements.txt:
--------------------------------------------------------------------------------
locust==2.31.3

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "hello-logging-fun",
  "version": "1.0.0",
  "description": "Simple Hello World example written in NodeJS which uses Winston and the GCP Winston plugin to do some logging into GCP",
  "main": "index.js",
  "scripts": {
    "start": "node index.js"
  },
  "author": "",
  "license": "Apache-2.0",
  "devDependencies": {
    "npm-check-updates": "^16.10.12"
  },
  "dependencies": {
    "@google-cloud/error-reporting": "^3.0.5",
    "@google-cloud/logging-winston": "^5.3.0",
    "@google-cloud/opentelemetry-cloud-trace-exporter": "^2.0.0",
    "@google-cloud/profiler": "^5.0.4",
    "@google-cloud/trace-agent": "^7.1.2",
    "@opentelemetry/api": "^1.4.1",
    "@opentelemetry/sdk-trace-base": "^1.14.0",
    "@opentelemetry/sdk-trace-node": "^1.14.0",
    "express": "^4.18.2",
    "uuid": "^9.0.0",
    "winston": "^3.9.0"
  }
}

--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
# Hello Logging NodeJS

This is a basic Hello World example application written in NodeJS and designed to explore some of Google's Operations Suite features. You can run it in App Engine, Kubernetes Engine, or Cloud Run.

Note: If you run the example in a GKE cluster, you will need to enable full access scopes when creating the cluster, or grant the appropriate roles to the cluster's service account; otherwise some of the operations agents will report permission errors.

## Load the dependencies and test locally

Install the dependencies

``` bash
npm install
```

Start the server

``` bash
npm start
```

If you are running this on Google Cloud (for example, in Cloud Shell) and expect the operations libraries to report to your project, specify the project first:

``` bash
export GCLOUD_PROJECT=$DEVSHELL_PROJECT_ID
```

Test by visiting [http://localhost:8080](http://localhost:8080)

If it's working, shut down the server (Ctrl-C)

## Building and deploying to Artifact Registry

Use Cloud Build to build the Docker image and push it to Artifact Registry, using the `buildContainer.sh` script:

``` bash
. buildContainer.sh
```

## Running the app in Cloud Run

Now create a new Cloud Run app named *hello-logging* based on the image you just pushed:

``` bash
gcloud run deploy hello-logging --image us-docker.pkg.dev/${PROJECT}/demos/hello-operations-demo:latest --region us-central1 --platform managed --quiet --allow-unauthenticated
```

## Rebuilding the Cloud Run app

Remember, if you change the code you'll have to save the change, rebuild the image with Cloud Build into Artifact Registry, and deploy a new Cloud Run revision. Note that these commands will fail if the $PROJECT env variable isn't set.

``` bash
. buildContainer.sh
gcloud run deploy hello-logging --image us-docker.pkg.dev/${PROJECT}/demos/hello-operations-demo:latest --region us-central1 --platform managed --quiet --allow-unauthenticated
```

## Deploying to App Engine

There's already an `app.yaml` file, so there's no need to create one yourself. Create the App Engine app (if needed):

``` bash
gcloud app create
```

Then deploy the application to it:

``` bash
gcloud app deploy
```

## GKE

If you'd like to deploy to GKE, make sure you have the appropriate permissions enabled on the cluster (see the note at the top of this file). There's a `k8sapp.yaml` file containing the configuration to create a Deployment and a LoadBalancer Service for it. You will need to edit the file before applying it, to set the proper path to the container image in Artifact Registry. Then simply apply the file:

``` bash
kubectl apply -f k8sapp.yaml
```

--------------------------------------------------------------------------------
/rebuildService.sh:
--------------------------------------------------------------------------------
gcloud builds submit --tag gcr.io/$GOOGLE_CLOUD_PROJECT/hello-logging:1.0
gcloud run deploy hello-logging --image gcr.io/$GOOGLE_CLOUD_PROJECT/hello-logging:1.0 --region us-central1 --quiet --allow-unauthenticated --concurrency 80 --max-instances 3 --labels stage=dev,department=training

--------------------------------------------------------------------------------