├── NOTICE
├── es_sink
├── es_sink
│ ├── __version__.py
│ ├── __init__.py
│ ├── transport_exceptions.py
│ ├── transport_result.py
│ ├── sqs_transport.py
│ ├── examples.py
│ ├── es_auth.py
│ ├── transport_utils.py
│ ├── flushing_buffer.py
│ ├── line_buffer.py
│ ├── es_transport.py
│ └── descriptor.py
├── LICENSE
├── __version__.py
├── setup.py
├── examples.py
├── README.md
└── tests
│ └── test_descriptor.py
├── jwt-tokens
├── token-gen
│ ├── README
│ ├── .idea
│ │ ├── modules.xml
│ │ ├── misc.xml
│ │ ├── libraries
│ │ │ ├── Maven__org_json_json_20180130.xml
│ │ │ ├── Maven__io_jsonwebtoken_jjwt_api_0_10_5.xml
│ │ │ ├── Maven__io_jsonwebtoken_jjwt_impl_0_10_5.xml
│ │ │ ├── Maven__io_jsonwebtoken_jjwt_jackson_0_10_5.xml
│ │ │ ├── Maven__io_jsonwebtoken_jjwt_orgjson_0_10_5.xml
│ │ │ ├── Maven__com_fasterxml_jackson_core_jackson_core_2_9_6.xml
│ │ │ ├── Maven__com_fasterxml_jackson_core_jackson_databind_2_9_6.xml
│ │ │ └── Maven__com_fasterxml_jackson_core_jackson_annotations_2_9_0.xml
│ │ └── compiler.xml
│ ├── src
│ │ └── main
│ │ │ └── java
│ │ │ └── JWTTestTokens.java
│ ├── jwttesttokens.iml
│ └── pom.xml
├── kibana.yml
├── LICENSE
├── README.md
└── docker-compose.yml
├── pa-to-es
├── requirements.txt
├── template7.json
├── template.json
├── LICENSE
├── README.md
├── node_tracker.py
├── result_parser.py
└── main.py
├── cloudformation-deployment
├── arch.png
├── package-to-s3.sh
├── README.md
└── od4es.json
├── CODE_OF_CONDUCT.md
├── open-distro-elasticsearch-kubernetes
├── helm
│ ├── CONTRIBUTORS.md
│ └── opendistro-es
│ │ ├── .helmignore
│ │ ├── Chart.yaml
│ │ └── templates
│ │ ├── kibana
│ │ ├── kibana-serviceaccount.yaml
│ │ ├── kibana-config-secret.yaml
│ │ ├── role.yaml
│ │ ├── rolebinding.yaml
│ │ ├── kibana-service.yaml
│ │ ├── kibana-ingress.yml
│ │ └── kibana-deployment.yaml
│ │ ├── elasticsearch
│ │ ├── elasticsearch-serviceaccount.yaml
│ │ ├── role.yaml
│ │ ├── master-svc.yaml
│ │ ├── es-data-svc.yaml
│ │ ├── rolebinding.yaml
│ │ ├── es-config-secret.yaml
│ │ ├── es-service.yaml
│ │ ├── es-data-pdb.yaml
│ │ ├── es-client-pdb.yaml
│ │ ├── es-master-pdb.yaml
│ │ ├── es-client-ingress.yaml
│ │ └── es-client-deploy.yaml
│ │ ├── psp.yml
│ │ └── _helpers.tpl
├── kibana
│ ├── 10-kb-namespace.yml
│ ├── 25-kb-bootstrap-secrets.yml
│ ├── 20-kb-configmap.yml
│ ├── 40-kb-service.yml
│ └── 30-kb-deploy.yml
├── elasticsearch
│ ├── 10-es-namespace.yml
│ ├── 20-es-service-account.yml
│ ├── 25-es-sc-gp2.yml
│ ├── 20-es-svc-discovery.yml
│ ├── 60-es-data-svc.yml
│ ├── 35-es-bootstrap-secrets.yml
│ ├── 35-es-service.yml
│ ├── 30-es-configmap.yml
│ ├── 40-es-master-deploy.yml
│ ├── 50-es-client-deploy.yml
│ └── 70-es-data-sts.yml
└── LICENSE
├── BGG
├── requirements.txt
├── LICENSE
├── README.md
├── es_manager.py
├── bgg_mapping.json
└── bgg.py
├── README.md
├── .github
└── PULL_REQUEST_TEMPLATE.md
├── iot_device_simulator
├── README.md
├── device.py
├── main.py
└── sensor.py
└── CONTRIBUTING.md
/NOTICE:
--------------------------------------------------------------------------------
1 | Community
2 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 |
--------------------------------------------------------------------------------
/es_sink/es_sink/__version__.py:
--------------------------------------------------------------------------------
# Package version as a tuple of ints so the components can be compared
# numerically if needed.
VERSION = (0, 1, 2)

# Dotted human-readable form, e.g. '0.1.2', derived from VERSION.
__version__ = '.'.join(str(part) for part in VERSION)
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/README:
--------------------------------------------------------------------------------
1 | To run:
2 | 1. mvn clean install
3 | 2. java -jar target/jwt-test-tokens-1.0-SNAPSHOT-jar-with-dependencies.jar
4 |
--------------------------------------------------------------------------------
/pa-to-es/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2019.6.16
2 | chardet==3.0.4
3 | idna==2.8
4 | pytz==2019.2
5 | requests==2.22.0
6 | urllib3==1.25.3
7 |
--------------------------------------------------------------------------------
/cloudformation-deployment/arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendistro-for-elasticsearch/sample-code/HEAD/cloudformation-deployment/arch.png
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 | This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html).
3 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
1 | # Contributors
2 | A special thanks to the following developers for their contributions to the initial helm chart PR (sorted alphabetically):
3 |
4 | - cryptk
5 | - DandyDeveloper
6 | - dcuenot
7 | - miro-grapeup
8 |
--------------------------------------------------------------------------------
/es_sink/es_sink/__init__.py:
--------------------------------------------------------------------------------
# Public submodules re-exported by ``from es_sink import *``.
__all__ = [
    'descriptor',
    'es_transport',
    'flushing_buffer',
    'line_buffer',
    'sqs_transport',
    'transport_exceptions',
    'transport_result',
    'transport_utils',
]
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/kibana/10-kb-namespace.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | apiVersion: v1
6 | kind: Namespace
7 | metadata:
8 | name: kibana
9 | labels:
10 | name: kibana
11 |
--------------------------------------------------------------------------------
/cloudformation-deployment/package-to-s3.sh:
--------------------------------------------------------------------------------
#! /bin/bash
# Upload the CloudFormation templates to an S3 bucket so the root stack
# can reference them. Replace the placeholder below with the name of a
# bucket you own before running.
bucket=_replace_with_your_bucket_name_

for file in od4es.json network.json seed.json data-nodes.json master-nodes.json client-nodes.json; do
    echo "Sending $file"
    # Remove any stale copy first, then upload the current version.
    # Expansions are quoted to survive bucket/file names containing
    # shell metacharacters (ShellCheck SC2086).
    aws s3 rm "s3://$bucket/$file"
    aws s3 cp "$file" "s3://$bucket/$file"
done
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/10-es-namespace.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | apiVersion: v1
6 | kind: Namespace
7 | metadata:
8 | name: elasticsearch
9 | labels:
10 | name: elasticsearch
11 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/20-es-service-account.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | apiVersion: v1
6 | kind: ServiceAccount
7 | metadata:
8 | labels:
9 | app: elasticsearch
10 | name: elasticsearch
11 | namespace: elasticsearch
12 |
--------------------------------------------------------------------------------
/BGG/requirements.txt:
--------------------------------------------------------------------------------
1 | boardgamegeek2==1.0.1
2 | boto3==1.12.14
3 | botocore==1.15.14
4 | certifi==2019.11.28
5 | chardet==3.0.4
6 | docutils==0.15.2
7 | idna==2.9
8 | jmespath==0.9.5
9 | python-dateutil==2.8.1
10 | pytz==2019.3
11 | requests==2.23.0
12 | requests-aws4auth==0.9
13 | requests-cache==0.5.2
14 | s3transfer==0.3.3
15 | six==1.14.0
16 | urllib3==1.25.8
17 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/kibana/25-kb-bootstrap-secrets.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: kibana-tls-data
5 | namespace: kibana
6 | type: Opaque
7 | stringData:
8 | kibana-crt.pem: |-
9 |
10 |
11 | kibana-key.pem: |-
12 |
13 |
14 | kibana-root-ca.pem: |-
15 |
16 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/25-es-sc-gp2.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | kind: StorageClass
6 | apiVersion: storage.k8s.io/v1
7 | metadata:
8 | name: elk-gp2
9 | namespace: elasticsearch
10 | provisioner: kubernetes.io/aws-ebs
11 | parameters:
12 | type: gp2
13 | reclaimPolicy: Delete
14 |
--------------------------------------------------------------------------------
/pa-to-es/template7.json:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: MIT-0
3 |
4 | {
5 | "index_patterns": ["pa-*"],
6 | "settings": {
7 | "number_of_shards": 1
8 | },
9 | "mappings": {
10 | "properties": {
11 | "@timestamp": {
12 | "type": "date"
13 | },
14 | "node_ip": {
15 | "type": "ip"
16 | }
17 | }
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 | *.tgz
23 | # Helm files
24 | OWNERS
25 |
--------------------------------------------------------------------------------
/pa-to-es/template.json:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: MIT-0
3 |
4 | {
5 | "index_patterns": ["pa-*"],
6 | "settings": {
7 | "number_of_shards": 1
8 | },
9 | "mappings": {
10 | "log": {
11 | "properties": {
12 | "@timestamp": {
13 | "type": "date"
14 | },
15 | "node_ip": {
16 | "type": "ip"
17 | }
18 | }
19 | }
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/20-es-svc-discovery.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | apiVersion: v1
6 | kind: Service
7 | metadata:
8 | name: elasticsearch-discovery
9 | namespace: elasticsearch
10 | labels:
11 | component: elasticsearch
12 | role: master
13 | spec:
14 | selector:
15 | component: elasticsearch
16 | role: master
17 | ports:
18 | - name: transport
19 | port: 9300
20 | protocol: TCP
21 | clusterIP: None
22 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/libraries/Maven__org_json_json_20180130.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/60-es-data-svc.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | apiVersion: v1
6 | kind: Service
7 | metadata:
8 | name: elasticsearch-data
9 | labels:
10 | component: elasticsearch
11 | role: data
12 | namespace: elasticsearch
13 | spec:
14 | ports:
15 | - port: 9300
16 | name: transport
17 | - port: 9200
18 | name: http
19 | - port: 9600
20 | name: metrics
21 | clusterIP: None
22 | selector:
23 | component: elasticsearch
24 | role: data
25 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/35-es-bootstrap-secrets.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: elasticsearch-tls-data
5 | namespace: elasticsearch
6 | type: Opaque
7 | stringData:
8 | elk-crt.pem: |-
9 |
10 |
11 | elk-key.pem: |-
12 |
13 |
14 | elk-root-ca.pem: |-
15 |
16 |
17 | admin-crt.pem: |-
18 |
19 |
20 | admin-key.pem: |-
21 |
22 |
23 | admin-root-ca.pem: |-
24 |
25 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/libraries/Maven__io_jsonwebtoken_jjwt_api_0_10_5.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/libraries/Maven__io_jsonwebtoken_jjwt_impl_0_10_5.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/es_sink/es_sink/transport_exceptions.py:
--------------------------------------------------------------------------------
'''
Copyright 2020, Amazon Web Services Inc.
This code is licensed under MIT license (see LICENSE.txt for details)

Python 3
'''


class TransportException(Exception):
    '''Raised by the transport layer for most issues.'''


class BadHTTPMethod(Exception):
    '''Raised for HTTP methods that the requests library does not provide.'''


class BadSink(Exception):
    '''Raised when the transport target descriptor is neither an
    ESDescriptor nor an SQSDescriptor.'''


class BadAuth(Exception):
    '''Raised if the transport client gets both SigV4 signing and HTTP Auth'''
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/libraries/Maven__io_jsonwebtoken_jjwt_jackson_0_10_5.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/libraries/Maven__io_jsonwebtoken_jjwt_orgjson_0_10_5.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/jwt-tokens/kibana.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | # Default Kibana configuration from kibana-docker.
6 |
7 | server.name: kibana
8 | server.host: "0"
9 | elasticsearch.url: https://localhost:9200
10 | elasticsearch.ssl.verificationMode: none
11 | elasticsearch.username: kibanaserver
12 | elasticsearch.password: kibanaserver
13 | elasticsearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
14 |
15 | opendistro_security.multitenancy.enabled: true
16 | opendistro_security.multitenancy.tenants.preferred: ["Private", "Global"]
17 | opendistro_security.readonly_mode.roles: ["kibana_read_only"]
18 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_core_2_9_6.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/compiler.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_databind_2_9_6.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_annotations_2_9_0.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/es_sink/es_sink/transport_result.py:
--------------------------------------------------------------------------------
'''
Copyright 2020, Amazon Web Services Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Python 3
'''

from collections import namedtuple

# Outcome of one transport call: status code, response body text,
# elapsed time in seconds, and payload size.
TransportResult = namedtuple(
    'TransportResult',
    ['status', 'result_text', 'took_s', 'size'])
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Sample-code
2 |
3 | We are using the community repo to track issues and requests for Open Distro for Elasticsearch that are not covered by the existing repos supporting the project. We welcome your ideas and suggestions for features or enhancements.
4 |
5 | ## Code of Conduct
6 |
7 | This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html).
8 |
9 |
10 | ## Security issue notifications
11 |
12 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
13 |
14 |
15 | ## Licensing
16 |
17 | See the [LICENSE](./LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
18 |
--------------------------------------------------------------------------------
/pa-to-es/LICENSE:
--------------------------------------------------------------------------------
1 | MIT No Attribution
2 |
3 | Copyright 2019, Amazon Web Services
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this
6 | software and associated documentation files (the "Software"), to deal in the Software
7 | without restriction, including without limitation the rights to use, copy, modify,
8 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
9 | permit persons to whom the Software is furnished to do so.
10 |
11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
12 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
13 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
14 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
15 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
16 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/jwt-tokens/LICENSE:
--------------------------------------------------------------------------------
1 | MIT No Attribution
2 |
3 | Copyright 2019, Amazon Web Services
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this
6 | software and associated documentation files (the "Software"), to deal in the Software
7 | without restriction, including without limitation the rights to use, copy, modify,
8 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
9 | permit persons to whom the Software is furnished to do so.
10 |
11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
12 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
13 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
14 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
15 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
16 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
17 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this
4 | software and associated documentation files (the "Software"), to deal in the Software
5 | without restriction, including without limitation the rights to use, copy, modify,
6 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
7 | permit persons to whom the Software is furnished to do so.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
10 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
11 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
12 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
13 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
14 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
15 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/Chart.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Viasat, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License").
4 | # You may not use this file except in compliance with the License.
5 | # A copy of the License is located at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # or in the "license" file accompanying this file. This file is distributed
10 | # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 | # express or implied. See the License for the specific language governing
12 | # permissions and limitations under the License.
13 |
14 | appVersion: 1.0.0
15 | description: 'Open Distro for Elasticsearch'
16 | engine: gotpl
17 | kubeVersion: ^1.10.0-0
18 | maintainers:
19 | - email: derek.heldt-werle@viasat.com
20 | name: Derek Heldt-Werle
21 | - email: kalvin.chau@viasat.com
22 | name: Kalvin Chau
23 | name: opendistro-es
24 | sources:
25 | - https://pages.git.viasat.com/ATG/charts
26 | version: 1.1.0
27 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/kibana/kibana-serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Viasat, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License").
4 | # You may not use this file except in compliance with the License.
5 | # A copy of the License is located at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # or in the "license" file accompanying this file. This file is distributed
10 | # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 | # express or implied. See the License for the specific language governing
12 | # permissions and limitations under the License.
13 |
14 | {{ if and .Values.kibana.serviceAccount.create .Values.kibana.enabled }}
15 | apiVersion: v1
16 | kind: ServiceAccount
17 | metadata:
18 | name: {{ template "opendistro-es.kibana.serviceAccountName" . }}
19 | namespace: {{ .Release.Namespace }}
20 | labels:
21 | {{ include "opendistro-es.labels.standard" . | indent 4 }}
22 | {{ end -}}
23 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/elasticsearch-serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Viasat, Inc.
2 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License").
5 | # You may not use this file except in compliance with the License.
6 | # A copy of the License is located at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # or in the "license" file accompanying this file. This file is distributed
11 | # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
12 | # express or implied. See the License for the specific language governing
13 | # permissions and limitations under the License.
14 |
15 | {{ if .Values.elasticsearch.serviceAccount.create }}
16 | apiVersion: v1
17 | kind: ServiceAccount
18 | metadata:
19 | name: {{ template "opendistro-es.elasticsearch.serviceAccountName" . }}
20 | namespace: {{ .Release.Namespace }}
21 | labels:
22 | {{ include "opendistro-es.labels.standard" . | indent 4 }}
23 | {{ end }}
24 |
25 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/kibana/20-kb-configmap.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | apiVersion: v1
6 | kind: ConfigMap
7 | metadata:
8 | name: kibana
9 | namespace: kibana
10 | labels:
11 | app: kibana
12 | data:
13 | kibana.yml: |-
14 | ---
15 | # Default Kibana configuration from kibana-docker.
16 | server.name: kibana
17 | server.host: "0"
18 |
19 | # Replace with Elasticsearch DNS name picked during Service deployment
20 | elasticsearch.url: ${ELASTICSEARCH_URL}
21 | elasticsearch.requestTimeout: 360000
22 |
23 | # Kibana TLS Config
24 | server.ssl.enabled: true
25 | server.ssl.key: /usr/share/kibana/config/kibana-key.pem
26 | server.ssl.certificate: /usr/share/kibana/config/kibana-crt.pem
27 | server.ssl.keyPassphrase: ${KEY_PASSPHRASE}
28 | elasticsearch.ssl.certificateAuthorities: /usr/share/kibana/config/kibana-root-ca.pem
29 |
30 | opendistro_security.cookie.secure: true
31 | opendistro_security.cookie.password: ${COOKIE_PASS}
32 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/kibana/kibana-config-secret.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Viasat, Inc.
2 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License").
5 | # You may not use this file except in compliance with the License.
6 | # A copy of the License is located at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # or in the "license" file accompanying this file. This file is distributed
11 | # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
12 | # express or implied. See the License for the specific language governing
13 | # permissions and limitations under the License.
14 |
15 | {{- if and .Values.kibana.enabled .Values.kibana.config }}
16 | apiVersion: v1
17 | kind: Secret
18 | metadata:
19 | name: {{ template "opendistro-es.fullname" . }}-kibana-config
20 | namespace: {{ .Release.Namespace }}
21 | labels:
22 | {{ include "opendistro-es.labels.standard" . | indent 4 }}
23 | data:
24 | kibana.yml: {{ toYaml .Values.kibana.config | b64enc }}
25 | {{- end }}
26 |
--------------------------------------------------------------------------------
/BGG/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Amazon Web Services
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/es_sink/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Amazon Web Services
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/role.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Role granting the Elasticsearch service account 'use' of the chart's
# PodSecurityPolicy. Only rendered when RBAC is enabled.
{{- if .Values.global.rbac.enabled }}
# Use the GA RBAC API (v1, available since Kubernetes 1.8) for consistency
# with this chart's RoleBinding templates; rbac/v1beta1 is deprecated and
# removed in Kubernetes 1.22.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ template "opendistro-es.elasticsearch.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - {{ template "opendistro-es.fullname" . }}-psp
{{- end }}
29 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/kibana/role.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Role granting the Kibana service account 'use' of the chart's
# PodSecurityPolicy. Rendered only when both RBAC and Kibana are enabled.
{{- if and .Values.global.rbac.enabled .Values.kibana.enabled }}
# Use the GA RBAC API (v1, available since Kubernetes 1.8) for consistency
# with this chart's RoleBinding templates; rbac/v1beta1 is deprecated and
# removed in Kubernetes 1.22.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ template "opendistro-es.kibana.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - {{ template "opendistro-es.fullname" . }}-psp
{{- end }}
29 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/master-svc.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Headless discovery service for the Elasticsearch master nodes: clusterIP is
# None, so DNS resolves to the individual master pod IPs for node-to-node
# discovery over the transport port.
{{- if .Values.elasticsearch.master.enabled }}
apiVersion: v1
kind: Service
metadata:
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
    role: master
  name: {{ template "opendistro-es.fullname" . }}-discovery
  namespace: {{ .Release.Namespace }}
spec:
  ports:
  # 9300 is the Elasticsearch transport (inter-node) port.
  - port: 9300
    protocol: TCP
  # Headless service: no cluster-internal virtual IP.
  clusterIP: None
  selector:
    role: master
{{- end }}
32 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/es-data-svc.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Headless service exposing the Elasticsearch data nodes on the transport,
# HTTP, and metrics ports.
{{- if .Values.elasticsearch.data.enabled }}
apiVersion: v1
kind: Service
metadata:
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
    role: data
  name: {{ template "opendistro-es.fullname" . }}-data-svc
  namespace: {{ .Release.Namespace }}
spec:
  ports:
  - port: 9300
    name: transport
  - port: 9200
    name: http
  # 9600 is the Open Distro Performance Analyzer port.
  - port: 9600
    name: metrics
  # Headless: DNS resolves to individual data pod IPs.
  clusterIP: None
  selector:
    role: data
{{- end }}
36 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/rolebinding.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Binds the PSP-use Role (elasticsearch/role.yaml) to the Elasticsearch
# service account. Only rendered when RBAC is enabled.
{{- if .Values.global.rbac.enabled }}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
  name: {{ template "opendistro-es.fullname" . }}-elastic-rolebinding
  # NOTE(review): no metadata.namespace is set here, unlike the Role it
  # references; Helm installs into the release namespace by default, but
  # confirm the two objects land in the same namespace.
roleRef:
  kind: Role
  name: {{ template "opendistro-es.elasticsearch.serviceAccountName" . }}
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: {{ template "opendistro-es.elasticsearch.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- end }}
30 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/kibana/rolebinding.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Binds the PSP-use Role (kibana/role.yaml) to the Kibana service account.
# Rendered only when both RBAC and Kibana are enabled.
{{- if and .Values.global.rbac.enabled .Values.kibana.enabled }}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
  name: {{ template "opendistro-es.fullname" . }}-kibana-rolebinding
  # NOTE(review): no metadata.namespace is set here, unlike the Role it
  # references — confirm both objects land in the release namespace.
roleRef:
  kind: Role
  name: {{ template "opendistro-es.kibana.serviceAccountName" . }}
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: {{ template "opendistro-es.kibana.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- end }}
30 |
--------------------------------------------------------------------------------
/es_sink/__version__.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2020 Amazon Web Services
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | VERSION = (0, 1, 2)
24 |
25 | __version__ = '.'.join(map(str, VERSION))
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | *Issue #, if available:*
2 |
3 | *Description of changes:*
4 |
5 |
6 | By making a contribution to this project, I certify that:
7 |
8 | (a) The contribution was created in whole or in part by me and I
9 | have the right to submit it under the open source license
10 | indicated in the file; or
11 |
12 | (b) The contribution is based upon previous work that, to the best
13 | of my knowledge, is covered under an appropriate open source
14 | license and I have the right under that license to submit that
15 | work with modifications, whether created in whole or in part
16 | by me, under the same open source license (unless I am
17 | permitted to submit under a different license), as indicated
18 | in the file; or
19 |
20 | (c) The contribution was provided directly to me by some other
21 | person who certified (a), (b) or (c) and I have not modified
22 | it.
23 |
24 | (d) I understand and agree that this project and the contribution
25 | are public and that a record of the contribution (including all
26 | personal information I submit with it, including my sign-off) is
27 | maintained indefinitely and may be redistributed consistent with
28 | this project or the open source license(s) involved.
29 |
30 |
--------------------------------------------------------------------------------
/jwt-tokens/README.md:
--------------------------------------------------------------------------------
1 | # JWT Tokens, sample code and config
2 |
This folder contains the sample code for the blog post titled "Use JSON Web
Tokens to authenticate in Open Distro for Elasticsearch and Kibana".
5 |
6 | The top level files include the config files and docker-compose file to
7 | bring up an Elasticsearch cluster with a single node and a container with Kibana.
8 | The token-gen folder contains java code and build file for an application that
9 | generates jwt tokens.
10 |
11 | ## Requirements
12 |
The code requires:
- JDK 12
- Apache Maven
16 |
17 | ## Code of Conduct
18 |
19 | This project has adopted an [Open Source Code of
20 | Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html).
21 |
22 |
23 | ## Security issue notifications
24 |
25 | If you discover a potential security issue in this project we ask that you
26 | notify AWS/Amazon Security via our [vulnerability reporting
27 | page](http://aws.amazon.com/security/vulnerability-reporting/). Please do
28 | **not** create a public GitHub issue.
29 |
30 | ## Licensing
31 |
32 | See the [LICENSE](./LICENSE) file for our project's licensing. We will ask you
33 | to confirm the licensing of your contribution.
34 |
35 |
36 | ## Copyright
37 |
38 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
39 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/kibana/kibana-service.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Service fronting the Kibana deployment; the service type and annotations
# come from values. Only rendered when Kibana is enabled.
{{- if .Values.kibana.enabled }}
apiVersion: v1
kind: Service
metadata:
  annotations:
{{ toYaml .Values.kibana.service.annotations | indent 4 }}
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
  name: {{ template "opendistro-es.fullname" . }}-kibana-svc
  # NOTE(review): no metadata.namespace set; other templates in this chart
  # pin .Release.Namespace explicitly — confirm this is intentional.
spec:
  ports:
  - name: kibana-svc
    # externalPort is the service-facing port; targetPort is the container port.
    port: {{ .Values.kibana.externalPort }}
    targetPort: {{ .Values.kibana.port }}
  selector:
    app: {{ template "opendistro-es.fullname" . }}-kibana
  type: {{ .Values.kibana.service.type }}
{{- end }}
33 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/es-config-secret.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Opaque Secret carrying the Elasticsearch configuration files, each
# base64-encoded for mounting into the pods.
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "opendistro-es.fullname" . }}-es-config
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
type: Opaque
data:
{{- if .Values.elasticsearch.config }}
  elasticsearch.yml: {{ toYaml .Values.elasticsearch.config | b64enc | quote }}
{{- end }}
{{- if .Values.elasticsearch.log4jConfig }}
  log4j2.properties: {{ .Values.elasticsearch.log4jConfig | b64enc | quote }}
{{- end }}
  # NOTE(review): rendered unconditionally, unlike the two entries above —
  # confirm values.yaml always supplies elasticsearch.loggingConfig.
  logging.yml: {{ toYaml .Values.elasticsearch.loggingConfig | b64enc | quote }}
31 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/src/main/java/JWTTestTokens.java:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: MIT-0
3 | import io.jsonwebtoken.Jwts;
4 | import io.jsonwebtoken.SignatureAlgorithm;
5 | import io.jsonwebtoken.io.Encoders;
6 | import io.jsonwebtoken.security.Keys;
7 | import java.security.Key;
8 | import java.util.Date;
9 | import java.util.HashMap;
10 |
11 | public class JWTTestTokens {
12 | public static void main(String[] args) {
13 | Key key = Keys.secretKeyFor(SignatureAlgorithm.HS256);
14 | Date exp = new Date(System.currentTimeMillis() + 1000000);
15 | HashMap hm = new HashMap<>();
16 | hm.put("roles","admin");
17 | String jws = Jwts.builder()
18 | .setClaims(hm)
19 | .setIssuer("https://localhost")
20 | .setSubject("admin")
21 | .setExpiration(exp)
22 | .signWith(key).compact();
23 | System.out.println("Token:");
24 | System.out.println(jws);
25 | if(Jwts.parser().setSigningKey(key).parseClaimsJws(jws).getBody().getSubject().equals("test")) {
26 | System.out.println("test");
27 | }
28 | String encoded = Encoders.BASE64.encode(key.getEncoded());
29 | System.out.println("Shared secret:");
30 | System.out.println(encoded);
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/jwt-tokens/docker-compose.yml:
--------------------------------------------------------------------------------
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

# Single-node Open Distro for Elasticsearch cluster plus Kibana, with the
# security plugin config (config.yml) and kibana.yml mounted from this
# folder for the JWT-authentication example.
version: '3'
services:
  odfe-node1:
    image: amazon/opendistro-for-elasticsearch:latest
    container_name: odfe-node1
    environment:
      - cluster.name=odfe-cluster
      - bootstrap.memory_lock=true # along with the memlock settings below, disables swapping
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - odfe-data1:/usr/share/elasticsearch/data
      # Security-plugin configuration (JWT auth domain, shared secret, etc.).
      - ./config.yml:/usr/share/elasticsearch/plugins/opendistro_security/securityconfig/config.yml
    ports:
      - 9200:9200
      - 9600:9600 # required for Performance Analyzer
    networks:
      - odfe-net
  kibana:
    image: amazon/opendistro-for-elasticsearch-kibana:latest
    container_name: odfe-kibana
    ports:
      - 5601:5601
    expose:
      - "5601"
    environment:
      ELASTICSEARCH_URL: https://odfe-node1:9200
    volumes:
      - ./kibana.yml:/usr/share/kibana/config/kibana.yml
    networks:
      - odfe-net

volumes:
  odfe-data1:

networks:
  odfe-net:
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/es-service.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Client-facing service for the Elasticsearch client/coordinating nodes; the
# service type and annotations come from values.
{{- if .Values.elasticsearch.client.enabled }}
kind: Service
apiVersion: v1
metadata:
  annotations:
{{ toYaml .Values.elasticsearch.client.service.annotations | indent 4 }}
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
    role: client
  name: {{ template "opendistro-es.fullname" . }}-client-service
  namespace: {{ .Release.Namespace }}
spec:
  ports:
  - name: http
    port: 9200
  - name: transport
    port: 9300
  # 9600 is the Open Distro Performance Analyzer port.
  - name: metrics
    port: 9600
  selector:
    role: client
  type: {{ .Values.elasticsearch.client.service.type }}
{{- end }}
38 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/es-data-pdb.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# PodDisruptionBudget for the Elasticsearch data nodes.
{{- if and .Values.elasticsearch.data.podDisruptionBudget.enabled .Values.elasticsearch.data.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: {{ template "opendistro-es.fullname" . }}-data-pdb
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
spec:
{{- if .Values.elasticsearch.data.podDisruptionBudget.minAvailable }}
  # BUG FIX: was .Values.data.podDisruptionBudget.minAvailable (missing the
  # "elasticsearch" prefix used by the guard above and maxUnavailable below),
  # which breaks rendering whenever minAvailable is set.
  minAvailable: {{ .Values.elasticsearch.data.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.elasticsearch.data.podDisruptionBudget.maxUnavailable }}
  maxUnavailable: {{ .Values.elasticsearch.data.podDisruptionBudget.maxUnavailable }}
{{- end }}
  selector:
    matchLabels:
      app: {{ template "opendistro-es.fullname" . }}
      role: data
{{- end }}
34 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/es-client-pdb.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# PodDisruptionBudget for the Elasticsearch client/coordinating nodes.
{{- if and .Values.elasticsearch.client.podDisruptionBudget.enabled .Values.elasticsearch.client.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: {{ template "opendistro-es.fullname" . }}-client-pdb
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
spec:
{{- if .Values.elasticsearch.client.podDisruptionBudget.minAvailable }}
  # BUG FIX: was .Values.client.podDisruptionBudget.minAvailable (missing the
  # "elasticsearch" prefix used by the guard above and maxUnavailable below),
  # which breaks rendering whenever minAvailable is set.
  minAvailable: {{ .Values.elasticsearch.client.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.elasticsearch.client.podDisruptionBudget.maxUnavailable }}
  maxUnavailable: {{ .Values.elasticsearch.client.podDisruptionBudget.maxUnavailable }}
{{- end }}
  selector:
    matchLabels:
      app: {{ template "opendistro-es.fullname" . }}
      role: client
{{- end }}
34 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/es-master-pdb.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# PodDisruptionBudget for the Elasticsearch master nodes.
{{- if and .Values.elasticsearch.master.podDisruptionBudget.enabled .Values.elasticsearch.master.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: {{ template "opendistro-es.fullname" . }}-master-pdb
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
spec:
{{- if .Values.elasticsearch.master.podDisruptionBudget.minAvailable }}
  # BUG FIX: was .Values.master.podDisruptionBudget.minAvailable (missing the
  # "elasticsearch" prefix used by the guard above and maxUnavailable below),
  # which breaks rendering whenever minAvailable is set.
  minAvailable: {{ .Values.elasticsearch.master.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.elasticsearch.master.podDisruptionBudget.maxUnavailable }}
  maxUnavailable: {{ .Values.elasticsearch.master.podDisruptionBudget.maxUnavailable }}
{{- end }}
  selector:
    matchLabels:
      app: {{ template "opendistro-es.fullname" . }}
      role: master
{{- end }}
34 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/psp.yml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# PodSecurityPolicy used by the Elasticsearch and Kibana service accounts
# (granted via the role.yaml templates in this chart). Only rendered when
# .Values.global.psp.create is true.
{{- if .Values.global.psp.create }}
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
  name: {{ template "opendistro-es.fullname" . }}-psp
spec:
  # NOTE(review): privileged: true is very permissive for a PSP — confirm the
  # pods actually require it before keeping this setting.
  privileged: true
  #requiredDropCapabilities:
  volumes:
    - 'configMap'
    - 'emptyDir'
    - 'projected'
    - 'secret'
    - 'downwardAPI'
    - 'persistentVolumeClaim'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      - min: 1
        max: 65535
  readOnlyRootFilesystem: false
{{- end }}
50 |
--------------------------------------------------------------------------------
/pa-to-es/README.md:
--------------------------------------------------------------------------------
1 | # Performance analyzer output to Elasticsearch
2 |
3 | This library provides a main.py script that collects all of the metrics
4 | surfaced by Performance Analyzer, across their dimensions and aggregations. It
5 | pushes those metrics to Elasticsearch for visualization with Kibana.
6 |
7 | This initial version works with localhost only, presumably targeted at Open
8 | Distro for Elasticsearch, running locally in containers.
9 |
10 | ## Requirements
11 |
The code requires Python 3 and the following libraries:
- requests (HTTP client)
- pytz
16 |
17 | ## Basic Usage
18 |
19 | python3 main.py
20 |
21 | NOTE: before running, apply the template in template.json to your cluster. If
22 | you don't set the template, Elasticsearch will interpret the timestamp as a
23 | long integer.
24 |
25 | ## Code of Conduct
26 |
27 | This project has adopted an [Open Source Code of
28 | Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html).
29 |
30 |
31 | ## Security issue notifications
32 |
33 | If you discover a potential security issue in this project we ask that you
34 | notify AWS/Amazon Security via our [vulnerability reporting
35 | page](http://aws.amazon.com/security/vulnerability-reporting/). Please do
36 | **not** create a public GitHub issue.
37 |
38 |
39 | ## Licensing
40 |
41 | See the [LICENSE](./LICENSE) file for our project's licensing. We will ask you
42 | to confirm the licensing of your contribution.
43 |
44 |
45 | ## Copyright
46 |
47 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
48 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/kibana/40-kb-service.yml:
--------------------------------------------------------------------------------
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

# Internet-facing NLB service for Kibana: TLS terminates at the load balancer
# using an ACM certificate, and traffic is forwarded to the Kibana pods on 5601.
kind: Service
apiVersion: v1
metadata:
  name: kibana
  namespace: kibana
  labels:
    app: kibana
  annotations:
    # Service external-dns has to be deployed for this A record to be created in AWS Route53
    external-dns.alpha.kubernetes.io/hostname: kibana.sec.example.com

    # Load Balancer type that will be launched in AWS, ELB or NLB.
    service.beta.kubernetes.io/aws-load-balancer-type: "nlb"

    # Use ACM cert that is authorized for domain chosen above
    # Replace with ARN of the working ACM cert
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:us-east-1:111222333444:certificate/7ace9dc0-84fb-4eb6-b70b-059fc489cd20"
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "https"

    # Maps traffic to 443
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"

    # You may want to increment ELB idle conn timeout for problems with WebSockets and Server-Side Events
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
spec:
  loadBalancerSourceRanges:
  # Open to the world since Kibana is now auth-ed
  - 0.0.0.0/0
  type: LoadBalancer
  selector:
    component: kibana
  ports:
  - name: https
    port: 443
    targetPort: 5601
--------------------------------------------------------------------------------
/es_sink/es_sink/sqs_transport.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2020, Amazon Web Services Inc.
3 | This code is licensed under MIT license (see LICENSE.txt for details)
4 |
5 | Python 3
6 |
PyLint complains about the Queue member of the boto3 SQS resource. It also
complains that the SQSTransport class has too few methods. This disables both.'''
9 | # pylint: disable=no-member,R0903
10 |
11 |
12 | import boto3
13 | from es_sink.transport_result import TransportResult
14 | from es_sink.transport_utils import wall_time
15 |
class SQSTransport():
    '''Transport that sends messages to an Amazon SQS queue.

    Wraps the boto3 SQS resource to provide the same send()/TransportResult
    facade as the Elasticsearch transports in this package.'''

    def __init__(self, target_descriptor):
        '''target_descriptor must provide the queue's region and sqs_url.'''
        # (Docstring fixed: the old one mentioned a signed=True sigV4 flag
        # copy-pasted from the ES transport; this class has no such parameter.)
        self.target_descriptor = target_descriptor

    def send(self, body):
        '''Send a message to SQS. Returns a TransportResult.

        result_text is always empty for SQS; size is taken from the HTTP
        content-length response header (0 if the header is absent).'''
        sqs = boto3.resource('sqs', region_name=self.target_descriptor.region)
        queue = sqs.Queue(self.target_descriptor.sqs_url)
        (result, took_time) = wall_time(queue.send_message, MessageBody=body)
        metadata = result['ResponseMetadata']
        status = int(metadata['HTTPStatusCode'])
        # .get() guards against a missing content-length header; the previous
        # direct lookup raised KeyError when SQS omitted it. Also removed a
        # stray debug print(result) that leaked response metadata to stdout
        # on every send.
        size = int(metadata['HTTPHeaders'].get('content-length', 0))
        return TransportResult(status=status,
                               result_text='',
                               took_s=took_time,
                               size=size)
38 |
--------------------------------------------------------------------------------
/BGG/README.md:
--------------------------------------------------------------------------------
1 | # BGG - Download and search board game data from BoardgameGeek.com
2 |
3 | This sample application uses the boardgamegeek2 library (available on PyPi) to
4 | download names, ids, and full descriptions for board games from
5 | boardgamegeek.com's API. It uses the es_sink library to send the board game
6 | details to an Open Distro for Elasticsearch cluster running on localhost. Of
7 | course, you can change that and send it to any Elasticsearch cluster.
8 |
9 | # Prerequisite
10 |
11 | This example uses the es_sink library, provided in this repo. To install,
12 |
13 | ```
> pip install ./es_sink
15 | ```
16 |
17 |
18 | # Usage
19 |
20 | First, run bgg.py to download names and details from boardgamegeek.
21 |
```$ python bgg.py --names names.txt --ids ids.txt --details-file details.txt --download-ids --download-details```
23 |
24 | The ```--names``` parameter lets you specify a file that will hold the names
25 | from boardgamegeek, the ```--ids``` parameter lets you specify a file to
26 | hold the boardgamegeek ids, and the ```--details-file``` parameter lets you
27 | specify a file to hold the game details. Note: these files must be different.
28 |
29 | When you're ready to upload to Elasticsearch, run
30 |
```$ python bgg.py --names names.txt --ids ids.txt --details-file details.txt --send-to-es```
32 |
33 | There is currently no command line support for changing the endpoint. You can
34 | edit the ESDescriptor in es_manager.py to change the destination location,
35 | authentication method, and ES version. See the __init__ function in
36 | es_manager.py. See the es_sink library for details on how to change the
37 | parameters.
38 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/kibana/kibana-ingress.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Viasat, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License").
4 | # You may not use this file except in compliance with the License.
5 | # A copy of the License is located at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # or in the "license" file accompanying this file. This file is distributed
10 | # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 | # express or implied. See the License for the specific language governing
12 | # permissions and limitations under the License.
13 |
{{- if and .Values.kibana.ingress.enabled .Values.kibana.enabled }}
# Route to the Kibana service created elsewhere in this chart.
{{- $serviceName:= printf "%s-%s" (include "opendistro-es.fullname" .) "kibana-svc" }}
{{- $servicePort := .Values.kibana.externalPort }}
# NOTE(review): extensions/v1beta1 Ingress is deprecated and was removed in
# Kubernetes 1.22; migrate to networking.k8s.io/v1 when the chart targets
# newer clusters.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
  name: {{ template "opendistro-es.fullname" . }}-kibana
  annotations:
    {{- range $key, $value := .Values.kibana.ingress.annotations }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
spec:
  rules:
  {{- range .Values.kibana.ingress.hosts }}
  # Each entry may be "host/path"; split into the host and the path parts.
  {{- $url := splitList "/" . }}
  - host: {{ first $url }}
    http:
      paths:
        - path: /{{ rest $url | join "/" }}
          backend:
            serviceName: {{ $serviceName }}
            servicePort: {{ $servicePort }}
  {{- end -}}
  {{- if .Values.kibana.ingress.tls }}
  tls:
{{ toYaml .Values.kibana.ingress.tls | indent 4 }}
  {{- end }}
{{- end }}
44 |
45 |
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/jwttesttokens.iml:
--------------------------------------------------------------------------------
1 |
2 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/es-client-ingress.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Viasat, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License").
4 | # You may not use this file except in compliance with the License.
5 | # A copy of the License is located at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # or in the "license" file accompanying this file. This file is distributed
10 | # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 | # express or implied. See the License for the specific language governing
12 | # permissions and limitations under the License.
13 |
{{- if and .Values.elasticsearch.client.ingress.enabled .Values.elasticsearch.client.enabled }}
# Route to the client-node service created elsewhere in this chart.
{{ $fullName := printf "%s-%s" (include "opendistro-es.fullname" .) "client-service" }}
{{ $ingressPath := .Values.elasticsearch.client.ingress.path }}
kind: Ingress
# NOTE(review): extensions/v1beta1 Ingress is deprecated and was removed in
# Kubernetes 1.22; migrate to networking.k8s.io/v1 for newer clusters.
apiVersion: extensions/v1beta1
metadata:
  name: {{ $fullName }}
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
{{- with .Values.elasticsearch.client.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.elasticsearch.client.ingress.tls }}
  tls:
  {{- range .Values.elasticsearch.client.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.elasticsearch.client.ingress.hosts }}
    - host: {{ . | quote }}
      http:
        paths:
          - path: {{ $ingressPath }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: http
  {{- end }}
{{- end }}
49 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/35-es-service.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
---
apiVersion: v1
kind: Service
metadata:
  annotations:
    # Service external-dns has to be deployed for this A record to be created in AWS Route53
    external-dns.alpha.kubernetes.io/hostname: elk.sec.example.com

    # Defined ELB backend protocol as HTTPS to allow connection to Elasticsearch API
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: https

    # Load Balancer type that will be launched in AWS, ELB or NLB.
    service.beta.kubernetes.io/aws-load-balancer-type: "nlb"

    # ARN of ACM certificate registered to the deployed ELB for handling connections over TLS
    # ACM certificate should be issued to the DNS hostname defined earlier (elk.sec.example.com)
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:us-east-1:111222333444:certificate/c69f6022-b24f-43d9-b9c8-dfe288d9443d"
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
    service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true"
    service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60"
    service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"

    # Annotation to create internal only ELB
    # NOTE(review): historically any non-empty value (such as "0.0.0.0/0")
    # enabled an internal ELB; newer cloud-provider versions document "true".
    # Confirm against the cluster's Kubernetes version.
    service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
  labels:
    component: elasticsearch
    role: client
  name: elasticsearch
  namespace: elasticsearch
spec:
  ports:
    - name: http
      port: 9200
    - name: transport
      port: 9300
    - name: metrics
      port: 9600
  selector:
    component: elasticsearch
    role: client
  type: LoadBalancer
--------------------------------------------------------------------------------
/es_sink/es_sink/examples.py:
--------------------------------------------------------------------------------
'''Examples of using the es_sink library: send documents to a local
Open Distro cluster (first example) and to an Amazon Elasticsearch
Service domain with SigV4-signed requests (second example; fill in the
endpoint before running).'''

import flushing_buffer
from descriptor import ESDescriptor, IndexDescriptor
import es_auth
from es_transport import ESTransport


################################################################################
# Example connecting to localhost with http auth
auth = es_auth.ESHttpAuth('admin', 'admin')
index_descriptor = IndexDescriptor(es_index='logs', es_v7=True, timestamped=True)
LOCALHOST_ESDESCRIPTOR = ESDescriptor("https://localhost:9200/", index_descriptor,
                                      auth=auth)

buffer = flushing_buffer.flushing_buffer_factory(LOCALHOST_ESDESCRIPTOR,
                                                 flush_trigger=1)

buffer.add_log_line('{"field1": "value1", "field2": "value2"}')

raw_transport = ESTransport(LOCALHOST_ESDESCRIPTOR)
result = raw_transport.send('get', "https://localhost:9200/logs*/_search")
print(result)


################################################################################
# Example connecting to Amazon Elasticsearch Service with signed requests

AMAZON_ES_ENDPOINT = "https://your endpoint here"
amzn_auth = es_auth.ESSigV4Auth()
amzn_index_descriptor = IndexDescriptor(es_index='logs', es_v7=True,
                                        timestamped=True)
AMAZON_ES_DESCRIPTOR = ESDescriptor(AMAZON_ES_ENDPOINT, amzn_index_descriptor,
                                    auth=amzn_auth)

buffer2 = flushing_buffer.flushing_buffer_factory(AMAZON_ES_DESCRIPTOR,
                                                  flush_trigger=1)

print('Sending 1 doc to Amazon ES')
buffer2.add_log_line('{"field1": "value1", "field2": "value2"}')

print('Searching')
raw_transport2 = ESTransport(AMAZON_ES_DESCRIPTOR)
# Bug fix: the search URL previously had no host ("https:///logs*/_search").
# Build it from the configured endpoint instead.
result = raw_transport2.send(
    'get',
    "{}/logs*/_search".format(AMAZON_ES_ENDPOINT.rstrip('/')))
print(result)
46 |
47 |
--------------------------------------------------------------------------------
/iot_device_simulator/README.md:
--------------------------------------------------------------------------------
1 | # IoT Device and Sensor Simulator
2 |
3 | This library provides a set of Python classes that simulates sensors and
4 | devices. Devices are collections of sensors. The library also includes classes
5 | for buffering log lines and sending to Elasticsearch.
6 |
7 |
8 | ## Requirements
9 |
10 | The code requires Python 3
11 |
12 | This example uses the es_sink library, provided in this repo. To install,
13 |
14 | ```
> pip install ./es_sink
16 | ```
17 |
18 |
19 | ## Basic Usage
20 |
21 | main.py illustrates the basic usage of the library. Construct and use a
22 | FlushingESBuffer, pointing at the Elasticsearch endpoint. Construct devices with
23 | Sensors. Call each Device's report() method in a loop, and send the result
24 | to the FlushingESBuffer.
25 |
26 | There are 4 Sensor types:
27 |
28 | 1. SineSensor - Produces a sin wave, based on the system clock between a bottom
29 | and a top value. You can add random fuzz.
30 | 2. ConstSensor - Produces a constant value. Can be modified with Random fuzz.
31 | 3. DriftingConstSensor - A ConstSensor that drifts randomly from its starting
32 | value.
33 | 4. MonotonicSensor - Produces a monotonically changing value, based on a start
34 | point, delta amount, and fuzz.
35 |
36 |
37 | ## Code of Conduct
38 |
39 | This project has adopted an [Open Source Code of
40 | Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html).
41 |
42 |
43 | ## Security issue notifications
44 |
45 | If you discover a potential security issue in this project we ask that you
46 | notify AWS/Amazon Security via our [vulnerability reporting
47 | page](http://aws.amazon.com/security/vulnerability-reporting/). Please do
48 | **not** create a public GitHub issue.
49 |
50 |
51 | ## Licensing
52 |
53 | See the [LICENSE](./LICENSE) file for our project's licensing. We will ask you
54 | to confirm the licensing of your contribution.
55 |
56 |
57 | ## Copyright
58 |
59 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
60 |
--------------------------------------------------------------------------------
/pa-to-es/node_tracker.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | SPDX-License-Identifier: MIT-0
4 |
5 | This walks all of the combinations of metrics, dimensions, and aggregations.
6 | METRICS - contains descriptions of the metric to be pulled and the dimensions
7 | for that metric. See also the docs here:
8 | https://opendistro.github.io/for-elasticsearch-docs/docs/pa/reference/.
9 | '''
10 | import json
11 |
12 | import requests
13 |
class NodeTracker():
    ''' Discovers the nodes in the cluster and keeps a map from node name
        to IP address. Build the object, then call ip() to look up an
        address by node name.'''

    def __init__(self, args):
        ''' Store the command-line args and load the node-name -> IP map.'''
        self._args = args
        self._nodes_map = dict()
        self._retrieve_node_ids_and_ips()

    def _retrieve_node_ids_and_ips(self):
        ''' Call the _nodes API to fetch the name and IP of every node in
            the cluster.'''
        url = 'https://{}:9200/_nodes'.format(self._args.endpoint)
        ### HACK ALERT !!! TODO TODO TODO!!! Add real auth ###
        response = requests.get(url,
                                auth=('admin', 'admin'),
                                verify=False)
        if int(response.status_code) >= 300:
            raise Exception('Bad response code trying to get node names and ips')
        json_response = json.loads(response.text)
        if 'nodes' not in json_response:
            raise Exception('Bad response - no nodes')
        self._nodes_map.update(
            (node_id, values['ip'])
            for node_id, values in json_response['nodes'].items())

    def ip(self, node_name):
        ''' Return the IP address for node_name; raise ValueError for an
            unknown name.'''
        try:
            return self._nodes_map[node_name]
        except KeyError:
            raise ValueError('{} is not a recognized node name'.format(node_name))

    def print_table(self):
        ''' Dump the name -> IP table to stdout.'''
        for node_name, address in self._nodes_map.items():
            print(' {} {}'.format(node_name, address))
49 |
--------------------------------------------------------------------------------
/iot_device_simulator/device.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2019, Amazon Web Services Inc.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 |
16 | Python 3
17 | '''
18 |
19 | import json
20 | import time
21 | import uuid
22 |
23 | import boto3
24 | from pytz import timezone
25 | from sensor import SineSensor, ConstSensor, DriftingSensor
26 |
class Device():
    '''A device holds a list of sensors and generates report()s, one
       record per sensor.'''

    def __init__(self, device_id=None, target_tz='US/Pacific'):
        '''device_id: optional stable identifier; a uuid1-based id is
           generated when omitted.
           target_tz: tz database name for this device's timezone.'''
        if device_id:
            self.device_id = device_id
        else:
            self.device_id = "device_%s" % str(uuid.uuid1())
        self.timezone = timezone(target_tz)
        self.sensors = list()

    def add_sensor(self, sensor_to_add):
        '''Add a sensor to the list'''
        self.sensors.append(sensor_to_add)

    def remove_sensor(self, sensor_name):
        '''Remove every sensor whose value name equals sensor_name.

           Bug fix: the original compared the bound method object
           (sensor.get_value_name) with the string, which is never equal,
           so no sensor was ever removed. The method must be called.'''
        self.sensors = [sensor for sensor in self.sensors
                        if sensor.get_value_name() != sensor_name]

    def report(self):
        '''Generate a list of records for this device. Each element is a
        dict with a single sensor report.
        TODO The return should be hierarchical rather than flat.'''
        ret = list()
        for sensor in self.sensors:
            rep = sensor.report()
            rep['device_id'] = self.device_id
            rep['unique_id'] = "%s:%s" % (rep['device_id'], rep['sensor_id'])
            ret.append(rep)
        return ret
59 |
--------------------------------------------------------------------------------
/es_sink/es_sink/es_auth.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2020, Amazon Web Services Inc.
3 | This code is licensed under MIT license (see LICENSE.txt for details)
4 |
5 | Python 3
6 |
7 | Provides a class hierarchy to specify authentication method for the
8 | ESDescriptor.
9 |
10 | I'm not clear that the class hierarchy is buying me anything here. I
11 | thought about a dict or namedtuple for these as well. The one thing
12 | I get is in the transport layer, I can dispatch on the auth type. This
13 | way is also more self-documenting.
14 |
15 | '''
16 |
17 |
18 | from abc import ABC, abstractmethod
19 |
20 |
class ESAuth(ABC):
    ''' Abstract base of the auth hierarchy. Each concrete subclass
        represents one way of authenticating and exposes its credentials
        through auth_creds().'''

    @abstractmethod
    def __init__(self):
        pass

    @abstractmethod
    def auth_creds(self):
        ''' Return the credentials the transport layer should attach to
            requests; the exact form depends on the subclass.'''
        pass
30 |
31 |
class ESNoAuth(ESAuth):
    ''' Sentinel auth type: the low-level transport attaches no
        authentication at all.'''

    def __init__(self):
        super().__init__()

    def auth_creds(self):
        ''' No credentials to supply.'''
        return None
40 |
41 |
class ESSigV4Auth(ESAuth):
    ''' Tells the transport layer to obtain AWS credentials via Boto and
        sign requests with SigV4. '''
    # Placeholder - eventually should support all of the different auth methods
    # of specifying access/secret and tokens.
    # Possibly this could do something like: boto3.Session().get_credentials()

    # TODO: Move the aws region into this class. Add a test case for region=None
    def __init__(self):
        super().__init__()

    def auth_creds(self):
        '''Placeholder... this should implement boto-like determination of AWS
        creds.'''
        return None
56 |
57 |
class ESHttpAuth(ESAuth):
    ''' HTTP basic auth: carries a username/password pair. '''

    def __init__(self, user, password):
        super().__init__()
        self._user = user
        self._password = password

    @property
    def user(self):
        ''' The configured user name.'''
        return self._user

    @property
    def password(self):
        ''' The configured password.'''
        return self._password

    def auth_creds(self):
        ''' Return the (user, password) pair, as consumed by e.g. the
            requests library.'''
        return (self._user, self._password)
75 |
76 |
77 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/kibana/30-kb-deploy.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: kibana
  labels:
    component: kibana
spec:
  # Single Kibana instance; Kibana keeps no local state, so replicas can be
  # raised if needed.
  replicas: 1
  selector:
    matchLabels:
      component: kibana
  template:
    metadata:
      labels:
        component: kibana
    spec:
      containers:
      - name: kibana
        # Official Image from Open Distro Team
        image: amazon/opendistro-for-elasticsearch-kibana:0.9.0
        env:
        - name: CLUSTER_NAME
          value: logs
        - name: ELASTICSEARCH_USERNAME
          value: kibanaserver
        # Replace with URL of Elasticsearch API
        - name: ELASTICSEARCH_URL
          value:
        # Replace with password chosen during cluster initialization
        - name: ELASTICSEARCH_PASSWORD
          value:
        # Replace with key passphrase for key used to generate Kibana TLS cert
        - name: KEY_PASSPHRASE
          value:
        # 32-character random string to be used as cookie password by security plugin
        - name: COOKIE_PASS
          value:
        # Requests equal to limits gives this pod the Guaranteed QoS class.
        resources:
          limits:
            cpu: 4
            memory: 16Gi
          requests:
            cpu: 4
            memory: 16Gi
        # TCP-level probes only check the port is open, not that Kibana is
        # serving; sufficient here because Kibana terminates TLS itself.
        readinessProbe:
          tcpSocket:
            port: http
          initialDelaySeconds: 20
          periodSeconds: 10
        livenessProbe:
          tcpSocket:
            port: http
          initialDelaySeconds: 20
          periodSeconds: 10
        ports:
        - containerPort: 5601
          name: http
        volumeMounts:
        - mountPath: /usr/share/kibana/config/kibana.yml
          name: config
          subPath: kibana.yml
        - mountPath: /usr/share/kibana/config/kibana-crt.pem
          name: certs
          subPath: kibana-crt.pem
        - mountPath: /usr/share/kibana/config/kibana-key.pem
          name: certs
          subPath: kibana-key.pem
        - mountPath: /usr/share/kibana/config/kibana-root-ca.pem
          name: certs
          subPath: kibana-root-ca.pem
      volumes:
      - name: config
        configMap:
          name: kibana
      - name: certs
        secret:
          secretName: kibana-tls-data
--------------------------------------------------------------------------------
/jwt-tokens/token-gen/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
4 |
7 | 4.0.0
8 |
9 | odfe
10 | jwt-test-tokens
11 | 1.0-SNAPSHOT
12 | jar
13 |
14 |
15 |
16 |
17 | org.apache.maven.plugins
18 | maven-compiler-plugin
19 |
20 | 7
21 | 7
22 |
23 |
24 |
25 | maven-assembly-plugin
26 |
27 |
28 |
29 | JWTTestTokens
30 |
31 |
32 |
33 | jar-with-dependencies
34 |
35 |
36 |
37 |
38 | make-assembly
39 | package
40 |
41 | single
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 | io.jsonwebtoken
52 | jjwt-api
53 | 0.10.5
54 |
55 |
56 | io.jsonwebtoken
57 | jjwt-orgjson
58 | 0.10.5
59 |
60 |
61 | io.jsonwebtoken
62 | jjwt-jackson
63 | 0.10.5
64 |
65 |
66 | io.jsonwebtoken
67 | jjwt-impl
68 | 0.10.5
69 |
70 |
71 |
72 |
73 |
74 |
--------------------------------------------------------------------------------
/es_sink/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Note: To use the 'upload' functionality of this file, you must:
#   $ pipenv install twine --dev

# This code was modified from https://github.com/navdeep-G/setup.py

import io
import os
import sys
from shutil import rmtree

from setuptools import find_packages, setup, Command

# Package meta-data.
NAME = 'es_sink'
DESCRIPTION = 'Send bulk data to Elasticsearch or SQS.'
URL = 'https://github.com/opendistro-for-elasticsearch/community/es_sink'
EMAIL = 'handler@amazon.com'
AUTHOR = 'Jon Handler'
REQUIRES_PYTHON = '>=3.7.0'
VERSION = '0.1.0'

# What packages are required for this module to be executed?
REQUIRED = [
    'boto3',
    'pytz',
    'requests',
    'requests_aws4auth'
]

# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['django'],
}

# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!

here = os.path.abspath(os.path.dirname(__file__))

# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION

# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION


# Where the magic happens:
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=['es_sink', ],
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license='MIT',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        # Fixed: was "3.6", which contradicted REQUIRES_PYTHON ('>=3.7.0').
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
)
90 |
--------------------------------------------------------------------------------
/es_sink/examples.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2020 Amazon Web Services
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
import flushing_buffer
from descriptor import ESDescriptor
from es_transport import ESTransport


################################################################################
# Example connecting to localhost with http auth
LOCALHOST_ESDESCRIPTOR = ESDescriptor("https://localhost:9200/", es_v7=True,
                                      es_index='logs', timestamped=True,
                                      signed=False, http_auth=('admin', 'admin'))

buffer = flushing_buffer.flushing_buffer_factory(LOCALHOST_ESDESCRIPTOR,
                                                 flush_trigger=1)

buffer.add_log_line('{"field1": "value1", "field2": "value2"}')

raw_transport = ESTransport(LOCALHOST_ESDESCRIPTOR)
result = raw_transport.send('get', "https://localhost:9200/logs*/_search")
print(result)


################################################################################
# Example connecting to Amazon Elasticsearch Service with signed requests

AMAZON_ES_ENDPOINT = "https://your endpoint here"
AMAZON_ES_DESCRIPTOR = ESDescriptor(AMAZON_ES_ENDPOINT, es_v7=True,
                                    es_index='logs', signed=True,
                                    region='us-west-2', timestamped=True)

buffer2 = flushing_buffer.flushing_buffer_factory(AMAZON_ES_DESCRIPTOR,
                                                  flush_trigger=1)

print('Sending 1 doc to Amazon ES')
buffer2.add_log_line('{"field1": "value1", "field2": "value2"}')

print('Searching')
raw_transport2 = ESTransport(AMAZON_ES_DESCRIPTOR)
# Fixed: the search URL previously hard-coded a real domain endpoint instead
# of using the AMAZON_ES_ENDPOINT placeholder configured above.
result = raw_transport2.send(
    'get',
    "{}/logs*/_search".format(AMAZON_ES_ENDPOINT.rstrip('/')))
print(result)
64 |
65 |
--------------------------------------------------------------------------------
/es_sink/es_sink/transport_utils.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2020, Amazon Web Services Inc.
3 | This code is licensed under MIT license (see LICENSE.txt for details)
4 |
5 | Python 3
6 |
7 | Provides a buffer object that holds log lines in Elasticsearch _bulk
8 | format. As each line is added, the buffer stores the control line
9 | as well as the log line.
10 | '''
11 |
12 | import re
13 | import time
14 |
15 |
16 | from datetime import datetime
17 | from dateutil import tz
18 | from pytz import timezone
19 |
20 |
def now_pst():
    '''Return the current time as a timezone-aware datetime in US/Pacific.'''
    return datetime.now(timezone('UTC')).astimezone(timezone('US/Pacific'))
25 |
26 |
def utc_to_local_datetime(timestamp):
    ''' Takes a UTC timestamp (seconds since the epoch) and converts to a
        timezone-aware datetime in the local timezone.

        Bug fix: the original first built the correct local datetime, then
        replace()d its tzinfo with UTC -- relabeling local wall-clock time
        as UTC -- and converted to local again, shifting the result by the
        local UTC offset. fromtimestamp(...).astimezone() performs the
        conversion correctly in one step, stdlib-only.'''
    # Could validate data type
    return datetime.fromtimestamp(timestamp).astimezone()
34 |
35 |
def has_path(dic, path_elts):
    '''Return True when the chain of keys in path_elts can be dereferenced
    through nested dict dic, False otherwise. An empty path is trivially
    present.'''
    if not path_elts:
        return True
    if not isinstance(dic, dict):
        return False
    head = path_elts[0]
    return head in dic and has_path(dic[head], path_elts[1:])
48 |
49 |
def valid_key(key_in):
    '''Return a copy of key_in that is a valid Elasticsearch field name
    (and hence a suitable dict key): every character outside
    [a-zA-Z0-9@_] is replaced with "_". The input is not modified.'''
    return re.sub(r'[^a-zA-Z0-9@_]', '_', key_in)
55 |
56 |
def flatten(current, key, result):
    '''Recursively walk the nested dict current, storing each leaf value in
    result under a key built by joining the path elements with "_" (each
    element sanitized via valid_key). Returns result.
    flatten({'a': {'b':'c', 'd': 'e'}}, '', {}) =>
    {'a_b': 'c', 'a_d': 'e'}'''
    if not isinstance(current, dict):
        # Leaf: record the value under the accumulated path key.
        result[key] = current
        return result
    for subkey, subval in current.items():
        cleaned = valid_key(str(subkey))
        next_key = "{0}_{1}".format(key, cleaned) if key else cleaned
        flatten(subval, next_key, result)
    return result
72 |
73 |
def valid_request_body(body):
    ''' Normalize a request body: None (or any falsy body) becomes the
        empty string, and a non-empty body is guaranteed to end with a
        newline, as the _bulk format requires.'''
    if not body:
        return ""
    return body if body.endswith("\n") else body + "\n"
82 |
83 |
def wall_time(func, *args, **kwargs):
    ''' Invoke func(*args, **kwargs) and return (result, elapsed_seconds).
        Measures wall-clock time rather than CPU time, since the work
        being timed happens remotely.'''
    started = time.time()
    outcome = func(*args, **kwargs)
    elapsed = time.time() - started
    return (outcome, elapsed)
92 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
4 | documentation, we greatly value feedback and contributions from our community.
5 |
6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
7 | information to effectively respond to your bug report or contribution.
8 |
9 |
10 | ## Reporting Bugs/Feature Requests
11 |
12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features.
13 |
14 | When filing an issue, please check [existing open](../../issues), or [recently closed](../../issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
16 |
17 | * A reproducible test case or series of steps
18 | * The version of our code being used
19 | * Any modifications you've made relevant to the bug
20 | * Anything unusual about your environment or deployment
21 |
22 |
23 | ## Contributing via Pull Requests
24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
25 |
26 | 1. You are working against the latest source on the *master* branch.
27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
29 |
30 | To send us a pull request, please:
31 |
32 | 1. Fork the repository.
33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
34 | 3. Ensure local tests pass.
35 | 4. Commit to your fork using clear commit messages.
36 | 5. Send us a pull request, answering any default questions in the pull request interface.
37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
38 |
GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
41 |
42 |
43 | ## Finding contributions to work on
44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](../../labels/help%20wanted) issues is a great place to start.
45 |
46 |
47 | ## Code of Conduct
48 |
49 | This project has adopted an [Open Source Code of Conduct](https://opendistro.github.io/for-elasticsearch/codeofconduct.html).
50 |
51 |
52 | ## Security issue notifications
53 |
54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
55 |
56 |
57 | ## Licensing
58 |
59 | See the [LICENSE](./LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/30-es-configmap.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | apiVersion: v1
6 | kind: ConfigMap
7 | metadata:
8 | name: elasticsearch
9 | namespace: elasticsearch
10 | labels:
11 | app: elasticsearch
12 | data:
13 | elasticsearch.yml: |-
14 | cluster:
15 | name: ${CLUSTER_NAME}
16 | node:
17 | master: ${NODE_MASTER}
18 | data: ${NODE_DATA}
19 | name: ${NODE_NAME}
20 | ingest: ${NODE_INGEST}
21 | max_local_storage_nodes: 1
22 | attr.box_type: hot
23 |
24 | processors: ${PROCESSORS:1}
25 |
26 | network.host: ${NETWORK_HOST}
27 |
28 | path:
29 | data: /usr/share/elasticsearch/data
30 | logs: /usr/share/elasticsearch/logs
31 |
32 | http:
33 | compression: true
34 |
35 | discovery:
36 | zen:
37 | ping.unicast.hosts: ${DISCOVERY_SERVICE}
38 | minimum_master_nodes: ${NUMBER_OF_MASTERS}
39 |
40 | # TLS Configuration Transport Layer
41 | opendistro_security.ssl.transport.pemcert_filepath: elk-crt.pem
42 | opendistro_security.ssl.transport.pemkey_filepath: elk-key.pem
43 | opendistro_security.ssl.transport.pemtrustedcas_filepath: elk-root-ca.pem
44 | opendistro_security.ssl.transport.pemkey_password: ${TRANSPORT_TLS_PEM_PASS}
45 | opendistro_security.ssl.transport.enforce_hostname_verification: false
46 |
47 | # TLS Configuration REST Layer
48 | opendistro_security.ssl.http.enabled: true
49 | opendistro_security.ssl.http.pemcert_filepath: elk-crt.pem
50 | opendistro_security.ssl.http.pemkey_filepath: elk-key.pem
51 | opendistro_security.ssl.http.pemtrustedcas_filepath: elk-root-ca.pem
52 | opendistro_security.ssl.http.pemkey_password: ${HTTP_TLS_PEM_PASS}
53 |
54 | # Demo Certificate Option Disabled
55 | opendistro_security.allow_unsafe_democertificates: false
56 |
57 | opendistro_security.allow_default_init_securityindex: false
58 |
59 | opendistro_security.authcz.admin_dn:
60 | - CN=admin.example.com
61 | opendistro_security.nodes_dn:
62 | - 'CN=sec.other.com,OU=SSL,O=Test,L=Test,C=DE'
63 | - 'CN=*.example.com,OU=SSL,O=Test,L=Test,C=DE'
64 | - 'CN=*.sec.example.com'
65 | - 'CN=sec.examples.com'
66 |
67 | opendistro_security.audit.type: internal_elasticsearch
68 | opendistro_security.enable_snapshot_restore_privilege: true
69 | opendistro_security.check_snapshot_restore_write_privileges: true
70 | opendistro_security.restapi.roles_enabled: ["all_access", "security_rest_api_access"]
71 | cluster.routing.allocation.disk.threshold_enabled: false
72 | opendistro_security.audit.config.disabled_rest_categories: NONE
73 | opendistro_security.audit.config.disabled_transport_categories: NONE
74 |
75 | logging.yml: |-
76 | # you can override this using by setting a system property, for example -Des.logger.level=DEBUG
77 | es.logger.level: INFO
78 | rootLogger: ${es.logger.level}, console
79 | logger:
80 | # log action execution errors for easier debugging
81 | action: DEBUG
82 | # reduce the logging for aws, too much is logged under the default INFO
83 | com.amazonaws: WARN
84 | appender:
85 | console:
86 | type: console
87 | layout:
88 | type: consolePattern
89 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
90 |
--------------------------------------------------------------------------------
/pa-to-es/result_parser.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | SPDX-License-Identifier: MIT-0
4 |
5 | result_parser provides a class that takes the results of calling the performance
6 | analyzer and putting together a document suitable for sending to Elasticsearch
7 | '''
8 |
9 | import datetime
10 | import pytz
11 |
12 | import json
13 |
class ResultParser():
    ''' Construct with the text response from calling performance analyzer. Use
        the records() method to iterate over the response, retrieving a single
        Elasticsearch doc with each call. '''

    def __init__(self, metric, response_text, node_tracker):
        '''response_text is the body of the response to the GET request.

        metric:       object exposing .name and .agg for the PA metric
        node_tracker: object exposing ip(node_name) for node IP lookup'''
        self.response_json = json.loads(response_text)
        self.metric = metric
        self.node_tracker = node_tracker

    def _unpack_record(self, fields, record):
        ''' Match the field names with their values in the record. If there's no
            applicable value for the field (it's "null"), don't add the field to
            the doc. Returns a dict, which is the basis for the doc.'''
        ret = {'metric': self.metric.name}
        for field_name, value in zip(fields, record):
            # PA encodes "no value for this dimension" as the string "null";
            # skip those along with actual None values.
            if value is None or value == 'null':
                continue
            ret[field_name] = value
        return ret

    @staticmethod
    def pacific_time(unix_time):
        '''Convert a timestamp (in microseconds) to an isoformat string.
        Assumes the timestamp is US/Pacific time, which will be wrong
        for a lot of people. TODO: figure out how to make PA return
        UTC and then do this conversion correctly.

        unix_time is an integer unix time in microseconds
        '''
        # NOTE(review): dividing by 1000 converts milliseconds -> seconds,
        # which is what fromtimestamp expects. If the input really is
        # microseconds as the docstring says, this should divide by
        # 1,000,000 — confirm the unit PA actually returns.
        timestamp = unix_time / 1000
        timestamp = datetime.datetime.fromtimestamp(timestamp)
        timezone = pytz.timezone("America/Los_Angeles")
        timestamp = timezone.localize(timestamp)
        return timestamp.isoformat()

    def records(self):
        ''' Iterates the response, yielding one dict at a time with a single
            metric and dimension

            A word on the API. PA returns a record for each combination
            of the requested dimensions. If a dimension doesn't bear on that
            particular metric, PA returns the string "null". To create the
            ES doc, you want to expose the combinations of dimensions that
            have values for that metric, skipping dimensions that have
            "null". The null dimensions are stripped out in _unpack_record. '''
        for node_name, values in self.response_json.items():
            node_ip = self.node_tracker.ip(node_name)
            data = values['data']
            if not data:
                # Bug fix: this was `break`, which silently dropped every
                # remaining node as soon as one node had no data. Skip just
                # this node instead.
                continue
            timestamp = ResultParser.pacific_time(int(values['timestamp']))
            field_names = [x['name'] for x in data['fields']]
            for record in data['records']:
                doc = self._unpack_record(field_names, record)
                if not doc:
                    continue
                doc['node_name'] = node_name
                doc['node_ip'] = node_ip
                doc['@timestamp'] = timestamp
                doc['agg'] = self.metric.agg
                yield doc
78 |
--------------------------------------------------------------------------------
/BGG/es_manager.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2020 Amazon Web Services
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 |
24 | ''' Provides the ESManager class. This class provides the interface to write the
25 | boardgame documents to Elasticsearch.
26 |
27 | It uses the es_sink library to send the data and handle all auth. At the moment
28 | the endpoint and other parameters are hard coded in the __init__ method via the
29 | ESDescriptor object. See descriptor.py in the es_sink package for details on the
30 | possible values for these parameters.
31 |
32 | It might be worth it to make those parameters available on the command line, but
33 | hey, it's demo code!
34 | '''
35 |
36 |
37 | from es_sink import flushing_buffer
38 | from es_sink.descriptor import ESDescriptor, IndexDescriptor
39 | from es_sink.es_transport import ESTransport
40 | from es_sink.es_auth import ESHttpAuth
41 |
42 |
class ESManager():
    ''' Provides an interface to es_sink to connect and send data to
        Elasticsearch. '''

    # Base endpoint for the local cluster. Also used by the descriptor built
    # in __init__; keep the two in sync if you change it.
    _ENDPOINT = "https://localhost:9200/"

    def __init__(self):
        ''' Hard-coded the descriptor. This instantiates a FlushingBuffer to
            enable ES transport. '''
        index_descriptor = IndexDescriptor(es_index='games',
                                           es_v7=True,
                                           timestamped=False)
        self.descriptor = ESDescriptor(self._ENDPOINT,
                                       index_descriptor=index_descriptor,
                                       auth=ESHttpAuth('admin', 'admin'))
        self.buffer = flushing_buffer.flushing_buffer_factory(
            self.descriptor, flush_trigger=10000)
        self.raw_transport = ESTransport(self.descriptor)

    def add_document(self, line):
        ''' Add a document to the buffer. Can be a dict or string. '''
        self.buffer.add_log_line(line)

    def remove_index(self, index_name):
        ''' Delete the named index to prepare for a clean upload.

            Bug fix: index_name was previously ignored in favor of a
            hard-coded "games" URL; the parameter is now honored.
            Returns the transport result. '''
        result = self.raw_transport.send('delete',
                                         self._ENDPOINT + index_name)
        print("Deleted index '{}'".format(index_name), result)
        return result

    def create_index(self, index_name, settings):
        ''' Create the named index with the given settings body.

            Bug fix: index_name was previously ignored in favor of a
            hard-coded "games" URL; the parameter is now honored.
            Returns the transport result. '''
        result = self.raw_transport.send('put',
                                         self._ENDPOINT + index_name,
                                         body=settings)
        return result

    def flush(self):
        ''' When the number of documents % flush_trigger is not 0, you need to
            do a final flush on the buffer. '''
        self.buffer.flush()
84 |
--------------------------------------------------------------------------------
/cloudformation-deployment/README.md:
--------------------------------------------------------------------------------
1 | # Open Distro for Elasticsearch CloudFormation Templates
2 |
These templates create a full Open Distro for Elasticsearch cluster, including secure networking provided through VPC, configurable data nodes, master nodes, and client nodes. The client nodes also run Kibana server, providing Kibana access with a public IP address.
4 |
5 | ## Template descriptions
6 |
7 | The deployment uses CloudFormation's [nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) to deploy a number of sub stacks. When complete, the architecture will look like this
8 |
9 | 
10 |
11 | ## od4es.json
12 |
13 | This is the root stack, that you deploy directly via the CloudFormation console. It contains links to the other stacks that will create a VPC, create a seed node for bootstrapping an ES 7 cluster, create master nodes, create data nodes, and create a client node with a public IP address.
14 |
15 | ## network.json
16 |
17 | Deploys an [Amazon VPC](https://aws.amazon.com/vpc/) to provide secure networking for the Open Distro for Elasticsearch cluster. The VPC spans 2 availability zones, with a public and a private subnet in each of those zones. The stack adds an Internet Gateway for outbound traffic and a NAT gateway for inbound traffic. EC2 instances in the public subnet can have public IP addresses; the seed node, and the client nodes are publicly accessible.
18 |
19 | ## seed.json
20 |
21 | Deploys a single, seed instance at a known IP address that is the seed to bootstrap the Elasticsearch cluster.
22 |
23 | ## data-nodes.json
24 |
25 | Deploys an auto scaled group of data nodes into the private subnet of the VPC
26 |
27 | ## master-nodes.json
28 |
29 | Deploys an auto scaled group of master nodes. Initially it deploys 2 instances. The seed node remains in the cluster as the 3rd master.
30 |
31 | ## client-nodes.json
32 |
33 | Deploys an auto scaled group of client nodes with public IP addresses in the public subnet of the VPC. These instances also join the cluster as client nodes. The client nodes run Kibana server.
34 |
35 | # To use this stack
36 |
37 | ### Create an S3 bucket
38 |
39 | \- Clone or download the repository.
40 | \- Create an S3 bucket to hold the templates, in the region you want to deploy the stack. You can use the AWS Console to create a bucket. Or, if you have [installed and configured the AWS Command Line Interface](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html), you can run the command
41 |
```aws s3 mb s3://<bucket-name> --region <region>```
43 |
44 | ### Modify `od4es.json`, editing S3 bucket references
45 |
46 | `od4es.json` has 5 `AWS::CloudFormation::Stack` resources. Each of these has an S3 location for the stack. For example, the seed node's stack is defined like this
47 |
48 | ```
49 | "Seed":{
50 | "Type": "AWS::CloudFormation::Stack",
51 | "Properties": {
52 | "TemplateURL": "https://s3-us-west-2.amazonaws.com/odfe-cfn/seed.json",
53 | "Parameters": {
54 | "NetworkStackName" : { "Fn::GetAtt" : [ "Network", "Outputs.StackName" ] },
55 | "KeyName" : {"Ref" : "KeyName"},
56 | "MasterInstanceType": { "Ref": "MasterInstanceType" }
57 | }
58 | }
59 | }
60 | ```
61 |
62 | Edit `od4es.json`, replacing the `TemplateURL`'s bucket `odfe-cfn` with the [region endpoint](https://docs.aws.amazon.com/general/latest/gr/rande.html) and name of the bucket you created above.
63 |
64 | ### Put the templates in your bucket
65 |
66 | \- Edit `package-to-s3.sh`, replacing the bucket name in the first line `bucket=_bucket_name_` with the bucket name from the bucket you created above.
67 | \- Make the script executable `chmod u+x package-to-s3.sh`
68 | \- Run `./package-to-s3.sh` to send all the templates to your bucket.
69 |
70 | ### Create the stack
71 |
72 | \- Navigate to the AWS CloudFormation console.
73 | \- Click *Create Stack*
\- Use the S3 URL `https://s3-<region>.amazonaws.com/<bucket-name>/od4es.json`
75 |
--------------------------------------------------------------------------------
/es_sink/README.md:
--------------------------------------------------------------------------------
1 | # es_sink Package
2 |
This package provides a set of modules for sending bulk requests to Elasticsearch. It can deliver _bulk requests to Elasticsearch versions both below and at or above 7.x. It can deliver to any Elasticsearch cluster - open source, Open Distro for Elasticsearch, and Amazon Elasticsearch Service. It handles authentication in a number of different modalities.
4 |
5 | ## Examples
6 |
7 | ```
8 | ################################################################################
9 | # Example connecting to localhost with http auth
10 | LOCALHOST_ESDESCRIPTOR = ESDescriptor("https://localhost:9200/", es_v7=True,
11 | es_index='logs', timestamped=True,
12 | signed=False, http_auth=('admin', 'admin'))
13 |
14 | buffer = flushing_buffer.flushing_buffer_factory(LOCALHOST_ESDESCRIPTOR,
15 | flush_trigger=1)
16 |
17 | buffer.add_log_line('{"field1": "value1", "field2": "value2"}')
18 | # For raw transport
19 | raw_transport = ESTransport(LOCALHOST_ESDESCRIPTOR)
20 | result = raw_transport.send('get', "https://localhost:9200/logs*/_search")
21 |
22 |
23 | ################################################################################
24 | # Example connecting to Amazon Elasticsearch Service with signed requests
25 |
26 | AMAZON_ES_ENDPOINT = "https://your endpoint here"
27 | AMAZON_ES_DESCRIPTOR = ESDescriptor(AMAZON_ES_ENDPOINT, es_v7=True,
28 | es_index='logs', signed=True,
29 | region='us-west-2', timestamped=True)
30 |
31 | buffer2 = flushing_buffer.flushing_buffer_factory(AMAZON_ES_DESCRIPTOR,
32 | flush_trigger=1)
33 |
34 | buffer2.add_log_line('{"field1": "value1", "field2": "value2"}')
35 |
36 | # Raw transport for Amazon ES
37 | raw_transport2 = ESTransport(AMAZON_ES_DESCRIPTOR)
38 | result = raw_transport2.send(
39 | 'get',
40 | "https://search-test-es-sink-nrobz6a4gwulmlh6kh6kdzer6u.us-west-2.es.amazonaws.com/logs*/_search")
41 | print(result)
42 |
43 | ```
44 |
45 | # Usage
46 |
47 | Create an ESDescriptor for your domain. The ESDescriptor contains the following parameters:
48 |
49 | - endpoint - The base url to send REST API calls
50 | - region - For Amazon ES domains, the AWS region. E.g. us-west-2
51 | - es_v7 - Use ES V7 APIs (no _type, mostly)
52 | - es_index - For API calls that use an index
53 | - es_type - For ES V6 clusters and calls that use a _type
54 | - timestamped - For ES API calls, mostly writes, append _YY.MM.DD to the index name
55 | - signed - For Amazon ES domains, use SigV4 signing for REST API calls. Uses Boto3 and requests-aws4auth to pull credentials from the environment
56 | - http_auth - A tuple with (username, password) to use in sending
57 |
58 | Pass the ESDescriptor to flushing_buffer_factory to create a buffer for your data.
59 |
60 | ## FlushingBuffer
61 |
62 | FlushingBuffer holds log lines until it reaches the flush_trigger, when it sends the full buffer to ES. Currently, there is no error checking or retries.
63 |
64 | Use the add_log_line method of the FlushingBuffer to add single lines. You can pass in a string or a dict. If you send a dict, FlushingBuffer converts it to a string before sending.
65 |
66 | ## Transport
67 |
68 | Underlying the communication is the ESTransport class. Use the send method directly to send REST requests to ES.
69 |
70 | ## Elasticsearch and SQS
71 |
72 | The es_sink package also includes classes that support sending log lines to Amazon Simple Queue Service (SQS). When sending to SQS, the buffer is queued as a multi-line string.
73 |
74 | Queueing in SQS supports a pattern of loading log lines into SQS and then employing the es_sink package in the worker nodes to deliver to Elasticsearch.
75 |
76 | # Deployment
77 |
78 | This package is not currently uploaded to PyPi. To install, download the package from GitHub. Assuming your folder structure looks like this
79 |
80 | ```
81 | es_sink
82 | \- setup.py
83 | \- ...
84 | \- es_sink
85 | \- __init__.py
86 | \- descriptor.py
87 | \- ...
88 | ```
89 |
Use ```pip install <path-to-top-level-es_sink-folder>```
91 |
92 |
--------------------------------------------------------------------------------
/es_sink/es_sink/flushing_buffer.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2020, Amazon Web Services Inc.
3 | This code is licensed under MIT license (see LICENSE.txt for details)
4 |
5 | Python 3
6 |
7 | Provides a buffer object that holds log lines in Elasticsearch _bulk
8 | format. As each line is added, the buffer stores the control line
9 | as well as the log line.
10 |
11 | Employs an line_buffer to hold log lines as they are added. Optionally
12 | sends monitor information to an ES cluster. Set the flush_trigger to
13 | control how many lines are buffered before each flush.
14 | '''
15 |
16 | import time
17 |
18 | from es_sink.descriptor import ESDescriptor, SQSDescriptor
19 | from es_sink.line_buffer import ESLineBuffer, SQSLineBuffer
20 | from es_sink.es_transport import ESTransport
21 | from es_sink.sqs_transport import SQSTransport
22 | from es_sink.transport_exceptions import BadSink
23 |
class FlushingESBuffer():
    '''Wraps an ESLineBuffer object to provide _bulk flushing when the
    flush_trigger is hit.'''

    def __init__(self, descriptor, flush_trigger=1):
        ''' target_descriptor must be an ESDescriptor'''
        self.target_descriptor = descriptor
        self.flush_trigger = flush_trigger
        self.transport = ESTransport(descriptor)
        self.buffer = ESLineBuffer(descriptor)

    def add_log_line(self, log_line):
        '''Add a single log line to the internal buffer. If the flush trigger
           is hit, send the bulk request.'''
        self.buffer.add_log_line(log_line)
        if self.buffer.es_doc_count() >= self.flush_trigger:
            self.flush()  # swallows the result. Do something with it?

    def flush(self):
        '''Flushes the line_buffer, sending all to the _bulk API'''
        doc_count = self.buffer.es_doc_count()
        # Nothing buffered: nothing to send.
        if doc_count == 0:
            return None
        try:
            url = self.target_descriptor.bulk_url()
            print("Flushing {} documents {} to {}".format(doc_count,
                                                          time.time(),
                                                          url))
            response = self.transport.send('post', url, body=str(self.buffer))
            result = response._asdict()
            result['docs'] = doc_count
            self.buffer.clear()
            return result
        except Exception as exc:
            message = "Exception sending request '{}'"
            print(message.format(str(exc)))
            raise exc
61 |
62 |
class FlushingSQSBuffer():
    '''Use to send ES _bulk data to SQS in batches.'''

    def __init__(self, descriptor, flush_trigger=1):
        self.target_descriptor = descriptor
        self.flush_trigger = flush_trigger
        self.transport = SQSTransport(descriptor)
        self.buffer = SQSLineBuffer()

    def add_log_line(self, line):
        '''Add a single log line to the internal buffer. If the flush trigger
           is hit, send the bulk request.'''
        self.buffer.add_log_line(line)
        if self.buffer.es_doc_count() >= self.flush_trigger:
            self.flush()  # swallows the result. Do something with it?

    def flush(self):
        '''Flushes the line_buffer, sending all to the _bulk API'''
        doc_count = self.buffer.es_doc_count()
        # Matches the original behavior: the progress line prints even when
        # the buffer is empty.
        print("Flushing {} documents {}".format(doc_count, time.time()))
        if doc_count == 0:
            return None
        result = self.transport.send(str(self.buffer))._asdict()
        result['docs'] = doc_count
        self.buffer.clear()
        print(result)
        return result
91 |
92 |
def flushing_buffer_factory(descriptor, flush_trigger=1):
    '''Call with a descriptor to receive a buffer object.'''
    # Dispatch on the descriptor type; unknown descriptors are rejected.
    dispatch = ((ESDescriptor, FlushingESBuffer),
                (SQSDescriptor, FlushingSQSBuffer))
    for descriptor_type, buffer_class in dispatch:
        if isinstance(descriptor, descriptor_type):
            return buffer_class(descriptor, flush_trigger)
    raise BadSink()
102 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Copyright 2019 Viasat, Inc.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License").
6 | You may not use this file except in compliance with the License.
7 | A copy of the License is located at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | or in the "license" file accompanying this file. This file is distributed
12 | on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
13 | express or implied. See the License for the specific language governing
14 | permissions and limitations under the License.
15 | */}}
16 |
{{/*
Expand the name of the chart.
Precedence: .Values.nameOverride, then .Chart.Name; truncated to 63
characters (Kubernetes DNS label limit) with any trailing "-" removed.
*/}}
{{- define "opendistro-es.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
23 |
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Precedence: .Values.fullnameOverride, then .Release.Name alone (when it
already contains the chart name), then "<release>-<chart name>".
*/}}
{{- define "opendistro-es.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
41 |
42 |
{{/*
Define standard labels for frequently used metadata.
Emits the app/chart/release/heritage label set shared by the chart's
resources.
*/}}
{{- define "opendistro-es.labels.standard" -}}
app: {{ template "opendistro-es.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
{{- end -}}
52 |
{{/*
Create the name of the service account to use
for Kibana. When kibana.serviceAccount.create is true the name is
"<serviceAccount.name or chart fullname>-kibana"; otherwise the explicit
serviceAccount.name is used, falling back to the namespace "default"
service account.
*/}}
{{- define "opendistro-es.kibana.serviceAccountName" -}}
{{- if .Values.kibana.serviceAccount.create -}}
{{ default (include "opendistro-es.fullname" .) .Values.kibana.serviceAccount.name }}-kibana
{{- else -}}
{{ default "default" .Values.kibana.serviceAccount.name }}
{{- end -}}
{{- end -}}
63 |
{{/*
Create the name of the service account to use
for Elasticsearch. When elasticsearch.serviceAccount.create is true the
name is "<serviceAccount.name or chart fullname>-es"; otherwise the
explicit serviceAccount.name is used, falling back to the namespace
"default" service account.
*/}}
{{- define "opendistro-es.elasticsearch.serviceAccountName" -}}
{{- if .Values.elasticsearch.serviceAccount.create -}}
{{ default (include "opendistro-es.fullname" .) .Values.elasticsearch.serviceAccount.name }}-es
{{- else -}}
{{ default "default" .Values.elasticsearch.serviceAccount.name }}
{{- end -}}
{{- end -}}
74 |
{{/*
Return the proper Docker Image Registry Secret Names
as an "imagePullSecrets:" YAML list (or nothing when no secrets are
configured). Global secrets (.Values.global.imagePullSecrets) take
precedence; otherwise the kibana, elasticsearch and elasticsearch
initContainer secret lists are concatenated.
*/}}
{{- define "opendistro-es.imagePullSecrets" -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.
Also, we can not use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.global.imagePullSecrets }}
  - name: {{ . }}
{{- end }}
{{- else if or .Values.kibana.imagePullSecrets .Values.elasticsearch.imagePullSecrets .Values.elasticsearch.initContainer.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.kibana.imagePullSecrets }}
  - name: {{ . }}
{{- end }}
{{- range .Values.elasticsearch.imagePullSecrets }}
  - name: {{ . }}
{{- end }}
{{- range .Values.elasticsearch.initContainer.imagePullSecrets }}
  - name: {{ . }}
{{- end }}
{{- end -}}
{{- else if or .Values.kibana.imagePullSecrets .Values.elasticsearch.imagePullSecrets .Values.elasticsearch.initContainer.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.kibana.imagePullSecrets }}
  - name: {{ . }}
{{- end }}
{{- range .Values.elasticsearch.imagePullSecrets }}
  - name: {{ . }}
{{- end }}
{{- range .Values.elasticsearch.initContainer.imagePullSecrets }}
  - name: {{ . }}
{{- end }}
{{- end -}}
{{- end -}}
115 |
{{/*
Name stem shared by the master node pods: "<fullname>-master".
*/}}
{{- define "master-nodes" -}}
{{- template "opendistro-es.fullname" . -}}-master
{{- end -}}

{{/*
Comma-separated list of the expected master pod names
("<fullname>-master-0,<fullname>-master-1,...") built from
elasticsearch.master.replicas.
NOTE(review): the rendered value ends with a trailing comma — confirm the
consumer tolerates it.
*/}}
{{- define "initial-master-nodes" -}}
{{- $replicas := .Values.elasticsearch.master.replicas | int }}
{{- range $i, $e := untilStep 0 $replicas 1 -}}
{{ template "master-nodes" $ }}-{{ $i }},
{{- end -}}
{{- end -}}
126 |
--------------------------------------------------------------------------------
/es_sink/es_sink/line_buffer.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2019, Amazon Web Services Inc.
3 | This code is licensed under MIT license (see LICENSE.txt for details)
4 |
5 | Python 3
6 |
7 | Provides a buffer object that holds log lines in Elasticsearch _bulk
8 | format. As each line is added, the buffer stores the control line
9 | as well as the log line.
10 | '''
11 |
12 | import abc
13 | import json
14 |
15 |
class LineBuffer(abc.ABC):
    ''' An abstract base class for buffering log lines.

    Subclasses implement add_line_dict/add_line_str/es_docs; this base class
    owns the underlying line list and the shared convenience methods.

    Bug fix: the class previously set the Python-2-style
    ``__metaclass__ = abc.ABCMeta`` attribute, which Python 3 ignores, so
    the abstract methods were never enforced. Inheriting from ``abc.ABC``
    restores the intended enforcement.
    '''

    def __init__(self):
        self._line_buffer = list()

    @abc.abstractmethod
    def add_line_dict(self, dic):
        '''Children should add the log line to their internal buffer'''

    @abc.abstractmethod
    def add_line_str(self, line):
        '''Children should add the log line to their internal buffer'''

    @abc.abstractmethod
    def es_docs(self):
        '''Children should override to return a multi-line string with only the
        ES documents, not the control lines.'''

    @staticmethod
    def _dict_to_string(dic):
        ''' Encode a dict as a string. Silently swallows errors '''
        try:
            line = json.JSONEncoder().encode(dic)
            return line
        except UnicodeDecodeError as exc:
            msg = "unicode problem {}, skipping line: {}"
            print(msg.format(str(exc), dic))
            return ''

    def add_log_line(self, log_line):
        '''Send all log lines to this function. Dispatches on type; anything
        that is neither str nor dict is rejected.'''
        if isinstance(log_line, dict):
            self.add_line_dict(log_line)
        elif isinstance(log_line, str):
            self.add_line_str(log_line)
        else:
            raise ValueError('{} is neither str nor dict'.format(log_line))

    def clear(self):
        '''Empty the buffer.'''
        self._line_buffer = list()

    def es_docs_bytes(self):
        '''Return the byte count for the log lines in the buffer'''
        return len(self.es_docs().encode("utf8"))

    def buffer_bytes(self):
        '''Return the total size of the objects in the buffer. This includes
        the size of the control lines.'''
        return len(str(self).encode("utf8"))

    def __str__(self):
        return "\n".join(self._line_buffer) + "\n"

    def __repr__(self):
        return str(self)
75 |
76 |
class SQSLineBuffer(LineBuffer):
    '''Implementation of LineBuffer to buffer data for SQS output. SQS doesn't
    use ES control lines, of course. The workers reading the queue need to
    add those lines.'''

    def __init__(self):
        # Bug fix: this was super().__init__(self), which passes self twice
        # (super() already binds it) and raised TypeError on instantiation —
        # the very thing the old "PyLint claims this is useless" comment
        # was puzzling over.
        super().__init__()

    def add_line_str(self, line):
        '''Buffer a raw log line as-is.'''
        self._line_buffer.append(line)

    def add_line_dict(self, dic):
        '''JSON-encode the dict and buffer the result.'''
        line = LineBuffer._dict_to_string(dic)
        self._line_buffer.append(line)

    def es_docs(self):
        '''Return a flattened string with the log lines in the buffer.'''
        return "\n".join(self._line_buffer) + "\n"

    def es_doc_count(self):
        '''Return the count of log lines in the buffer.'''
        return len(self._line_buffer)
98 |
99 |
class ESLineBuffer(LineBuffer):
    '''Send lines to this class as either dicts or strs and it will buffer
    a control line along with the log line. Use str() to retrieve the
    post body to be used with a _bulk request.'''

    def __init__(self, es_descriptor):
        '''Initialize with the ES index name root as well as the ES type. These
        are embedded in the control line.'''
        super().__init__()
        # The descriptor supplies the _bulk control line for each document.
        self.es_descriptor = es_descriptor

    def add_line_str(self, line):
        '''Buffer a log line and an indexing command for that line'''
        control_line = self.es_descriptor.bulk_control_line()
        self._line_buffer.append(control_line)
        self._line_buffer.append(line)

    def add_line_dict(self, dic):
        '''Buffer a log line and an indexing command for that line'''
        line = LineBuffer._dict_to_string(dic)
        self.add_line_str(line)

    def es_docs(self):
        '''Return just the log lines (the odd slots) in the buffer.'''
        return "\n".join(self._line_buffer[1::2]) + "\n"

    def es_doc_count(self):
        '''Return the count of log lines in the buffer.'''
        # Bug fix: "/" returned a float (e.g. 2.0). A count should be an int,
        # matching SQSLineBuffer.es_doc_count(). Control line + doc line are
        # buffered in pairs, hence the halving.
        return len(self._line_buffer) // 2
129 |
--------------------------------------------------------------------------------
/BGG/bgg_mapping.json:
--------------------------------------------------------------------------------
1 | {
2 | "settings": {
3 | "number_of_shards": 1,
4 | "number_of_replicas": 1
5 | },
6 | "mappings": {
7 | "properties": {
8 | "accessory": {
9 | "type": "boolean"
10 | },
11 | "alternative_names": {
12 | "type": "keyword",
13 | "fields": {
14 | "text": {
15 | "type": "text"
16 | }
17 | }
18 | },
19 | "artists": {
20 | "type": "text"
21 | },
22 | "bgg_rank": {
23 | "type": "integer"
24 | },
25 | "boardgame_rank": {
26 | "type": "integer"
27 | },
28 | "categories": {
29 | "type": "keyword"
30 | },
31 | "comments": {
32 | "type": "text"
33 | },
34 | "description": {
35 | "type": "text"
36 | },
37 | "designers": {
38 | "type": "text",
39 | "fields": {
40 | "keyword": {
41 | "type": "keyword"
42 | }
43 | }
44 | },
45 | "expansion": {
46 | "type": "boolean"
47 | },
48 | "families": {
49 | "type": "keyword",
50 | "fields": {
51 | "text": {
            "type": "text"
53 | }
54 | }
55 | },
56 | "id": {
57 | "type": "integer"
58 | },
59 | "image": {
60 | "type": "keyword"
61 | },
62 | "implementations": {
63 | "type": "keyword",
64 | "fields": {
65 | "text": {
66 | "type": "text"
67 | }
68 | }
69 | },
70 | "max_players": {
71 | "type": "integer"
72 | },
73 | "max_playing_time": {
74 | "type": "integer"
75 | },
76 | "mechanics": {
77 | "type": "keyword",
78 | "fields": {
79 | "text": {
80 | "type": "text"
81 | }
82 | }
83 | },
84 | "min_age": {
85 | "type": "integer"
86 | },
87 | "min_players": {
88 | "type": "integer"
89 | },
90 | "min_playing_time": {
91 | "type": "integer"
92 | },
93 | "name": {
94 | "type": "keyword",
95 | "fields": {
96 | "text": {
97 | "type": "text"
98 | }
99 | }
100 | },
101 | "playing_time": {
102 | "type": "integer"
103 | },
104 | "publishers": {
105 | "type": "keyword",
106 | "fields": {
107 | "text": {
108 | "type": "text"
109 | }
110 | }
111 | },
112 | "rating_average": {
113 | "type": "float"
114 | },
115 | "rating_average_weight": {
116 | "type": "float"
117 | },
118 | "rating_bayes_average": {
119 | "type": "float"
120 | },
121 | "rating_median": {
122 | "type": "float"
123 | },
124 | "rating_num_weights": {
125 | "type": "integer"
126 | },
127 | "rating_stddev": {
128 | "type": "float"
129 | },
130 | "thumbnail": {
131 | "type": "keyword"
132 | },
133 | "users_commented": {
134 | "type": "integer"
135 | },
136 | "users_owned": {
137 | "type": "integer"
138 | },
139 | "users_rated": {
140 | "type": "integer"
141 | },
142 | "versions": {
143 | "type": "keyword",
144 | "fields": {
145 | "text": {
146 | "type": "text"
147 | }
148 | }
149 | },
150 | "videos": {
151 | "type": "keyword",
152 | "fields": {
153 | "text": {
154 | "type": "text"
155 | }
156 | }
157 | },
158 | "year": {
159 | "type": "integer"
160 | }
161 | }
162 | }
163 | }
164 |
--------------------------------------------------------------------------------
/iot_device_simulator/main.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2019, Amazon Web Services Inc.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 |
16 | Python 3
17 |
18 | Generate load against an Amazon ES endpoint. Needs lots of work to add
19 | parameterizations around endpoint, request signing, number of devices,
20 | kinds of sensors, etc. etc.
21 |
22 | Improvements:
23 | - Don't hardcode the device construction
24 | - Don't hardcode the sensors per device
25 | '''
26 |
27 | import argparse
28 | import time
29 |
30 | from device import Device
31 | from sensor import SineSensor, ConstSensor, DriftingSensor, MonotonicSensor
32 | from es_sink import flushing_buffer
33 | from es_sink.descriptor import ESDescriptor, IndexDescriptor
34 | from es_sink.es_transport import ESTransport
35 | from es_sink.es_auth import ESHttpAuth
36 |
def get_args():
    '''Parse and validate command line arguments for the simulator.

    Returns the argparse namespace. Raises ValueError when devices,
    samples, or batch size is not a positive integer.
    '''
    description = 'Simulate devices with sensors and send data to Elasticsearch'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-d', '--devices', type=int, default=10,
                        help='the number of devices', action='store')
    parser.add_argument('-s', '--samples', type=int, default=1000,
                        help='the number of samples to Elasticsearch', action='store')
    parser.add_argument('-b', '--batch-size', type=int, default=1000,
                        help='Number of log lines in each _bulk request', action='store')
    parser.add_argument('-e', '--endpoint', type=str, action='store',
                        default='https://127.0.0.1:9200',
                        help='Elasticsearch REST endpoint for _bulk requests.')
    parser.add_argument('--signed_requests', dest='signed_requests', default=False,
                        action='store_true',
                        help='''use AWS sigV4 signing for requests. Requires that '''
                             '''you have installed and configured the AWS command '''
                             '''line interface. Default is False.''')
    args = parser.parse_args()
    # Bug fix: these checks used "< 0", which let 0 slip through even though
    # the error messages (and the simulation itself) require positive values.
    if args.devices < 1:
        raise ValueError('Number of devices must be positive')
    if args.samples < 1:
        raise ValueError('Number of samples must be positive')
    if args.batch_size < 1:
        raise ValueError('Batch size must be positive')
    descriptive_text = ('Building {} devices with 5 sensors each and sending {} '
                        'samples to Elasticsearch. Total of {} log lines')
    descriptive_text = descriptive_text.format(args.devices, args.samples,
                                               5 * args.devices * args.samples)
    print(descriptive_text)
    return args
67 |
def make_device():
    '''Build one simulated device carrying the standard four sensors.'''
    device = Device()
    standard_sensors = (
        ConstSensor('humidity', value=50, fuzz=20.0),
        DriftingSensor('drift', seed=50, threshold=10,
                       reset_threshold=100, drift_amt=0.5),
        SineSensor('temperature', 43, 78, 4, fuzz=1.0),
        DriftingSensor('CPU', seed=50, threshold=10,
                       reset_threshold=100, drift_amt=0.1),
    )
    for sensor in standard_sensors:
        device.add_sensor(sensor)
    return device
77 |
78 |
if __name__ == '__main__':
    args = get_args()

    # Create the flushing buffer that batches log lines into _bulk requests.
    # NOTE(review): credentials are hardcoded to admin/admin (the Open Distro
    # default) — confirm before pointing this at anything but a local demo.
    index_descriptor = IndexDescriptor(es_index='logs', es_v7=True)
    es_descriptor = ESDescriptor(args.endpoint, index_descriptor=index_descriptor,
                                 auth=ESHttpAuth('admin', 'admin'))
    buffer = flushing_buffer.flushing_buffer_factory(es_descriptor,
                                                     flush_trigger=args.batch_size)

    # Create the set of devices. Deliberately one fewer than requested:
    # the final device is added below with a modified sensor mix.
    devices = list() # pylint: disable=invalid-name
    for i in range(args.devices - 1):
        d = make_device()
        devices.append(d)

    # Add a special device that has a "malfunctioning" CPU that ramps to 100.
    d = make_device()
    d.remove_sensor('CPU')
    d.add_sensor(MonotonicSensor('CPU', value=50, delta=0.1, ceiling=100,
                                 fuzz=0.01))
    devices.append(d)

    # One report per device per sample, roughly once a second. The buffer
    # presumably flushes itself whenever flush_trigger lines accumulate; the
    # explicit flush() at the end drains any remainder.
    for i in range(args.samples):
        for device in devices:
            dev_report = device.report()
            for sens_report in dev_report:
                buffer.add_log_line(sens_report)
        time.sleep(1)
    buffer.flush()
109 |
--------------------------------------------------------------------------------
/es_sink/es_sink/es_transport.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2020, Amazon Web Services Inc.
3 | This code is licensed under MIT license (see LICENSE.txt for details)
4 |
5 | Python 3
6 |
7 | ESTransport class
8 | Wrapper around the requests library that supports sending requests to
9 | Elasticsearch.
10 |
11 | Use the signed initializer to control whether requests are signed with
12 | sigV4 auth (via the requests_aws4auth library). When requests are signed
13 | Transport gets credentials from the environment via Boto.
14 | '''
15 |
import boto3
import requests
from requests_aws4auth import AWS4Auth

from es_sink.es_auth import ESAuth
from es_sink.transport_exceptions import BadAuth, BadHTTPMethod
from es_sink.transport_result import TransportResult
from es_sink.transport_utils import wall_time, valid_request_body
24 |
25 |
def _get_requests_function(method):
    '''Return the requests-library function for an HTTP method name.

    method: lowercase HTTP method name, e.g. 'get', 'post'.
    Raises BadHTTPMethod when requests has no such attribute.
    '''
    try:
        return getattr(requests, method)
    except AttributeError as exc:
        msg = "{} not a recognized HTTP method".format(method)
        # Chain the original exception (PEP 3134) so tracebacks show the
        # underlying AttributeError instead of "during handling ...".
        raise BadHTTPMethod(msg) from exc
34 |
35 |
def _send_signed(method, url, service='es', region='us-west-2', body=None):
    '''Internal method that uses sigV4 signing to send the request.

    Credentials come from the environment via boto3. Returns a
    TransportResult with status, response text, wall time, and payload size.
    '''
    credentials = boto3.Session().get_credentials()
    auth = AWS4Auth(credentials.access_key, credentials.secret_key, region,
                    service, session_token=credentials.token)
    func = _get_requests_function(method)
    # Bug fix: normalize the body once, up front (as _send_unsigned does).
    # Previously the raw argument was measured with len(), which raised
    # TypeError for the default body=None and could disagree with the
    # payload actually sent (valid_request_body(body)).
    body = valid_request_body(body)
    (result, took_time) = \
        wall_time(func, url, auth=auth, data=body,
                  headers={"Content-Type": "application/json"})
    return TransportResult(status=int(result.status_code),
                           result_text=result.text, took_s=took_time,
                           size=len(body))
48 |
49 |
def _send_unsigned(method, url, body=None, http_auth=None):
    '''Internal method to pass the request through.

    method:    HTTP method name understood by the requests library
    url:       full URL, including endpoint
    body:      request body; normalized via valid_request_body
    http_auth: optional credentials for HTTP basic auth

    Returns a TransportResult. TLS verification is disabled (verify=False);
    NOTE(review): presumably for the self-signed certs of local Open Distro
    clusters — confirm before production use.
    '''
    body = valid_request_body(body)
    func = _get_requests_function(method)
    # Build the kwargs once instead of duplicating the call and the
    # TransportResult construction in both branches (behavior unchanged).
    kwargs = {'data': body,
              'headers': {"Content-Type": "application/json"},
              'verify': False}
    if http_auth:
        kwargs['auth'] = http_auth
    (result, took_time) = wall_time(func, url, **kwargs)
    return TransportResult(status=int(result.status_code),
                           result_text=result.text, took_s=took_time,
                           size=len(body))
70 |
71 |
class ESTransport():
    ''' Transport class, wrapping the requests library to add auth when needed
        and to provide a facade for Amazon ES domains and local Elasticsearch
        instances.'''

    def __init__(self, descriptor):
        '''A transport object to send requests to Elasticsearch. Since the class
           supports both Amazon ES domains and vanilla ES clusters, this needs
           to provide request signing as well as HTTP auth. The ESDescriptor
           specifies which of these to use. At present, there's no way to
           add http auth AND sign requests.
           TODO: implement lower-level request signing for signed HTTP auth

           descriptor.signed: Set True to use SigV4 signing only
                              Set False for HTTP Auth or no auth
           descriptor.http_auth: User name, password tuple

           Raises BadAuth when both signing and HTTP auth are requested,
           ValueError when signing is requested without a region.'''

        self._descriptor = descriptor

        # Bug fix: BadAuth was raised here but never imported, so this path
        # died with NameError. It is now imported from transport_exceptions
        # (see the import block at the top of the file).
        if descriptor.is_signed() and descriptor.is_http_auth():
            raise BadAuth('You can\'t specify both HTTP auth and signed requests')

        if descriptor.is_signed() and not descriptor.region:
            raise ValueError('If you specify signed requests, you must also specify region')

    @property
    def is_signed(self):
        ''' Tracks whether to send signed requests '''
        return self._descriptor.is_signed()

    def send(self, method, url, service='es', body=''):
        '''Public method to dispatch between signed and unsigned.

           Specify the full URL, including endpoint.
           TODO: make the endpoint implicit, as determined by
           descriptor.base_url(). This might be easier, but introduces
           complexity in using the class (how to know how much of the URL to
           specify)'''
        if self.is_signed:
            return _send_signed(method, url, service, self._descriptor.region,
                                body=body)
        # Robustness: only dereference the descriptor's auth object when HTTP
        # auth is actually configured; previously this unconditionally called
        # descriptor._auth.auth_creds(), crashing for unauthenticated setups.
        http_auth = None
        if self._descriptor.is_http_auth():
            http_auth = self._descriptor._auth.auth_creds()
        return _send_unsigned(method, url, body=body, http_auth=http_auth)
115 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/40-es-master-deploy.yml:
--------------------------------------------------------------------------------
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

# Deployment of 3 dedicated Elasticsearch master nodes (no data, no ingest).
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    component: elasticsearch
    role: master
  name: es-master
  namespace: elasticsearch
spec:
  replicas: 3 # Number of Elasticsearch master nodes to deploy
  selector:
    matchLabels:
      component: elasticsearch
      role: master
  template:
    metadata:
      labels:
        component: elasticsearch
        role: master
      annotations:
        # NOTE(review): kube2iam/kiam role annotation left with an empty
        # value — fill in an IAM role ARN or delete the annotation.
        iam.amazonaws.com/role:
    spec:
      # Add toleration for not scheduling on dedicated node
      tolerations:
      - key: dedicated
        value: "true"
        effect: NoSchedule
      # Anti-affinity to disallow deploying client and master nodes on the same worker node
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: "kubernetes.io/hostname"
              labelSelector:
                matchLabels:
                  component: elasticsearch
                  role: master
        # Node Affinity to attract this Deployment's pods to a specific set of worker nodes
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: type # Replace this with corresponding worker node label's key
                operator: In
                values:
                - general # Replace this with corresponding worker node label's value
      # Raise vm.max_map_count for Elasticsearch's mmapped indices;
      # needs a privileged container.
      initContainers:
      - name: init-sysctl
        image: busybox:1.27.2
        command:
        - sysctl
        - -w
        - vm.max_map_count=262144
        securityContext:
          privileged: true
      containers:
      - name: elasticsearch
        env:
        - name: CLUSTER_NAME
          value: logs
        - name: NUMBER_OF_MASTERS
          value: "3"
        # Master-only role: master on, ingest and data off.
        - name: NODE_MASTER
          value: "true"
        - name: NODE_INGEST
          value: "false"
        - name: NODE_DATA
          value: "false"
        - name: NETWORK_HOST
          value: "0.0.0.0"
        - name: TRANSPORT_TLS_PEM_PASS
          value: "REPLACE_WITH_TLS_PRIVATE_KEY_PASSPHRASE" # Replace this with the passphrase for the TLS private key
        - name: HTTP_TLS_PEM_PASS
          value: "REPLACE_WITH_TLS_PRIVATE_KEY_PASSPHRASE" # Replace this with the passphrase for the TLS private key
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: DISCOVERY_SERVICE
          value: elasticsearch-discovery
        - name: KUBERNETES_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: PROCESSORS
          valueFrom:
            resourceFieldRef:
              resource: limits.cpu
        # Heap is half the 12Gi container memory.
        - name: ES_JAVA_OPTS
          value: -Xms6g -Xmx6g
        resources:
          requests:
            memory: 12Gi
            cpu: 2
          limits:
            memory: 12Gi
            cpu: 2
        livenessProbe:
          tcpSocket:
            port: transport
          initialDelaySeconds: 60
          periodSeconds: 10
        # Official Image from Open Distro Team
        image: amazon/opendistro-for-elasticsearch:0.9.0
        imagePullPolicy: Always
        ports:
        - containerPort: 9300
          name: transport
        - containerPort: 9200
          name: http
        - containerPort: 9600
          name: metrics
        volumeMounts:
        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          name: config
          subPath: elasticsearch.yml
        - mountPath: /usr/share/elasticsearch/config/logging.yml
          name: config
          subPath: logging.yml
        - mountPath: /usr/share/elasticsearch/config/elk-crt.pem
          name: certs
          subPath: elk-crt.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/elk-key.pem
          name: certs
          subPath: elk-key.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/elk-root-ca.pem
          name: certs
          subPath: elk-root-ca.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/admin-crt.pem
          name: certs
          subPath: admin-crt.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/admin-key.pem
          name: certs
          subPath: admin-key.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/admin-root-ca.pem
          name: certs
          subPath: admin-root-ca.pem
          readOnly: true
      volumes:
      - name: config
        configMap:
          name: elasticsearch
      - name: certs
        secret:
          secretName: elasticsearch-tls-data
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/50-es-client-deploy.yml:
--------------------------------------------------------------------------------
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

# Deployment of 2 Elasticsearch client/coordinating nodes (ingest on,
# master and data off).
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    component: elasticsearch
    role: client
  name: es-client
  namespace: elasticsearch
spec:
  replicas: 2
  selector:
    matchLabels:
      component: elasticsearch
      role: client
  template:
    metadata:
      labels:
        component: elasticsearch
        role: client
      annotations:
        # NOTE(review): kube2iam/kiam role annotation left with an empty
        # value — fill in an IAM role ARN or delete the annotation.
        iam.amazonaws.com/role:
    spec:
      serviceAccountName: elasticsearch
      # Add toleration for not scheduling on dedicated node
      tolerations:
      - key: dedicated
        value: "true"
        effect: NoSchedule
      # Weighted anti-affinity to disallow deploying client node to the same worker node as master node
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              topologyKey: "kubernetes.io/hostname"
              labelSelector:
                matchLabels:
                  component: elasticsearch
                  role: client
        # Node Affinity to attract this Deployment's pods to a specific set of worker nodes
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: type # Replace this with corresponding worker node label's key
                operator: In
                values:
                - general # Replace this with corresponding worker node label's value
      # Raise vm.max_map_count for Elasticsearch's mmapped indices;
      # needs a privileged container.
      initContainers:
      - name: init-sysctl
        image: busybox:1.27.2
        command:
        - sysctl
        - -w
        - vm.max_map_count=262144
        securityContext:
          privileged: true
      containers:
      - name: elasticsearch
        env:
        - name: CLUSTER_NAME
          value: logs
        - name: NUMBER_OF_MASTERS
          value: "3"
        # Coordinating/ingest role: master and data off.
        - name: NODE_MASTER
          value: "false"
        - name: NODE_INGEST
          value: "true"
        - name: NODE_DATA
          value: "false"
        - name: TRANSPORT_TLS_PEM_PASS
          value: "REPLACE_WITH_TLS_PRIVATE_KEY_PASSPHRASE" # Replace this with the passphrase for the TLS private key
        - name: HTTP_TLS_PEM_PASS
          value: "REPLACE_WITH_TLS_PRIVATE_KEY_PASSPHRASE" # Replace this with the passphrase for the TLS private key
        # NOTE(review): binds to the eth0 interface address, unlike the
        # master deployment's "0.0.0.0" — confirm the difference is intended.
        - name: NETWORK_HOST
          value: "_eth0_"
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: DISCOVERY_SERVICE
          value: elasticsearch-discovery
        - name: KUBERNETES_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: PROCESSORS
          valueFrom:
            resourceFieldRef:
              resource: limits.cpu
        # Heap is half the 12Gi container memory.
        - name: ES_JAVA_OPTS
          value: -Xms6g -Xmx6g
        resources:
          requests:
            cpu: 2
            memory: 12Gi
          limits:
            cpu: 2
            memory: 12Gi
        # Official Image from Open Distro Team
        image: amazon/opendistro-for-elasticsearch:0.9.0
        imagePullPolicy: Always
        ports:
        - containerPort: 9200
          name: http
        - containerPort: 9300
          name: transport
        - containerPort: 9600
          name: metrics
        livenessProbe:
          tcpSocket:
            port: transport
          initialDelaySeconds: 60
        volumeMounts:
        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          name: config
          subPath: elasticsearch.yml
        - mountPath: /usr/share/elasticsearch/config/logging.yml
          name: config
          subPath: logging.yml
        - mountPath: /usr/share/elasticsearch/config/elk-crt.pem
          name: certs
          subPath: elk-crt.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/elk-key.pem
          name: certs
          subPath: elk-key.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/elk-root-ca.pem
          name: certs
          subPath: elk-root-ca.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/admin-crt.pem
          name: certs
          subPath: admin-crt.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/admin-key.pem
          name: certs
          subPath: admin-key.pem
          readOnly: true
        - mountPath: /usr/share/elasticsearch/config/admin-root-ca.pem
          name: certs
          subPath: admin-root-ca.pem
          readOnly: true
      volumes:
      - name: config
        configMap:
          name: elasticsearch
      - name: certs
        secret:
          secretName: elasticsearch-tls-data
--------------------------------------------------------------------------------
/BGG/bgg.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2020 Amazon Web Services
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 |
24 | '''
25 | BGG - Download and search board game data from BoardgameGeek.com
26 |
27 | This sample application uses the boardgamegeek2 library (available on PyPi) to
28 | download names, ids, and full descriptions for board games from
29 | boardgamegeek.com's API. It uses the es_sink library to send the board game
30 | details to an Open Distro for Elasticsearch cluster running on localhost. Of
31 | course, you can change that and send it to any Elasticsearch cluster.
32 |
33 | Prerequisite
34 |
35 | You must download and install the es_sink library, available in this repo.
36 |
37 | Usage
38 |
39 | First, run bgg.py to download names and details from boardgamegeek.
40 |
41 | ```$ python bgg.py --names --ids --details-file --download-ids --download-details```
42 |
43 | The ```--names``` parameter lets you specify a file that will hold the names
44 | from boardgamegeek, the ```--ids``` parameter lets you specify a file to
45 | hold the boardgamegeek ids, and the ```--details-file``` parameter lets you
46 | specify a file to hold the game details. Note: these files must be different.
47 |
48 | When you're ready to upload to Elasticsearch, run
49 |
50 | ```$ python bgg.py --names --ids --details-file --send-to-es```
51 |
52 | There is currently no command line support for changing the endpoint. You can
53 | edit the ESDescriptor in es_manager.py to change the destination location,
54 | authentication method, and ES version. See the __init__ function in
55 | es_manager.py. See the es_sink library for details on how to change the
56 | parameters.
57 | '''
58 |
59 | import argparse
60 | import json
61 |
62 |
63 | from bgg_manager import BGGManager
64 | from es_manager import ESManager
65 |
66 |
def parse_args():
    '''Parse command line arguments
       --download_ids       Add to the command line to use the bgg APIs
                            to get game ids
       --download_details   Add to the command line to use the bgg APIs
                            to use names and ids to retrieve game details
       --send-to-es         Add to the command line to send downloaded
                            game details to Elasticsearch
       --ids                Specify a file name for pickled ids structure
       --names              Specify a file name for pickled names
       --details-file       Specify a file name for game details
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--download_ids', action='store_true', default=False,
                        help='''Use the API to scan for games first''')
    parser.add_argument('--download_details', action='store_true', default=False,
                        help='''Use the API to download game details''')
    parser.add_argument('--ids', action='store', default='',
                        help='''Source file for pickled ids''')
    parser.add_argument('--names', action='store', default='',
                        help='''Source file for pickled names''')
    parser.add_argument('--details-file', action='store', default='details.txt',
                        help='''Destination file for downloaded game details''')
    # Bug fix: help text was copy-pasted from --download_details and wrongly
    # described this flag as downloading; it actually uploads to ES (matching
    # the docstring above).
    parser.add_argument('--send-to-es', action='store_true', default=False,
                        help='''Send downloaded game details to Elasticsearch''')

    return parser.parse_args()
94 |
if __name__ == '__main__':
    # NB: the string below is a bare expression statement, not a real
    # docstring ("if" blocks cannot have one); kept purely as documentation.
    '''Main entry. Tries to download or load game names, ids, and details. If
       specified, it sends the details to Elasticsearch. '''
    args = parse_args()
    # When ids/names are not being downloaded, both must come from files.
    if not args.download_ids:
        if not args.names or not args.ids:
            err = '''If you're not downloading game ids and names, you must '''
            err += '''specify both a --names file and an --ids file'''
            raise ValueError(err)
    bgg_manager = BGGManager(args.ids, args.names, args.details_file)
    print("Loading game names and ids")
    bgg_manager.load_game_names_and_ids(download=args.download_ids)
    print("Loading details")
    bgg_manager.load_game_details(download=args.download_details)

    esm = ESManager()

    if args.send_to_es:
        # Drop and recreate the index so each upload starts clean.
        print('Deleting games index')
        esm.remove_index('games')
        print('Done')

        # Load the mapping, then re-serialize it for the index-creation
        # request body (the round-trip also validates the JSON file).
        mapping = ''
        with open('bgg_mapping.json', 'r') as mapping_file:
            mapping = json.load(mapping_file)

        print('Creating games index')
        esm.create_index('games', json.dumps(mapping))

        print("Sending data")
        for detail in bgg_manager.game_details():
            esm.add_document(detail)
        esm.flush() # Final flush to empty any stragglers.

    print("Done")
130 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/elasticsearch/70-es-data-sts.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: MIT-0
3 |
4 | ---
5 | apiVersion: apps/v1
6 | kind: StatefulSet
7 | metadata:
8 | labels:
9 | component: elasticsearch
10 | role: data
11 | name: es-data
12 | namespace: elasticsearch
13 | spec:
14 | serviceName: elasticsearch-data
15 | replicas: 3
16 | selector:
17 | matchLabels:
18 | component: elasticsearch
19 | role: data
20 | template:
21 | metadata:
22 | labels:
23 | component: elasticsearch
24 | role: data
25 | annotations:
26 | iam.amazonaws.com/role:
27 | spec:
28 | # Add toleration for not scheduling on dedicated node
29 | tolerations:
30 | - key: dedicated
31 | value: "true"
32 | effect: NoSchedule
33 | initContainers:
34 | - name: init-sysctl
35 | image: busybox:1.27.2
36 | command:
37 | - sysctl
38 | - -w
39 | - vm.max_map_count=262144
40 | securityContext:
41 | privileged: true
42 | - name: fixmount
43 | command: [ 'sh', '-c', 'chown -R 1000:1000 /usr/share/elasticsearch/data' ]
44 | image: busybox
45 | volumeMounts:
46 | - mountPath: /usr/share/elasticsearch/data
47 | name: data
48 | # Weighted anti-affinity to disallow deploying client node to the same worker node as master node
49 | affinity:
50 | podAntiAffinity:
51 | preferredDuringSchedulingIgnoredDuringExecution:
52 | - weight: 1
53 | podAffinityTerm:
54 | topologyKey: "kubernetes.io/hostname"
55 | labelSelector:
56 | matchLabels:
57 | component: elasticsearch
58 | role: data
59 | # Node Affinity to attract this Deployment's pods to a specific set of worker nodes
60 | nodeAffinity:
61 | requiredDuringSchedulingIgnoredDuringExecution:
62 | nodeSelectorTerms:
63 | - matchExpressions:
64 | - key: type # Replace this with corresponding worker node label's key
65 | operator: In
66 | values:
67 | - general # Replace this with corresponding worker node label's value
68 | serviceAccountName: elasticsearch
69 | containers:
70 | - name: elasticsearch
71 | env:
72 | - name: CLUSTER_NAME
73 | value: logs
74 | - name: NODE_MASTER
75 | value: "false"
76 | - name: NODE_INGEST
77 | value: "false"
78 | - name: NETWORK_HOST
79 | value: "_eth0_"
80 | - name: TRANSPORT_TLS_PEM_PASS
81 | value: "REPLACE_WITH_TLS_PRIVATE_KEY_PASSPHRASE" # Replace this with the passphrase for the TLS private key
82 | - name: HTTP_TLS_PEM_PASS
83 | value: "REPLACE_WITH_TLS_PRIVATE_KEY_PASSPHRASE" # Replace this with the passphrase for the TLS private key
84 | - name: NUMBER_OF_MASTERS
85 | value: "3"
86 | - name: NODE_NAME
87 | valueFrom:
88 | fieldRef:
89 | fieldPath: metadata.name
90 | - name: DISCOVERY_SERVICE
91 | value: elasticsearch-discovery
92 | - name: KUBERNETES_NAMESPACE
93 | valueFrom:
94 | fieldRef:
95 | fieldPath: metadata.namespace
96 | - name: NODE_DATA
97 | value: "true"
98 | - name: PROCESSORS
99 | valueFrom:
100 | resourceFieldRef:
101 | resource: limits.cpu
102 | - name: ES_JAVA_OPTS
103 | value: -Xms8g -Xmx8g
104 | # Official Image from Open Distro Team
105 | image: amazon/opendistro-for-elasticsearch:0.9.0
106 | imagePullPolicy: Always
107 | # only publish the transport port
108 | ports:
109 | - containerPort: 9300
110 | name: transport
111 | resources:
112 | requests:
113 | cpu: 2
114 | memory: 16Gi
115 | limits:
116 | cpu: 2
117 | memory: 16Gi
118 | livenessProbe:
119 | tcpSocket:
120 | port: transport
121 | initialDelaySeconds: 60
122 | periodSeconds: 10
123 | volumeMounts:
124 | - mountPath: /usr/share/elasticsearch/data
125 | name: data
126 | - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
127 | name: config
128 | subPath: elasticsearch.yml
129 | - mountPath: /usr/share/elasticsearch/config/logging.yml
130 | name: config
131 | subPath: logging.yml
132 | - mountPath: /usr/share/elasticsearch/config/elk-crt.pem
133 | name: certs
134 | subPath: elk-crt.pem
135 | readOnly: true
136 | - mountPath: /usr/share/elasticsearch/config/elk-key.pem
137 | name: certs
138 | subPath: elk-key.pem
139 | readOnly: true
140 | - mountPath: /usr/share/elasticsearch/config/elk-root-ca.pem
141 | name: certs
142 | subPath: elk-root-ca.pem
143 | readOnly: true
144 | - mountPath: /usr/share/elasticsearch/config/admin-crt.pem
145 | name: certs
146 | subPath: admin-crt.pem
147 | readOnly: true
148 | - mountPath: /usr/share/elasticsearch/config/admin-key.pem
149 | name: certs
150 | subPath: admin-key.pem
151 | readOnly: true
152 | - mountPath: /usr/share/elasticsearch/config/admin-root-ca.pem
153 | name: certs
154 | subPath: admin-root-ca.pem
155 | readOnly: true
156 | volumes:
157 | - name: config
158 | configMap:
159 | name: elasticsearch
160 | - name: certs
161 | secret:
162 | secretName: elasticsearch-tls-data
163 | volumeClaimTemplates:
164 | - metadata:
165 | name: data
166 | spec:
167 | accessModes: [ ReadWriteOnce ]
168 | storageClassName: elk-gp2
169 | resources:
170 | requests:
171 | storage: 2Ti
172 |
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/kibana/kibana-deployment.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2019 Viasat, Inc.
2 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License").
5 | # You may not use this file except in compliance with the License.
6 | # A copy of the License is located at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # or in the "license" file accompanying this file. This file is distributed
11 | # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
12 | # express or implied. See the License for the specific language governing
13 | # permissions and limitations under the License.
14 |
15 | {{- if .Values.kibana.enabled }}
# extensions/v1beta1 was removed in Kubernetes 1.16; apps/v1 requires an
# explicit, immutable spec.selector that matches the pod template labels.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
  name: {{ template "opendistro-es.fullname" . }}-kibana
spec:
  replicas: {{ .Values.kibana.replicas }}
  selector:
    matchLabels:
      app: {{ template "opendistro-es.fullname" . }}-kibana
24 | template:
25 | metadata:
26 | labels:
27 | {{ include "opendistro-es.labels.standard" . | indent 8 }}
28 | app: {{ template "opendistro-es.fullname" . }}-kibana
29 | annotations:
30 | {{/* This forces a restart if the secret config has changed */}}
31 | {{- if .Values.kibana.config }}
32 | checksum/config: {{ include (print .Template.BasePath "/kibana/kibana-config-secret.yaml") . | sha256sum | trunc 63 }}
33 | {{- end }}
34 | spec:
35 | {{- include "opendistro-es.imagePullSecrets" . | indent 6 }}
36 | containers:
37 | - env:
38 | - name: CLUSTER_NAME
39 | value: {{ .Values.global.clusterName }}
40 | # If no custom configuration provided, default to internal DNS
41 | {{- if not .Values.kibana.config }}
42 | - name: ELASTICSEARCH_HOSTS
43 | value: https://{{ template "opendistro-es.fullname" . }}-client-service:9200
44 | {{- end }}
45 | {{- if .Values.kibana.elasticsearchAccount.secret }}
46 | - name: ELASTICSEARCH_USERNAME
47 | valueFrom:
48 | secretKeyRef:
49 | name: {{ .Values.kibana.elasticsearchAccount.secret }}
50 | key: username
51 | - name: ELASTICSEARCH_PASSWORD
52 | valueFrom:
53 | secretKeyRef:
54 | name: {{ .Values.kibana.elasticsearchAccount.secret }}
55 | key: password
56 | {{- if and .Values.kibana.elasticsearchAccount.keyPassphrase.enabled }}
57 | - name: KEY_PASSPHRASE
58 | valueFrom:
59 | secretKeyRef:
60 | name: {{ .Values.kibana.elasticsearchAccount.secret }}
61 | key: keypassphrase
62 | # 32-character random string to be used as cookie password by security plugin
63 | {{- end }}
64 | - name: COOKIE_PASS
65 | valueFrom:
66 | secretKeyRef:
67 | name: {{ .Values.kibana.elasticsearchAccount.secret }}
68 | key: cookie
69 | {{- end }}
70 | {{- if .Values.kibana.extraEnvs }}
71 | {{ toYaml .Values.kibana.extraEnvs | indent 8 }}
72 | {{- end }}
73 | image: {{ .Values.kibana.image }}:{{ .Values.kibana.imageTag }}
74 | {{- with .Values.kibana.readinessProbe}}
75 | readinessProbe:
76 | {{ toYaml . | indent 10 }}
77 | {{- end }}
78 | {{- with .Values.kibana.livenessProbe}}
79 | livenessProbe:
80 | {{ toYaml . | indent 10 }}
81 | {{- end }}
82 | resources:
83 | {{ toYaml .Values.kibana.resources | indent 12 }}
84 | name: {{ template "opendistro-es.fullname" . }}-kibana
85 | volumeMounts:
86 | {{- if .Values.kibana.config }}
87 | - mountPath: {{ .Values.kibana.configDirectory }}/kibana.yml
88 | name: config
89 | subPath: kibana.yml
90 | {{- end }}
91 | {{- if and .Values.kibana.ssl.kibana.enabled .Values.kibana.ssl.kibana.existingCertSecret }}
92 | - mountPath: {{ .Values.kibana.certsDirectory }}/kibana-crt.pem
93 | name: kibana-certs
94 | subPath: kibana-crt.pem
95 | - mountPath: {{ .Values.kibana.certsDirectory }}/kibana-key.pem
96 | name: kibana-certs
97 | subPath: kibana-key.pem
98 | - mountPath: {{ .Values.kibana.certsDirectory }}/kibana-root-ca.pem
99 | name: kibana-certs
100 | subPath: kibana-root-ca.pem
101 | {{- end }}
102 | {{- if and .Values.kibana.ssl.elasticsearch.enabled .Values.kibana.ssl.elasticsearch.existingCertSecret }}
103 | - mountPath: {{ .Values.kibana.certsDirectory }}/elk-rest-crt.pem
104 | name: elasticsearch-certs
105 | subPath: elk-rest-crt.pem
106 | - mountPath: {{ .Values.kibana.certsDirectory }}/elk-rest-key.pem
107 | name: elasticsearch-certs
108 | subPath: elk-rest-key.pem
109 | - mountPath: {{ .Values.kibana.certsDirectory }}/elk-rest-root-ca.pem
110 | name: elasticsearch-certs
111 | subPath: elk-rest-root-ca.pem
112 | {{- end }}
113 | ports:
114 | - containerPort: {{ .Values.kibana.port }}
115 | serviceAccountName: {{ template "opendistro-es.kibana.serviceAccountName" . }}
116 | volumes:
117 | {{- if .Values.kibana.config }}
118 | - name: config
119 | secret:
120 | secretName: {{ template "opendistro-es.fullname" . }}-kibana-config
121 | {{- end }}
122 | {{- if and .Values.kibana.ssl.kibana.enabled .Values.kibana.ssl.kibana.existingCertSecret }}
123 | - name: kibana-certs
124 | secret:
125 | secretName: {{ .Values.kibana.ssl.kibana.existingCertSecret }}
126 | {{- end }}
127 | {{- if and .Values.kibana.ssl.elasticsearch.enabled .Values.kibana.ssl.elasticsearch.existingCertSecret }}
128 | - name: elasticsearch-certs
129 | secret:
130 | secretName: {{ .Values.kibana.ssl.elasticsearch.existingCertSecret }}
131 | {{- end }}
132 | {{- with .Values.kibana.nodeSelector }}
133 | nodeSelector:
134 | {{ toYaml . | indent 8 }}
135 | {{- end }}
136 | {{- with .Values.kibana.tolerations }}
137 | tolerations:
138 | {{ toYaml . | indent 8 }}
139 | {{- end }}
140 | restartPolicy: Always
141 | {{ end }}
142 |
--------------------------------------------------------------------------------
/iot_device_simulator/sensor.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2019, Amazon Web Services Inc.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 |
16 | Python 3
17 |
18 | Sensors! You want 'em, we got 'em
19 | '''
20 |
21 | from abc import ABCMeta, abstractmethod
22 | from datetime import datetime
23 | import math
24 | import random
25 | import time
26 | import uuid
27 | from pytz import timezone
28 |
class Sensor(metaclass=ABCMeta):
    '''Abstract base class for all sensors.

    Provides a unique sensor id, the reported value's name, and
    timezone-aware timestamps. Subclasses must implement report().

    Bug fix: the previous `__metaclass__ = ABCMeta` is Python-2 syntax
    and has no effect in Python 3 (which this file targets), so
    @abstractmethod was never enforced.
    '''

    def __init__(self, value_name, target_tz='US/Pacific'):
        '''value_name: key under which the sensor reports its reading.
        target_tz: pytz timezone name used for report timestamps.'''
        self.sensor_id = "Sensor_%s" % str(uuid.uuid1())
        self.value_name = value_name
        self.timezone = timezone(target_tz)

    def now_local(self):
        '''Return the current time, truncated to whole seconds,
        converted to the sensor's target timezone.'''
        return datetime.now(timezone('UTC')).replace(microsecond=0).astimezone(self.timezone)

    def get_value_name(self):
        '''The key under which the value appears in report() dicts.'''
        return self.value_name

    @abstractmethod
    def report(self):
        ''' Return a dict with timestamp and value'''
49 |
50 |
class SineSensor(Sensor):
    '''Emits a sine wave oscillating between bottom and top; sigfigs
    sets the window (10**sigfigs seconds) mapped onto two full periods,
    and fuzz adds random noise.
    TODO: timestamp belongs on the device'''
    def __init__(self, value_name, bottom=0, top=0, sigfigs=0, fuzz=0,
                 target_tz='US/Pacific'):
        super(SineSensor, self).__init__(value_name, target_tz)
        self.bottom = bottom
        self.top = top
        self.sigfigs = sigfigs
        self.fuzz = fuzz

    def value(self):
        '''Computes the value based on the current timestamp. Maybe better to compute
        off the last value'''
        period = 10.0 ** self.sigfigs
        # Position within the window, in (0, 1]; drives the sine phase.
        phase = (int(time.time()) % period + 1) / period
        wave = 1.0 + math.sin(4 * math.pi * phase)
        span = self.top - self.bottom
        noise = self.fuzz * random.random()
        return self.bottom + (wave / 2.0) * span + noise

    def report(self):
        '''One reading: timestamp, sensor id, and the named value.'''
        return {'@timestamp': self.now_local().isoformat(),
                'sensor_id': self.sensor_id,
                self.value_name: self.value()}
80 |
81 |
class ConstSensor(Sensor):
    '''A sensor that reports a constant value. Add fuzz for variance
    TODO: timestamp belongs on the device'''

    def __init__(self, value_name, value=0, fuzz=0, target_tz='US/Pacific'):
        super(ConstSensor, self).__init__(value_name, target_tz)
        self.cur_value = value
        self.fuzz = fuzz

    def value(self):
        '''Constant reading plus uniform random noise in [0, fuzz).'''
        return self.cur_value + random.random() * self.fuzz

    def report(self):
        '''One reading: timestamp, sensor id, and the named value.'''
        reading = {'@timestamp': self.now_local().isoformat(),
                   'sensor_id': self.sensor_id}
        reading[self.value_name] = self.value()
        return reading
98 |
99 |
class DriftingSensor(ConstSensor):
    '''A constant sensor that drifts randomly over time. Applies
    a random drift ranging from -drift_amt to +drift_amt when
    a random * the amount of seconds since the last drift is above
    threshold. '''
    def __init__(self, value_name, seed=0, threshold=0, drift_amt=0,
                 reset_threshold=0, fuzz=0, target_tz='US/Pacific'):
        super(DriftingSensor, self).__init__(value_name,
                                             value=seed,
                                             fuzz=fuzz,
                                             target_tz=target_tz)
        self.initial_value = seed               # value restored on reset
        self.threshold = threshold              # gate for applying a drift step
        self.reset_threshold = reset_threshold  # gate for snapping back to seed
        self.drift_amt = drift_amt              # max magnitude of one drift step
        self.last_drift = time.time()           # time of the last drift step

    def value(self):
        '''The value drifts by a random amount between -drift_amt and +drift_amt
        within a random amount of time. If reset_threshold is exceeded, then
        the value returns to its initial point. Fuzz also applies.'''
        time_elapsed = time.time() - self.last_drift
        if random.random() * time_elapsed > self.reset_threshold:
            self.cur_value = self.initial_value
        if random.random() * time_elapsed > self.threshold:
            self.cur_value += random.uniform(-self.drift_amt, self.drift_amt)
            self.last_drift = time.time()
        return super(DriftingSensor, self).value()

    # report() is inherited from ConstSensor; the override previously here
    # duplicated it line-for-line and has been removed.
134 |
135 |
class MonotonicSensor(Sensor):
    '''A sensor whose value advances by delta (plus fuzz) on every
    reading, optionally capped at ceiling. (Previous docstring was a
    copy-paste from ConstSensor.)
    TODO: timestamp belongs on the device'''
    def __init__(self, value_name, value=0, delta=0, fuzz=0,
                 ceiling=None, target_tz='US/Pacific'):
        super(MonotonicSensor, self).__init__(value_name, target_tz)
        self.cur_value = value   # current reading; advances per value() call
        self.delta = delta       # fixed increment per reading
        self.fuzz = fuzz         # extra random increment in [0, fuzz)
        self.ceiling = ceiling   # optional cap; None means unbounded
    def value(self):
        '''Advance by delta plus noise, holding steady once ceiling is
        reached. Compare against None explicitly so a ceiling of 0 (or
        any other falsy number) is honored instead of silently ignored.'''
        if self.ceiling is not None and self.cur_value >= self.ceiling:
            return self.cur_value
        self.cur_value = self.cur_value + self.delta + random.random() * self.fuzz
        return self.cur_value
    def report(self):
        '''One reading: timestamp, sensor id, and the named value.'''
        return {
            '@timestamp': self.now_local().isoformat(),
            'sensor_id': self.sensor_id,
            self.value_name: self.value()
        }
158 |
--------------------------------------------------------------------------------
/pa-to-es/main.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | SPDX-License-Identifier: MIT-0
4 |
5 | This walks all of the combinations of metrics, dimensions, and aggregations.
6 | METRICS - contains descriptions of the metric to be pulled and the dimensions
7 | for that metric. See also the docs here:
8 | https://opendistro.github.io/for-elasticsearch-docs/docs/pa/reference/.
9 | '''
10 |
11 | import argparse
12 | from datetime import datetime
13 | import json
14 |
15 | import metric_descriptions
16 | from node_tracker import NodeTracker
17 | from pytz import timezone
18 | import requests
19 | from result_parser import ResultParser
20 |
21 |
class MetricGatherer():
    ''' Use this class to retrieve metrics from Open Distro's Performance
    Analyzer. Call get_all_metrics() to receive a list of ES docs. '''

    def __init__(self, args):
        self.node_tracker = NodeTracker(args)
        self.args = args

    def to_url_params(self, metric_description):
        '''Converts a metric description into the corresponding URL params'''
        dims = ",".join(metric_description.dimensions)
        return "metrics={}&dim={}&agg={}&nodes=all".format(
            metric_description.name, dims, metric_description.agg)

    def get_metric(self, metric_description):
        ''' Retrieves data for the metric represented by metric_description from
        the Performance Analyzer API. Returns the full result from the
        HTTP Requests library. '''
        base_url = 'http://{}:9600/_opendistro/_performanceanalyzer/metrics?'.format(
            self.args.endpoint)
        return requests.get(base_url + self.to_url_params(metric_description))

    def get_all_metrics(self):
        ''' Loops through all the metric descriptions, sending one at a time,
        parsing the results, and returning a list of dicts, each one
        representing one future Elasticsearch document. '''
        docs = []
        for description in metric_descriptions.get_working_metric_descriptions():
            response = self.get_metric(description)
            if response.status_code != 200:
                print("FAIL", description, '\n', response.text)
                continue
            parser = ResultParser(description, response.text, self.node_tracker)
            docs.extend(parser.records())
        return docs
59 |
60 |
class MetricWriter():
    ''' Use this class to send documents in bulk to Elasticsearch'''

    def __init__(self, args):
        ''' Receives the command-line args, which must include an index root,
        an ES type, the --seven flag, and the target endpoint. '''
        self.index_name = args.index_name
        self.index_type = args.index_type
        self.seven = args.seven
        self.endpoint = args.endpoint

    def now_pst(self):
        '''Return the current time in PST timezone'''
        ''' TODO: This should use the timezone of the current host or UTC.'''
        now_utc = datetime.now(timezone('UTC'))
        return now_utc.astimezone(timezone('US/Pacific'))

    def put_doc_batches(self, docs):
        ''' Takes a list of Elasticsearch documents, interleaves the control
        lines and sends them via the _bulk API.'''
        # The index name and control line are identical for every doc in a
        # batch, so compute them once instead of per document. (It would be
        # better to take the index name from each doc's timestamp; otherwise
        # weird stuff happens at midnight.)
        index_name = "{}-{}".format(self.index_name,
                                    self.now_pst().strftime("%Y.%m.%d"))
        if self.seven:
            # ES 7+ rejects the _type field in bulk control lines.
            control_line = '{{"index" : {{ "_index" : "{}" }} }}'.format(index_name)
        else:
            control_line = '{{"index" : {{ "_index" : "{}", "_type": "{}" }} }}'.format(
                index_name, self.index_type)

        batch = []
        for doc in docs:
            batch.append(control_line)
            batch.append(json.JSONEncoder().encode(doc))

        bulk = '\n'.join(batch) + '\n'
        print("Sending batch of {} characters".format(len(bulk)))
        result = requests.post('https://{}:9200/_bulk'.format(self.endpoint),
                               data=bulk,
                               headers={'Content-Type':'application/json'},
                               ### HACK ALERT !!! TODO TODO TODO ###
                               auth=('admin', 'admin'),
                               verify=False)
        print('Sent batch', result.status_code)
106 |
def get_args():
    ''' Parse command line arguments '''
    parser = argparse.ArgumentParser(
        description='Send performance data from Open Distro for Elasticsearch to Elasticsearch')
    parser.add_argument('-i', '--index-name', type=str, default='pa',
                        action='store',
                        help='root string for the index name for performance indexes')
    parser.add_argument('-t', '--index-type', type=str, default='log',
                        action='store',
                        help='root string for the index type for performance indexes')
    parser.add_argument('--seven', default=False, action='store_true',
                        help='send data to ES 7 (removes type)')
    parser.add_argument('-e', '--endpoint', default='localhost', type=str,
                        action='store',
                        help='the Open Distro for Elasticsearch endpoint')
    return parser.parse_args()
124 |
if __name__ == '__main__':
    ''' This is the main function for the sample code provided.

    python3 main.py

    to run it. You can optionally set the ES index name (a timestamp is added
    to the --index-name, rolling over daily) and ES type via command-line. For
    Elasticsearch version 7 and beyond, set the --seven flag to prevent
    use of _types.

    It's a simple-minded, not-very-efficient loop that pulls all metrics
    formats them, and pushes to Elasticsearch.

    On my Mac, this loop takes ~8 seconds. Ideally it would be < 5 seconds.
    Performance Analyzer aggregates across 5 second intervals, so this will
    miss some data points. The obvious fix for that is to get metrics in
    bulk, rather than one at a time.'''
    # Parse the command line once; the previous version called get_args()
    # twice on every pass through the loop, re-parsing sys.argv forever.
    args = get_args()
    while True:
        print('Gathering docs')
        docs = MetricGatherer(args).get_all_metrics()
        print('Sending docs: ', len(docs))
        MetricWriter(args).put_doc_batches(docs)
147 |
--------------------------------------------------------------------------------
/cloudformation-deployment/od4es.json:
--------------------------------------------------------------------------------
1 | {
2 | "AWSTemplateFormatVersion": "2010-09-09",
3 | "Description": "Open Distro for Elasticsearch stack. **Attention** This template creates AWS resources that will incur charges on your account.",
4 | "Parameters": {
5 | "EnvironmentTag": {
6 | "Description": "Enter Environment Tag",
7 | "Type": "String",
8 | "MinLength": 3,
9 | "MaxLength": 10,
10 | "AllowedPattern": "^[a-z][-a-z0-9]*$",
11 | "Default": "od4es-cfn"
12 | },
13 | "CIDRPrefix": {
14 | "Description": "Enter Class B CIDR Prefix (e.g. 192.168, 10.1, 172.16)",
15 | "Type": "String",
16 | "AllowedPattern": "(192\\.168)|10\\.[0-9][0-9]{0,1}|(172\\.([1][6-9]|[2][0-9]|[3][0-1]))",
17 | "ConstraintDescription": "must be a valid Private Subnet CIDR Prefix between 192.168 or 10.{0-99} or 172.16",
18 | "Default": "10.1"
19 | },
20 | "KeyName": {
21 | "Description": "The EC2 Key Pair to allow SSH access to all the instances for this solution",
22 | "Type": "AWS::EC2::KeyPair::KeyName",
23 | "ConstraintDescription": "must be the name of an existing EC2 KeyPair."
24 | },
25 | "MasterInstanceType": {
26 | "Description": "The EC2 instance type for the master nodes in the cluster",
27 | "Type": "String",
28 | "Default": "m5.large",
29 | "AllowedValues": ["m5.large", "m5.xlarge", "m5.2xlarge", "m5.4xlarge", "m5a.large", "m5a.xlarge", "m5a.2xlarge", "m5a.4xlarge", "m5n.large", "m5n.xlarge", "m5n.2xlarge", "m5n.4xlarge", "c5.large", "c5.xlarge", "c5.2xlarge", "c5.4xlarge", "c5n.large", "c5n.xlarge", "c5n.2xlarge", "c5n.4xlarge", "r5.large", "r5.xlarge", "r5.2xlarge", "r5.4xlarge", "r5a.large", "r5a.xlarge", "r5a.2xlarge", "r5a.4xlarge", "r5n.large", "r5n.xlarge", "r5n.2xlarge", "r5n.4xlarge"]
30 | },
31 | "DataInstanceType": {
32 | "Description": "The EC2 instance type for the data nodes in the cluster",
33 | "Type": "String",
34 | "Default": "m5.large",
35 | "AllowedValues": ["m5.large", "m5.xlarge", "m5.2xlarge", "m5.4xlarge", "m5a.large", "m5a.xlarge", "m5a.2xlarge", "m5a.4xlarge", "m5n.large", "m5n.xlarge", "m5n.2xlarge", "m5n.4xlarge", "c5.large", "c5.xlarge", "c5.2xlarge", "c5.4xlarge", "c5n.large", "c5n.xlarge", "c5n.2xlarge", "c5n.4xlarge", "r5.large", "r5.xlarge", "r5.2xlarge", "r5.4xlarge", "r5a.large", "r5a.xlarge", "r5a.2xlarge", "r5a.4xlarge", "r5n.large", "r5n.xlarge", "r5n.2xlarge", "r5n.4xlarge"]
36 | },
37 | "DataNodeCount": {
38 | "Description": "The number of data nodes to deploy in the cluster",
39 | "Type": "String",
40 | "AllowedPattern": "^(\\d{1,2})",
41 | "Default": "2"
42 | },
43 | "ClientInstanceType": {
44 | "Description": "The EC2 instance type for the Kibana and client nodes in the cluster",
45 | "Type": "String",
46 | "Default": "m5.large",
47 | "AllowedValues": ["m5.large", "m5.xlarge", "m5.2xlarge", "m5.4xlarge", "m5a.large", "m5a.xlarge", "m5a.2xlarge", "m5a.4xlarge", "m5n.large", "m5n.xlarge", "m5n.2xlarge", "m5n.4xlarge", "c5.large", "c5.xlarge", "c5.2xlarge", "c5.4xlarge", "c5n.large", "c5n.xlarge", "c5n.2xlarge", "c5n.4xlarge", "r5.large", "r5.xlarge", "r5.2xlarge", "r5.4xlarge", "r5a.large", "r5a.xlarge", "r5a.2xlarge", "r5a.4xlarge", "r5n.large", "r5n.xlarge", "r5n.2xlarge", "r5n.4xlarge"]
48 | },
49 | "ClientNodeCount": {
50 | "Description": "The number of client/kibana nodes to deploy in the cluster",
51 | "Type": "String",
52 | "AllowedPattern": "^(\\d{1,2})",
53 | "Default": "2"
54 | }
55 | },
56 | "Resources": {
57 | "Network": {
58 | "Type": "AWS::CloudFormation::Stack",
59 | "Properties": {
60 | "TemplateURL": "https://s3-us-west-2.amazonaws.com/odfe-cfn/network.json",
61 | "Parameters": {
62 | "EnvironmentTag": {
63 | "Ref": "EnvironmentTag"
64 | },
65 | "CIDRPrefix": {
66 | "Ref": "CIDRPrefix"
67 | }
68 | }
69 | }
70 | },
71 | "Seed":{
72 | "Type": "AWS::CloudFormation::Stack",
73 | "Properties": {
74 | "TemplateURL": "https://s3-us-west-2.amazonaws.com/odfe-cfn/seed.json",
75 | "Parameters": {
76 | "NetworkStackName" : { "Fn::GetAtt" : [ "Network", "Outputs.StackName" ] },
77 | "KeyName" : {"Ref" : "KeyName"},
78 | "MasterInstanceType": { "Ref": "MasterInstanceType" }
79 | }
80 | }
81 | },
82 | "DataNodes":{
83 | "Type": "AWS::CloudFormation::Stack",
84 | "Properties": {
85 | "TemplateURL": "https://s3-us-west-2.amazonaws.com/odfe-cfn/data-nodes.json",
86 | "Parameters": {
87 | "NetworkStackName" : { "Fn::GetAtt" : [ "Network", "Outputs.StackName" ] },
88 | "KeyName" : {"Ref" : "KeyName"},
89 | "DataInstanceType": { "Ref": "DataInstanceType" },
90 | "DataNodeCount": { "Ref": "DataNodeCount" }
91 | }
92 | }
93 | },
94 | "MasterNodes":{
95 | "Type": "AWS::CloudFormation::Stack",
96 | "Properties": {
97 | "TemplateURL": "https://s3-us-west-2.amazonaws.com/odfe-cfn/master-nodes.json",
98 | "Parameters": {
99 | "NetworkStackName" : { "Fn::GetAtt" : [ "Network", "Outputs.StackName" ] },
100 | "KeyName" : {"Ref" : "KeyName"},
101 | "MasterInstanceType": { "Ref": "MasterInstanceType" }
102 | }
103 | }
104 | },
105 | "ClientNodes":{
106 | "Type": "AWS::CloudFormation::Stack",
107 | "Properties": {
108 | "TemplateURL": "https://s3-us-west-2.amazonaws.com/odfe-cfn/client-nodes.json",
109 | "Parameters": {
110 | "NetworkStackName" : { "Fn::GetAtt" : [ "Network", "Outputs.StackName" ] },
111 | "KeyName" : {"Ref" : "KeyName"},
112 | "ClientInstanceType": { "Ref": "ClientInstanceType" }
113 | }
114 | }
115 | }
116 | },
117 | "Outputs": {
118 | "SSHKeyName": {
119 | "Description": "SSH Key Name.",
120 | "Value": {"Ref": "KeyName"},
121 | "Export": {
122 | "Name": {
123 | "Fn::Sub": "${AWS::StackName}-SSHKeyName"
124 | }
125 | }
126 | },
127 | "NetworkStack": {
128 | "Description": "Network stack name",
129 | "Value": { "Fn::GetAtt" : [ "Network", "Outputs.StackName" ] }
130 | },
131 | "ClientNodesASG": {
132 | "Description": "AutoScaling group. Examine the instances in this group to get the public IPs to connect with Kibana and to send bulk and query API requests.",
133 | "Value": { "Fn::GetAtt": ["ClientNodes", "Outputs.AutoScalingGroup"] }
134 | }
135 | }
136 | }
--------------------------------------------------------------------------------
/es_sink/es_sink/descriptor.py:
--------------------------------------------------------------------------------
1 | '''
2 | Copyright 2020, Amazon Web Services Inc.
3 | This code is licensed under MIT license (see LICENSE.txt for details)
4 |
5 | Python 3
6 |
7 | Provides a buffer object that holds log lines in Elasticsearch _bulk
8 | format. As each line is added, the buffer stores the control line
9 | as well as the log line.
10 | '''
11 |
12 |
13 | from collections import namedtuple
14 |
15 |
16 | from es_sink.es_auth import ESAuth, ESNoAuth, ESSigV4Auth, ESHttpAuth
17 | from es_sink.transport_utils import now_pst
18 |
19 |
class SQSDescriptor():
    '''Description of an SQS queue. Enables generalization of sink targets'''

    def __init__(self, q_url, region):
        '''An SQS queue has a URL and a region'''
        self._sqs_url = q_url
        self._region = region

    @property
    def sqs_url(self):
        '''The target SQS URL'''
        # Bug fix: this previously returned `self. sqs_url` — the property
        # itself — causing infinite recursion (RecursionError) on access.
        return self._sqs_url

    @property
    def region(self):
        '''The region of the queue'''
        return self._region
36 |
37 |
38 | # Describes handling of indexes for the ESDescriptor class
39 | # es_v7 Use ES V7 APIs (no _type, mostly)
40 | # es_index: For API calls that use an index
41 | # es_type: For ES V6 clusters and calls that use a _type
42 | # timestamped: For ES API calls, mostly writes, append _YY.MM.DD
43 | # to the index name
# All four fields carry defaults (requires Python 3.7+ for the `defaults`
# keyword): es_index and es_type default to None; es_v7 and timestamped
# default to True.
IndexDescriptor = namedtuple('IndexDescriptor', ['es_index', 'es_type',
                                                 'es_v7', 'timestamped'],
                             defaults=(None, None, True, True))
47 |
48 |
class ESDescriptor():
    """Description of an Elasticsearch endpoint."""

    def __init__(self, endpoint, index_descriptor, region=None, auth=None):
        """Describes an Elasticsearch sink.

        This could be refactored to be a little bit better. As of now, it
        supports Amazon ES endpoints as well as vanilla ES endpoints. It also
        supports ES V6 and ES V7 endpoints. These could be mixins.

        endpoint:         The base url to send REST API calls
        index_descriptor: An IndexDescriptor as above, specifying the
                          index name, es type, v7 status, and whether to
                          create indices with a timestamped name
        region:           For Amazon ES domains, the AWS region. E.g.
                          us-west-2
        auth:             A subclass of the ESAuth class specifying how to
                          handle authentication of requests sent to the
                          Elasticsearch endpoint. Defaults to ESNoAuth
                          (unauthenticated requests).

        Raises TypeError for a bad index_descriptor; ValueError when auth
        is not an ESAuth, or when SigV4 auth is requested without a region.
        """
        self._endpoint = endpoint

        if not isinstance(index_descriptor, IndexDescriptor):
            raise TypeError('Wrong type for index_descriptor')
        self._indexing = index_descriptor

        self._auth = auth
        if not auth:
            self._auth = ESNoAuth()

        # Bug fix: validate the defaulted value (self._auth), not the raw
        # argument. Checking `auth` raised ValueError whenever the caller
        # omitted it, even though ESNoAuth had just been substituted.
        if not issubclass(type(self._auth), ESAuth):
            raise ValueError('You must use a child of the ESAuth class')

        if isinstance(self._auth, ESSigV4Auth) and not region:
            raise ValueError('You must specify a region to use SigV4Signing')
        self._region = region

    def user_password(self):
        '''Expose a method to retrieve the username/password.
        Raises ValueError unless the descriptor uses HTTP basic auth.'''
        if not self._auth or not isinstance(self._auth, ESHttpAuth):
            raise ValueError('The descriptors authentication is not HTTP')
        return self._auth.auth_creds()

    @property
    def region(self):
        '''The region of the Amazon ES domain'''
        return self._region

    def is_signed(self):
        '''Should requests be signed with AWS SigV4 signing?'''
        return isinstance(self._auth, ESSigV4Auth)

    def is_http_auth(self):
        '''Should requests carry HTTP basic-auth credentials?'''
        return isinstance(self._auth, ESHttpAuth)

    def auth(self):
        '''Return the auth object passed in to init'''
        return self._auth

    def timestamped(self):
        '''Returns true when the index names should carry a timestamp'''
        return self._indexing.timestamped

    def _index_name(self):
        ''' Return es_index-YY.MM.DD. Not timezone-aware '''
        if self.timestamped():
            return "{}-{}".format(self._indexing.es_index,
                                  now_pst().strftime("%Y.%m.%d"))
        return self._indexing.es_index

    def base_url(self):
        ''' Returns the endpoint. Slash-terminated.'''
        if self._endpoint.endswith('/'):
            return self._endpoint
        return '{}/'.format(self._endpoint)

    def base_url_with_index(self):
        '''Returns the endpoint/index, slash terminated. '''
        return '{}{}/'.format(self.base_url(), self._index_name())

    def base_url_6(self):
        ''' Returns the endpoint/index/type. Slash-terminated.
            Set timestamped=True to add the YY.MM.DD to the index
            name.'''
        return '{}{}/{}/'.format(self.base_url(), self._index_name(),
                                 self._indexing.es_type)

    def base_url_7(self):
        ''' Returns the endpoint/index/. Slash-terminated.
            Set timestamped=True to add the YY.MM.DD to the index
            name.'''
        return '{}{}/'.format(self.base_url(), self._index_name())

    def bulk_url(self):
        ''' Returns the base url with index/_bulk appended. This assumes
            that you do not want index embedded. Set timestamped=True to
            add the YY.MM.DD to the index name.'''
        return '{}{}/_bulk'.format(self.base_url(), self._index_name())

    def _es_v7(self):
        '''True when this descriptor targets an ES V7 cluster.'''
        return self._indexing.es_v7

    def search_url(self):
        ''' Returns the base url with <index>/<type>/_search, dropping
            the type for ES V7. Set timestamped=True to add the YY.MM.DD
            to the index name.'''
        if self._es_v7():
            return '{}{}/_search'.format(self.base_url(),
                                         self._index_name())

        return '{}{}/{}/_search'.format(self.base_url(),
                                        self._index_name(),
                                        self._indexing.es_type)

    # _bulk "action" line templates; V7 clusters drop the _type field.
    ACTION_LINE_6 = '{{"index" : {{ "_index" : "{}", "_type": "{}" }} }}'
    ACTION_LINE_7 = '{{"index" : {{ "_index" : "{}" }} }}'
    def bulk_control_line(self):
        ''' Strictly, this shouldn't go in this class. It's not really
        part of a description. OTOH, all the info is here and it will
        save lots of duplicated code.
        Returns the "control" line for a _bulk request. '''
        if self._es_v7():
            return self.ACTION_LINE_7.format(self._index_name())

        return self.ACTION_LINE_6.format(self._index_name(),
                                         self._indexing.es_type)
180 |
--------------------------------------------------------------------------------
/es_sink/tests/test_descriptor.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append("../es_sink")
3 |
4 |
5 | import unittest
6 |
7 |
8 | import es_sink.es_auth as es_auth
9 | import es_sink.descriptor
10 | import es_sink.transport_utils
11 |
class TestDescriptor(unittest.TestCase):
    '''Tests for es_sink descriptor URL building and auth handling.

    Fixes two dead tests (test_es_auth / test_create constructed objects
    without asserting anything) and factors the duplicated URL assertions
    out of the four index-naming tests.'''

    # Timestamped descriptors: ES 6.x (typed) and ES 7.x (typeless).
    TS_ID6 = es_sink.descriptor.IndexDescriptor(es_index='logs',
                                                es_type='log',
                                                es_v7=False,
                                                timestamped=True)
    TS_ID7 = es_sink.descriptor.IndexDescriptor(es_index='logs',
                                                es_v7=True,
                                                timestamped=True)

    # The same two descriptors without a date suffix on the index name.
    NO_TS_ID6 = es_sink.descriptor.IndexDescriptor(es_index='logs',
                                                   es_type='log',
                                                   es_v7=False,
                                                   timestamped=False)
    NO_TS_ID7 = es_sink.descriptor.IndexDescriptor(es_index='logs',
                                                   es_v7=True,
                                                   timestamped=False)

    # Expected _bulk control lines; '{}' takes the index-name suffix.
    ACTION_LINE_6 = '{{"index" : {{ "_index" : "logs{}", "_type": "log" }} }}'
    ACTION_LINE_7 = '{{"index" : {{ "_index" : "logs{}" }} }}'

    BASE_URL = "https://localhost:9200/"

    @staticmethod
    def _date_suffix():
        '''"-YYYY.MM.DD" suffix expected on today's timestamped indexes.
        Small chance this mismatches if a test run straddles midnight.
        Who's running unit tests at midnight anyway.'''
        return '-' + es_sink.transport_utils.now_pst().strftime("%Y.%m.%d")

    def _check_common_urls(self, desc, index_suffix):
        '''URL assertions shared by all four index-naming tests.'''
        self.assertEqual(desc.base_url(), self.BASE_URL)
        self.assertEqual(desc.base_url_with_index(),
                         '{}logs{}/'.format(self.BASE_URL, index_suffix))
        self.assertEqual(desc.bulk_url(),
                         desc.base_url_with_index() + "_bulk")

    def test_index_descriptor_defaults(self):
        '''IndexDescriptor defaults to an un-named, timestamped v7 index.'''
        idx = es_sink.descriptor.IndexDescriptor()
        self.assertIsNone(idx.es_index)
        self.assertIsNone(idx.es_type)
        self.assertTrue(idx.es_v7)
        self.assertTrue(idx.timestamped)

    def test_es_auth(self):
        '''All three auth flavors construct successfully.'''
        # Previously this test built the objects without asserting anything.
        self.assertIsInstance(es_auth.ESHttpAuth('admin', 'admin'),
                              es_auth.ESHttpAuth)
        self.assertIsInstance(es_auth.ESNoAuth(), es_auth.ESNoAuth)
        self.assertIsInstance(es_auth.ESSigV4Auth(), es_auth.ESSigV4Auth)

    def test_create(self):
        '''An ESDescriptor built with HTTP auth reports HTTP auth.'''
        auth = es_auth.ESHttpAuth('admin', 'admin')
        desc = es_sink.descriptor.ESDescriptor(self.BASE_URL,
                                               self.TS_ID6,
                                               auth=auth)
        # Previously this test asserted nothing about the new descriptor.
        self.assertTrue(desc.is_http_auth())
        self.assertFalse(desc.is_signed())

    def test_missing_user_pass_region(self):
        '''Without HTTP auth, user_password raises; region is None.'''
        auth = es_auth.ESNoAuth()
        desc = es_sink.descriptor.ESDescriptor(self.BASE_URL,
                                               self.TS_ID7,
                                               auth=auth)
        self.assertRaises(ValueError, desc.user_password)
        self.assertIsNone(desc.region)

    def test_auth(self):
        '''is_signed/is_http_auth reflect the auth object supplied.'''
        index_descriptor = es_sink.descriptor.IndexDescriptor()

        desc = es_sink.descriptor.ESDescriptor(self.BASE_URL,
                                               index_descriptor,
                                               auth=es_auth.ESNoAuth())
        self.assertFalse(desc.is_signed())
        self.assertFalse(desc.is_http_auth())

        desc = es_sink.descriptor.ESDescriptor(self.BASE_URL,
                                               index_descriptor,
                                               region='us-west-2',
                                               auth=es_auth.ESSigV4Auth())
        self.assertTrue(desc.is_signed())
        self.assertFalse(desc.is_http_auth())

        desc = es_sink.descriptor.ESDescriptor(
            self.BASE_URL,
            index_descriptor,
            region='us-west-2',
            auth=es_auth.ESHttpAuth('admin', 'adminpw'))
        self.assertFalse(desc.is_signed())
        self.assertTrue(desc.is_http_auth())

    def test_index_naming_logs6(self):
        '''Timestamped v6 URLs embed the doc type and today's date.'''
        suffix = self._date_suffix()
        desc = es_sink.descriptor.ESDescriptor(self.BASE_URL, self.TS_ID6,
                                               auth=es_auth.ESNoAuth())
        self.assertTrue(desc.timestamped())
        self._check_common_urls(desc, suffix)
        self.assertEqual(desc.base_url_6(),
                         '{}logs{}/log/'.format(self.BASE_URL, suffix))
        self.assertEqual(desc.search_url(),
                         "{}logs{}/log/_search".format(self.BASE_URL, suffix))
        self.assertEqual(desc.bulk_control_line(),
                         self.ACTION_LINE_6.format(suffix))

    def test_index_naming_logs7(self):
        '''Timestamped v7 URLs omit the doc type but keep the date.'''
        suffix = self._date_suffix()
        desc = es_sink.descriptor.ESDescriptor(self.BASE_URL, self.TS_ID7,
                                               auth=es_auth.ESNoAuth())
        self.assertTrue(desc.timestamped())
        self._check_common_urls(desc, suffix)
        self.assertEqual(desc.base_url_7(),
                         '{}logs{}/'.format(self.BASE_URL, suffix))
        self.assertEqual(desc.search_url(),
                         "{}logs{}/_search".format(self.BASE_URL, suffix))
        self.assertEqual(desc.bulk_control_line(),
                         self.ACTION_LINE_7.format(suffix))

    def test_untimestamped6(self):
        '''Un-timestamped v6 URLs use the bare index name plus doc type.'''
        desc = es_sink.descriptor.ESDescriptor(self.BASE_URL, self.NO_TS_ID6,
                                               auth=es_auth.ESNoAuth())
        self.assertFalse(desc.timestamped())
        self._check_common_urls(desc, '')
        self.assertEqual(desc.base_url_6(),
                         '{}logs/log/'.format(self.BASE_URL))
        self.assertEqual(desc.search_url(),
                         "{}logs/log/_search".format(self.BASE_URL))
        self.assertEqual(desc.bulk_control_line(),
                         self.ACTION_LINE_6.format(''))

    def test_untimestamped7(self):
        '''Un-timestamped v7 URLs are endpoint/index with no doc type.'''
        desc = es_sink.descriptor.ESDescriptor(self.BASE_URL, self.NO_TS_ID7,
                                               auth=es_auth.ESNoAuth())
        self.assertFalse(desc.timestamped())
        self._check_common_urls(desc, '')
        self.assertEqual(desc.base_url_7(), '{}logs/'.format(self.BASE_URL))
        self.assertEqual(desc.search_url(),
                         "{}logs/_search".format(self.BASE_URL))
        self.assertEqual(desc.bulk_control_line(),
                         self.ACTION_LINE_7.format(''))
169 |
170 |
171 |
172 |
# Allow running the suite directly with `python test_descriptor.py`.
if __name__ == '__main__':
    unittest.main()
--------------------------------------------------------------------------------
/open-distro-elasticsearch-kubernetes/helm/opendistro-es/templates/elasticsearch/es-client-deploy.yaml:
--------------------------------------------------------------------------------
# Copyright 2019 Viasat, Inc.
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Deployment for Open Distro Elasticsearch "client" nodes
# (node.master=false, node.data=false, node.ingest=true -- see env below).
{{- if .Values.elasticsearch.client.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
{{ include "opendistro-es.labels.standard" . | indent 4 }}
    role: client
  name: {{ template "opendistro-es.fullname" . }}-client
  namespace: {{ .Release.Namespace }}
spec:
  replicas: {{ .Values.elasticsearch.client.replicas }}
  selector:
    matchLabels:
{{ include "opendistro-es.labels.standard" . | indent 6 }}
      role: client
  template:
    metadata:
      labels:
{{ include "opendistro-es.labels.standard" . | indent 8 }}
        role: client
      annotations:
        {{/* This forces a restart if the secret config has changed */}}
        {{- if .Values.elasticsearch.config }}
        configchecksum: {{ include (print .Template.BasePath "/elasticsearch/es-config-secret.yaml") . | sha256sum | trunc 63 }}
        {{- end }}
        {{- if .Values.elasticsearch.client.podAnnotations }}
{{ toYaml .Values.elasticsearch.client.podAnnotations | indent 8 }}
        {{- end }}
    spec:
{{- include "opendistro-es.imagePullSecrets" . | indent 6 }}
      serviceAccountName: {{ template "opendistro-es.elasticsearch.serviceAccountName" . }}
      {{- with .Values.elasticsearch.client.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.elasticsearch.client.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      # Weighted anti-affinity to disallow deploying client node to the same worker node as master node
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              topologyKey: "kubernetes.io/hostname"
              labelSelector:
                matchLabels:
                  role: client
        {{- with .Values.elasticsearch.client.nodeAffinity }}
        nodeAffinity:
{{ toYaml . | indent 10 }}
        {{- end }}
      # Privileged init container: raises vm.max_map_count on the host via sysctl.
      initContainers:
      - name: init-sysctl
        image: {{ .Values.elasticsearch.initContainer.image }}:{{ .Values.elasticsearch.initContainer.imageTag }}
        command:
        - sysctl
        - -w
        - vm.max_map_count={{ .Values.elasticsearch.maxMapCount }}
        securityContext:
          privileged: true
      containers:
      - name: elasticsearch
        env:
        - name: cluster.name
          value: {{ .Values.global.clusterName }}
        # Role flags for a client node: no master, no data, ingest enabled.
        - name: node.master
          value: "false"
        - name: node.ingest
          value: "true"
        - name: node.data
          value: "false"
        - name: network.host
          value: "0.0.0.0"
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: discovery.seed_hosts
          value: {{ template "opendistro-es.fullname" . }}-discovery
        - name: KUBERNETES_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: PROCESSORS
          valueFrom:
            resourceFieldRef:
              resource: limits.cpu
        - name: ES_JAVA_OPTS
          value: {{ .Values.elasticsearch.client.javaOpts }}
        {{- if .Values.elasticsearch.extraEnvs }}
{{ toYaml .Values.elasticsearch.extraEnvs | indent 8 }}
        {{- end }}
        resources:
{{ toYaml .Values.elasticsearch.client.resources | indent 12 }}
        # Official Image from Open Distro Team
        image: {{ .Values.elasticsearch.image }}:{{ .Values.elasticsearch.imageTag }}
        imagePullPolicy: Always
        ports:
        - containerPort: 9200
          name: http
        - containerPort: 9300
          name: transport
        - containerPort: 9600
          name: metrics
        {{- with .Values.elasticsearch.client.readinessProbe}}
        readinessProbe:
{{ toYaml . | indent 10 }}
        {{- end }}
        {{- with .Values.elasticsearch.client.livenessProbe}}
        livenessProbe:
{{ toYaml . | indent 10 }}
        {{- end }}
        volumeMounts:
        {{- if .Values.elasticsearch.config }}
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/elasticsearch.yml
          name: config
          subPath: elasticsearch.yml
        {{- end }}
        {{- if .Values.elasticsearch.log4jConfig }}
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/log4j2.properties
          name: config
          subPath: log4j2.properties
        {{- end }}
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/logging.yml
          name: config
          subPath: logging.yml
        {{- if and .Values.elasticsearch.ssl.transport.enabled .Values.elasticsearch.ssl.transport.existingCertSecret }}
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/elk-transport-crt.pem
          name: transport-certs
          subPath: elk-transport-crt.pem
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/elk-transport-key.pem
          name: transport-certs
          subPath: elk-transport-key.pem
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/elk-transport-root-ca.pem
          name: transport-certs
          subPath: elk-transport-root-ca.pem
        {{- end }}
        {{- if and .Values.elasticsearch.ssl.rest.enabled .Values.elasticsearch.ssl.rest.existingCertSecret }}
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/elk-rest-crt.pem
          name: rest-certs
          subPath: elk-rest-crt.pem
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/elk-rest-key.pem
          name: rest-certs
          subPath: elk-rest-key.pem
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/elk-rest-root-ca.pem
          name: rest-certs
          subPath: elk-rest-root-ca.pem
        {{- end }}
        {{- if and .Values.elasticsearch.ssl.admin.enabled .Values.elasticsearch.ssl.admin.existingCertSecret }}
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/admin-crt.pem
          name: admin-certs
          subPath: admin-crt.pem
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/admin-key.pem
          name: admin-certs
          subPath: admin-key.pem
        - mountPath: {{ .Values.elasticsearch.configDirectory }}/admin-root-ca.pem
          name: admin-certs
          subPath: admin-root-ca.pem
        {{- end }}
      # "config" comes from the chart's -es-config secret; the cert volumes
      # reference user-supplied (existing) secrets from values.yaml.
      volumes:
      - name: config
        secret:
          secretName: {{ template "opendistro-es.fullname" . }}-es-config
      {{- if and .Values.elasticsearch.ssl.transport.enabled .Values.elasticsearch.ssl.transport.existingCertSecret }}
      - name: transport-certs
        secret:
          secretName: {{ .Values.elasticsearch.ssl.transport.existingCertSecret }}
      {{- end }}
      {{- if and .Values.elasticsearch.ssl.rest.enabled .Values.elasticsearch.ssl.rest.existingCertSecret }}
      - name: rest-certs
        secret:
          secretName: {{ .Values.elasticsearch.ssl.rest.existingCertSecret }}
      {{- end }}
      {{- if and .Values.elasticsearch.ssl.admin.enabled .Values.elasticsearch.ssl.admin.existingCertSecret }}
      - name: admin-certs
        secret:
          secretName: {{ .Values.elasticsearch.ssl.admin.existingCertSecret }}
      {{- end }}
{{- end }}

--------------------------------------------------------------------------------