├── .ci
├── get_version_from_git.sh
└── gh_release_to_obs_changeset.py
├── .coveragerc
├── .github
└── workflows
│ ├── dashboards-ci.yml
│ └── exporter-ci.yml
├── .gitignore
├── .pylintrc
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.md
├── bin
├── hanadb_exporter
└── supportconfig-hanadb_exporter
├── config.json.example
├── daemon
└── hanadb_exporter@.service
├── dashboards
├── README.md
├── sap-hana.json
└── screenshot.png
├── docs
└── METRICS.md
├── hanadb_exporter
├── __init__.py
├── db_manager.py
├── main.py
├── prometheus_exporter.py
├── prometheus_metrics.py
├── secrets_manager.py
└── utils.py
├── logging_config.ini
├── metrics.json
├── packaging
└── obs
│ ├── grafana-sap-hana-dashboards
│ ├── _service
│ ├── grafana-sap-hana-dashboards.changes
│ └── grafana-sap-hana-dashboards.spec
│ └── prometheus-hanadb_exporter
│ ├── _service
│ └── prometheus-hanadb_exporter.spec
├── pytest.ini
├── requirements.txt
├── setup.py
├── tests
├── __init__.py
├── db_manager_test.py
├── main_test.py
├── prometheus_exporter_test.py
├── prometheus_metrics_test.py
├── secrets_manager_test.py
└── utils_test.py
└── tox.ini
/.ci/get_version_from_git.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Print a version string derived from the Git history:
#   <tag>+git.<commit-timestamp>.<short-sha>            when HEAD is exactly at the latest tag
#   <tag>+git.dev<N>.<commit-timestamp>.<short-sha>     when there are N commits after the tag
#   0+git.<commit-timestamp>.<short-sha>                when the repository has no tags yet

TAG=$(git describe --tags --abbrev=0 2>/dev/null)
SUFFIX=$(git show -s --format=%ct.%h HEAD)

if [ -n "${TAG}" ]; then
  # Quote the ref: unquoted ${TAG} would be word-split / glob-expanded for
  # unusual tag names.
  COMMITS_SINCE_TAG=$(git rev-list "${TAG}.." --count)
  if [ "${COMMITS_SINCE_TAG}" -gt 0 ]; then
    SUFFIX="dev${COMMITS_SINCE_TAG}.${SUFFIX}"
  fi
else
  # No tags at all: fall back to version 0.
  TAG="0"
fi

echo "${TAG}+git.${SUFFIX}"
--------------------------------------------------------------------------------
/.ci/gh_release_to_obs_changeset.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Prepend a GitHub release's notes to an RPM changelog.

Fetches the release metadata from the GitHub API (optionally authenticated
via the GITHUB_OAUTH_TOKEN environment variable) and renders an RPM-style
changelog entry, either printed to stdout or prepended to the file given
with --file.
"""

import argparse
import json
import os
import sys
import tempfile
import textwrap
import urllib.error
import urllib.request
from datetime import datetime, timezone

parser = argparse.ArgumentParser(description="Add a GitHub release to an RPM changelog", usage=argparse.SUPPRESS)
parser.add_argument("repo", help="GitHub repository (owner/name)")
parser.add_argument("-t", "--tag", help="A specific Git tag to get; if none, latest will be used")
parser.add_argument("-a", "--author", help="The author of the RPM changelog entry")
parser.add_argument("-f", "--file", help="Prepend the new changelog entry to file instead of printing in stdout")

if len(sys.argv) == 1:
    parser.print_help(sys.stderr)
    sys.exit(1)

args = parser.parse_args()

releaseSegment = f"/tags/{args.tag}" if args.tag else "/latest"
url = f'https://api.github.com/repos/{args.repo}/releases{releaseSegment}'

request = urllib.request.Request(url)

# Authenticate when a token is available to avoid the low unauthenticated
# API rate limit.
githubToken = os.getenv("GITHUB_OAUTH_TOKEN")
if githubToken:
    request.add_header("Authorization", "token " + githubToken)

try:
    response = urllib.request.urlopen(request)
except urllib.error.HTTPError as error:
    if error.code == 404:
        # A missing release is not fatal: just skip changelog generation.
        print(f"Release {args.tag} not found in {args.repo}. Skipping changelog generation.")
        sys.exit(0)
    print(f"GitHub API responded with a {error.code} error!", file=sys.stderr)
    print("Url:", url, file=sys.stderr)
    print("Response:", json.dumps(json.load(error), indent=4), file=sys.stderr, sep="\n")
    sys.exit(1)

release = json.load(response)

# BUGFIX: when --tag is omitted (the "/latest" path), args.tag is None and the
# entry used to read "- Release None"; fall back to the tag reported by the API.
tag = args.tag or release.get('tag_name')

releaseDate = datetime.strptime(release['published_at'], "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)

with tempfile.TemporaryFile("r+") as temp:
    # Render the entry into a temp file first, so it can be either printed or
    # prepended to an existing changelog.
    print("-------------------------------------------------------------------", file=temp)

    print(f"{releaseDate.strftime('%c')} {releaseDate.strftime('%Z')}", end="", file=temp)
    if args.author:
        print(f" - {args.author}", end="", file=temp)
    print("\n", file=temp)

    print(f"- Release {tag}", end="", file=temp)
    if release['name'] and release['name'] != tag:
        print(f" - {release['name']}", end="", file=temp)
    print("\n", file=temp)

    if release['body']:
        print(textwrap.indent(release['body'], " "), file=temp, end="\n\n")
    temp.seek(0)

    if args.file:
        # Prepend: read the existing changelog (if any), then write the new
        # entry followed by the old content.
        try:
            with open(args.file, "r") as prev:
                old = prev.read()
        except FileNotFoundError:
            old = ""
        with open(args.file, "w") as new:
            for line in temp:
                new.write(line)
            new.write(old)
        sys.exit(0)

    print(temp.read())
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit =
3 | */tests/*
4 | */test/*
5 | *setup.py*
6 | tests/*
7 | data_file = tests/.coverage
8 |
9 | [html]
10 | directory = tests/htmlcov
11 |
12 | [xml]
13 | output = tests/coverage.xml
14 |
--------------------------------------------------------------------------------
/.github/workflows/dashboards-ci.yml:
--------------------------------------------------------------------------------
1 | name: Dashboards CI
2 |
3 | # this workflow will
4 | # - check for JSON syntax errors on every pull request
5 | # - commit to the OBS development upstream whenever the master branch is updated
6 |
7 | on:
8 | push:
9 | paths:
10 | - 'Makefile'
11 | - 'dashboards/**'
12 | - 'packaging/obs/grafana-sap-hana-dashboards/**'
13 | - '.github/workflows/dashboards*'
14 | pull_request:
15 | paths:
16 | - 'Makefile'
17 | - 'dashboards/**'
18 | - 'packaging/obs/grafana-sap-hana-dashboards/**'
19 | - '.github/workflows/dashboards*'
20 |
21 | jobs:
22 | test:
23 | runs-on: ubuntu-latest
24 | steps:
25 | - uses: actions/checkout@v2
26 | - uses: actions/setup-node@v1
27 | - run: npm -g install jsonlint
28 | - name: validate JSON
29 | run: find dashboards -name "*.json" -type f -exec jsonlint -c {} \;
30 |
31 | obs-commit:
32 | needs: test
33 | if: github.ref == 'refs/heads/master'
34 | runs-on: ubuntu-latest
35 | container: shap/continuous_deliver
36 | env:
37 | OBS_USER: ${{ secrets.OBS_USER }}
38 | OBS_PASS: ${{ secrets.OBS_PASS }}
39 | OBS_PROJECT: ${{ secrets.OBS_PROJECT }}
40 | REVISION: ${{ github.sha }}
41 | REPOSITORY: ${{ github.repository }}
42 | GITHUB_OAUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
43 | steps:
44 | - uses: actions/checkout@v2
45 | with:
46 | fetch-depth: 0
47 | - name: configure OSC
48 | run: |
49 | /scripts/init_osc_creds.sh
50 | mkdir -p $HOME/.config/osc
51 | cp /root/.config/osc/oscrc $HOME/.config/osc
52 | - run: make dashboards-obs-commit
53 |
54 |
--------------------------------------------------------------------------------
/.github/workflows/exporter-ci.yml:
--------------------------------------------------------------------------------
1 | name: Exporter CI
2 |
3 | # this workflow will
4 | # - test the codebase on every pull request
5 | # - commit to the OBS development upstream whenever the master branch is updated
6 | # - perform an OBS Submit Request to downstream whenever a GitHub release is published
7 |
8 | on:
9 | push:
10 | paths-ignore:
11 | - 'dashboards/**'
12 | - 'packaging/obs/grafana-sap-hana-dashboards/**'
13 | - '.github/workflows/dashboards*'
14 | pull_request:
15 | paths-ignore:
16 | - 'dashboards/**'
17 | - 'packaging/obs/grafana-sap-hana-dashboards/**'
18 | - '.github/workflows/dashboards*'
19 | release:
20 | types: [ published ]
21 |
22 | jobs:
23 | test:
24 | runs-on: ubuntu-20.04
25 | strategy:
26 | matrix:
27 | python-version: [3.6, 3.8, 3.11]
28 | steps:
29 | - name: Set up Python ${{ matrix.python-version }}
30 | uses: actions/setup-python@v5
31 | with:
32 | python-version: ${{ matrix.python-version }}
33 | - uses: actions/checkout@v4
34 | - name: install deps
35 | run: make deps
36 | - name: static analysis
37 | run: make static-checks
38 | continue-on-error: true
39 | - name: test
40 | run: make test
41 |
42 | obs-commit:
43 | needs: test
44 | if: ${{ github.event_name != 'pull_request' }}
45 | concurrency:
46 | group: obs
47 | runs-on: ubuntu-20.04
48 | container: shap/continuous_deliver
49 | env:
50 | OBS_USER: ${{ secrets.OBS_USER }}
51 | OBS_PASS: ${{ secrets.OBS_PASS }}
52 | OBS_PROJECT: ${{ vars.OBS_PROJECT }}
53 | REVISION: ${{ github.sha }}
54 | REPOSITORY: ${{ github.repository }}
55 | GITHUB_OAUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
56 | steps:
57 | - uses: actions/checkout@v3
58 | with:
59 | fetch-depth: 0
60 | - name: configure OSC
61 | run: |
62 | /scripts/init_osc_creds.sh
63 | mkdir -p $HOME/.config/osc
64 | cp /root/.config/osc/oscrc $HOME/.config/osc
65 | - run: make exporter-obs-commit
66 |
67 | obs-submit-request:
68 | needs: test
69 | if: github.event.release
70 | concurrency:
71 | group: obs
72 | runs-on: ubuntu-20.04
73 | container: shap/continuous_deliver
74 | env:
75 | OBS_USER: ${{ secrets.OBS_USER }}
76 | OBS_PASS: ${{ secrets.OBS_PASS }}
77 | OBS_PROJECT: ${{ vars.OBS_PROJECT }}
78 | PACKAGE_NAME: prometheus-hanadb_exporter
79 | TARGET_PROJECT: ${{ vars.OBS_DOWNSTREAM_PROJECT }}
80 | REVISION: ${{ github.event.release.tag_name }}
81 | REPOSITORY: ${{ github.repository }}
82 | GITHUB_OAUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
83 | steps:
84 | - uses: actions/checkout@v3
85 | with:
86 | fetch-depth: 0
87 | - name: configure OSC
88 | run: |
89 | /scripts/init_osc_creds.sh
90 | mkdir -p $HOME/.config/osc
91 | cp /root/.config/osc/oscrc $HOME/.config/osc
92 | - run: make exporter-obs-workdir
93 | - run: make exporter-obs-changelog
94 | - run: make exporter-obs-commit
95 | - run: /scripts/submit.sh
96 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /tests/htmlcov
2 | /tests/.coverage
3 | /tests/coverage.*
4 | dist/*
5 | .cache
6 | .idea
7 | *.pyc
8 | *.egg-info*
9 | .tox/*
10 | config.json
11 | /virt
12 | .vscode
13 | .pytest_cache
14 | /build
15 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [MASTER]
2 |
3 | # Specify a configuration file.
4 | #rcfile=
5 |
6 | # Python code to execute, usually for sys.path manipulation such as
7 | # pygtk.require().
8 | #init-hook=
9 |
10 | # Add files or directories to the blacklist. They should be base names, not
11 | # paths.
12 | ignore=CVS
13 |
14 | # Add files or directories matching the regex patterns to the blacklist. The
15 | # regex matches against base names, not paths.
16 | ignore-patterns=.*_test.py
17 |
18 | # Pickle collected data for later comparisons.
19 | persistent=yes
20 |
21 | # List of plugins (as comma separated values of python modules names) to load,
22 | # usually to register additional checkers.
23 | load-plugins=
24 |
25 | # Use multiple processes to speed up Pylint.
26 | jobs=1
27 |
28 | # Allow loading of arbitrary C extensions. Extensions are imported into the
29 | # active Python interpreter and may run arbitrary code.
30 | unsafe-load-any-extension=no
31 |
32 | # A comma-separated list of package or module names from where C extensions may
33 | # be loaded. Extensions are loading into the active Python interpreter and may
34 | # run arbitrary code
35 | extension-pkg-whitelist=
36 |
37 | # Allow optimization of some AST trees. This will activate a peephole AST
38 | # optimizer, which will apply various small optimizations. For instance, it can
39 | # be used to obtain the result of joining multiple strings with the addition
40 | # operator. Joining a lot of strings can lead to a maximum recursion error in
41 | # Pylint and this flag can prevent that. It has one side effect, the resulting
42 | # AST will be different than the one from reality. This option is deprecated
43 | # and it will be removed in Pylint 2.0.
44 | optimize-ast=no
45 |
46 |
47 | [MESSAGES CONTROL]
48 |
49 | # Only show warnings with the listed confidence levels. Leave empty to show
50 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
51 | confidence=
52 |
53 | # Enable the message, report, category or checker with the given id(s). You can
54 | # either give multiple identifier separated by comma (,) or put this option
55 | # multiple time (only on the command line, not in the configuration file where
56 | # it should appear only once). See also the "--disable" option for examples.
57 | #enable=
58 |
59 | # Disable the message, report, category or checker with the given id(s). You
60 | # can either give multiple identifiers separated by comma (,) or put this
61 | # option multiple times (only on the command line, not in the configuration
62 | # file where it should appear only once).You can also use "--disable=all" to
63 | # disable everything first and then reenable specific checks. For example, if
64 | # you want to run only the similarities checker, you can use "--disable=all
65 | # --enable=similarities". If you want to run only the classes checker, but have
66 | # no Warning level messages displayed, use"--disable=all --enable=classes
67 | # --disable=W"
68 | disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating
69 |
70 |
71 | [REPORTS]
72 |
73 | # Set the output format. Available formats are text, parseable, colorized, msvs
74 | # (visual studio) and html. You can also give a reporter class, eg
75 | # mypackage.mymodule.MyReporterClass.
76 | output-format=text
77 |
78 | # Put messages in a separate file for each module / package specified on the
79 | # command line instead of printing them on stdout. Reports (if any) will be
80 | # written in a file name "pylint_global.[txt|html]". This option is deprecated
81 | # and it will be removed in Pylint 2.0.
82 | files-output=no
83 |
84 | # Tells whether to display a full report or only the messages
85 | reports=yes
86 |
87 | # Python expression which should return a note less than 10 (10 is the highest
88 | # note). You have access to the variables errors warning, statement which
89 | # respectively contain the number of errors / warnings messages and the total
90 | # number of statements analyzed. This is used by the global evaluation report
91 | # (RP0004).
92 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
93 |
94 | # Template used to display messages. This is a python new-style format string
95 | # used to format the message information. See doc for all details
96 | #msg-template=
97 |
98 |
99 | [BASIC]
100 |
101 | # Good variable names which should always be accepted, separated by a comma
102 | good-names=i,j,k,ex,Run,_
103 |
104 | # Bad variable names which should always be refused, separated by a comma
105 | bad-names=foo,bar,baz,toto,tutu,tata
106 |
107 | # Colon-delimited sets of names that determine each other's naming style when
108 | # the name regexes allow several styles.
109 | name-group=
110 |
111 | # Include a hint for the correct naming format with invalid-name
112 | include-naming-hint=no
113 |
114 | # List of decorators that produce properties, such as abc.abstractproperty. Add
115 | # to this list to register other decorators that produce valid properties.
116 | property-classes=abc.abstractproperty
117 |
118 | # Regular expression matching correct function names
119 | function-rgx=[a-z_][a-z0-9_]{2,30}$
120 |
121 | # Naming hint for function names
122 | function-name-hint=[a-z_][a-z0-9_]{2,30}$
123 |
124 | # Regular expression matching correct variable names
125 | variable-rgx=[a-z_][a-z0-9_]{2,30}$
126 |
127 | # Naming hint for variable names
128 | variable-name-hint=[a-z_][a-z0-9_]{2,30}$
129 |
130 | # Regular expression matching correct constant names
131 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
132 |
133 | # Naming hint for constant names
134 | const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
135 |
136 | # Regular expression matching correct attribute names
137 | attr-rgx=[a-z_][a-z0-9_]{2,30}$
138 |
139 | # Naming hint for attribute names
140 | attr-name-hint=[a-z_][a-z0-9_]{2,30}$
141 |
142 | # Regular expression matching correct argument names
143 | argument-rgx=[a-z_][a-z0-9_]{2,30}$
144 |
145 | # Naming hint for argument names
146 | argument-name-hint=[a-z_][a-z0-9_]{2,30}$
147 |
148 | # Regular expression matching correct class attribute names
149 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
150 |
151 | # Naming hint for class attribute names
152 | class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
153 |
154 | # Regular expression matching correct inline iteration names
155 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
156 |
157 | # Naming hint for inline iteration names
158 | inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
159 |
160 | # Regular expression matching correct class names
161 | class-rgx=[A-Z_][a-zA-Z0-9]+$
162 |
163 | # Naming hint for class names
164 | class-name-hint=[A-Z_][a-zA-Z0-9]+$
165 |
166 | # Regular expression matching correct module names
167 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
168 |
169 | # Naming hint for module names
170 | module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
171 |
172 | # Regular expression matching correct method names
173 | method-rgx=[a-z_][a-z0-9_]{2,30}$
174 |
175 | # Naming hint for method names
176 | method-name-hint=[a-z_][a-z0-9_]{2,30}$
177 |
178 | # Regular expression which should only match function or class names that do
179 | # not require a docstring.
180 | no-docstring-rgx=^_
181 |
182 | # Minimum line length for functions/classes that require docstrings, shorter
183 | # ones are exempt.
184 | docstring-min-length=-1
185 |
186 |
187 | [ELIF]
188 |
189 | # Maximum number of nested blocks for function / method body
190 | max-nested-blocks=5
191 |
192 |
193 | [FORMAT]
194 |
195 | # Maximum number of characters on a single line.
196 | max-line-length=100
197 |
198 | # Regexp for a line that is allowed to be longer than the limit.
199 | ignore-long-lines=^\s*(# )??$
200 |
201 | # Allow the body of an if to be on the same line as the test if there is no
202 | # else.
203 | single-line-if-stmt=no
204 |
205 | # List of optional constructs for which whitespace checking is disabled. `dict-
206 | # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
207 | # `trailing-comma` allows a space between comma and closing bracket: (a, ).
208 | # `empty-line` allows space-only lines.
209 | no-space-check=trailing-comma,dict-separator
210 |
211 | # Maximum number of lines in a module
212 | max-module-lines=1000
213 |
214 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
215 | # tab).
216 | indent-string=' '
217 |
218 | # Number of spaces of indent required inside a hanging or continued line.
219 | indent-after-paren=4
220 |
221 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
222 | expected-line-ending-format=
223 |
224 |
225 | [LOGGING]
226 |
227 | # Logging modules to check that the string format arguments are in logging
228 | # function parameter format
229 | logging-modules=logging
230 |
231 |
232 | [MISCELLANEOUS]
233 |
234 | # List of note tags to take in consideration, separated by a comma.
235 | notes=FIXME,XXX,TODO
236 |
237 |
238 | [SIMILARITIES]
239 |
240 | # Minimum lines number of a similarity.
241 | min-similarity-lines=4
242 |
243 | # Ignore comments when computing similarities.
244 | ignore-comments=yes
245 |
246 | # Ignore docstrings when computing similarities.
247 | ignore-docstrings=yes
248 |
249 | # Ignore imports when computing similarities.
250 | ignore-imports=no
251 |
252 |
253 | [SPELLING]
254 |
255 | # Spelling dictionary name. Available dictionaries: none. To make it working
256 | # install python-enchant package.
257 | spelling-dict=
258 |
259 | # List of comma separated words that should not be checked.
260 | spelling-ignore-words=
261 |
262 | # A path to a file that contains private dictionary; one word per line.
263 | spelling-private-dict-file=
264 |
265 | # Tells whether to store unknown words to indicated private dictionary in
266 | # --spelling-private-dict-file option instead of raising a message.
267 | spelling-store-unknown-words=no
268 |
269 |
270 | [TYPECHECK]
271 |
272 | # Tells whether missing members accessed in mixin class should be ignored. A
273 | # mixin class is detected if its name ends with "mixin" (case insensitive).
274 | ignore-mixin-members=yes
275 |
276 | # List of module names for which member attributes should not be checked
277 | # (useful for modules/projects where namespaces are manipulated during runtime
278 | # and thus existing member attributes cannot be deduced by static analysis. It
279 | # supports qualified module names, as well as Unix pattern matching.
280 | ignored-modules=
281 |
282 | # List of class names for which member attributes should not be checked (useful
283 | # for classes with dynamically set attributes). This supports the use of
284 | # qualified names.
285 | ignored-classes=optparse.Values,thread._local,_thread._local
286 |
287 | # List of members which are set dynamically and missed by pylint inference
288 | # system, and so shouldn't trigger E1101 when accessed. Python regular
289 | # expressions are accepted.
290 | generated-members=
291 |
292 | # List of decorators that produce context managers, such as
293 | # contextlib.contextmanager. Add to this list to register other decorators that
294 | # produce valid context managers.
295 | contextmanager-decorators=contextlib.contextmanager
296 |
297 |
298 | [VARIABLES]
299 |
300 | # Tells whether we should check for unused import in __init__ files.
301 | init-import=no
302 |
303 | # A regular expression matching the name of dummy variables (i.e. expectedly
304 | # not used).
305 | dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy
306 |
307 | # List of additional names supposed to be defined in builtins. Remember that
308 | # you should avoid to define new builtins when possible.
309 | additional-builtins=
310 |
311 | # List of strings which can identify a callback function by name. A callback
312 | # name must start or end with one of those strings.
313 | callbacks=cb_,_cb
314 |
315 | # List of qualified module names which can have objects that can redefine
316 | # builtins.
317 | redefining-builtins-modules=six.moves,future.builtins
318 |
319 |
320 | [CLASSES]
321 |
322 | # List of method names used to declare (i.e. assign) instance attributes.
323 | defining-attr-methods=__init__,__new__,setUp
324 |
325 | # List of valid names for the first argument in a class method.
326 | valid-classmethod-first-arg=cls
327 |
328 | # List of valid names for the first argument in a metaclass class method.
329 | valid-metaclass-classmethod-first-arg=mcs
330 |
331 | # List of member names, which should be excluded from the protected access
332 | # warning.
333 | exclude-protected=_asdict,_fields,_replace,_source,_make
334 |
335 |
336 | [DESIGN]
337 |
338 | # Maximum number of arguments for function / method
339 | max-args=5
340 |
341 | # Argument names that match this expression will be ignored. Default to name
342 | # with leading underscore
343 | ignored-argument-names=_.*
344 |
345 | # Maximum number of locals for function / method body
346 | max-locals=15
347 |
348 | # Maximum number of return / yield for function / method body
349 | max-returns=6
350 |
351 | # Maximum number of branch for function / method body
352 | max-branches=12
353 |
354 | # Maximum number of statements in function / method body
355 | max-statements=50
356 |
357 | # Maximum number of parents for a class (see R0901).
358 | max-parents=7
359 |
360 | # Maximum number of attributes for a class (see R0902).
361 | max-attributes=7
362 |
363 | # Minimum number of public methods for a class (see R0903).
364 | min-public-methods=2
365 |
366 | # Maximum number of public methods for a class (see R0904).
367 | max-public-methods=20
368 |
369 | # Maximum number of boolean expressions in a if statement
370 | max-bool-expr=5
371 |
372 |
373 | [IMPORTS]
374 |
375 | # Deprecated modules which should not be used, separated by a comma
376 | deprecated-modules=regsub,TERMIOS,Bastion,rexec
377 |
378 | # Create a graph of every (i.e. internal and external) dependencies in the
379 | # given file (report RP0402 must not be disabled)
380 | import-graph=
381 |
382 | # Create a graph of external dependencies in the given file (report RP0402 must
383 | # not be disabled)
384 | ext-import-graph=
385 |
386 | # Create a graph of internal dependencies in the given file (report RP0402 must
387 | # not be disabled)
388 | int-import-graph=
389 |
390 | # Force import order to recognize a module as part of the standard
391 | # compatibility libraries.
392 | known-standard-library=
393 |
394 | # Force import order to recognize a module as part of a third party library.
395 | known-third-party=enchant
396 |
397 | # Analyse import fallback blocks. This can be used to support both Python 2 and
398 | # 3 compatible code, which means that the block might have code that exists
399 | # only in one or another interpreter, leading to false positives when analysed.
400 | analyse-fallback-blocks=no
401 |
402 |
403 | [EXCEPTIONS]
404 |
405 | # Exceptions that will emit a warning when being caught. Defaults to
406 | # "Exception"
407 | overgeneral-exceptions=Exception
408 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to contribute
2 |
3 | ## OBS Packaging
4 |
5 | The CI will automatically interact with SUSE's [Open Build Service](https://build.opensuse.org): the `main` branch will be kept in sync with the `network:ha-clustering:sap-deployments:devel` project.
6 |
7 | ### Publishing the Exporter RPM in openSUSE code stream.
8 |
9 | For the exporter only, a new Submit Request against the `server:monitoring` project can be triggered by publishing a new GitHub release.
10 |
11 | Please ensure that tags always follow the [SemVer] scheme, and the text of the release adheres to the [_keep a changelog_](https://keepachangelog.com/) format.
12 |
13 | When accepting Submit Requests against `server:monitoring`, you can contextually forward them to the `openSUSE:Factory` project.
14 |
15 | ### Publishing Grafana Dashboards
16 |
17 | While Grafana dashboards are not continuously deployed like the exporter, the [OBS development package](https://build.opensuse.org/package/show/network:ha-clustering:sap-deployments:devel/grafana-sap-hana-dashboards) is still kept in sync with the `main` branch of this repository.
18 | GitHub releases do not apply in this case, they are only used for the exporter; the dashboards RPM version number is instead hard-coded in the OBS source services configuration.
19 |
20 | To publish a new release:
21 | - update the numerical prefix of the `versionformat` field in the [_service](packaging/obs/grafana-sap-hana-dashboards/_service) file;
22 | - add an entry to the [changelog](packaging/obs/grafana-sap-hana-dashboards/grafana-sap-hana-dashboards.changes) file;
23 | - commit these changes directly in the `main` branch;
24 | - perform a Submit Request via [`osc`] manually:
25 | `osc sr network:ha-clustering:sap-deployments:devel grafana-sap-hana-dashboards openSUSE:Factory`
26 |
27 |
28 | ### Publishing RPMs in OBS manually
29 |
30 | For both the exporter and the dashboards, assuming you have configured [`osc`] already, you can use the same make targets used in the CI to produce a local OBS package working directory.
31 |
32 | The following:
33 | ```
34 | make exporter-obs-workdir
35 | ```
36 | will checkout the exporter OBS package and prepare a new OBS commit in the `build/obs` directory.
37 |
38 | You can use the `OBS_PROJECT`, `REPOSITORY`, `VERSION` and `REVISION` environment variables to change the behaviour of these make targets.
39 |
40 | By default, the current Git working directory is used to infer the values of `VERSION` and `REVISION`, which are used by OBS source services to generate a compressed archive of the sources.
41 |
42 | For example, if you wanted to update the RPM package in your own OBS branch with the latest sources from a Git feature branch in your own GitHub fork, you might do the following:
43 | ```bash
44 | git checkout feature/xyz
45 | git push johndoe feature/xyz # don't forget to push changes in your own fork
46 | export OBS_PROJECT=home:JohnDoe
47 | export REPOSITORY=johndoe/my_forked_repo
48 | make clean
49 | make exporter-obs-workdir
50 | ```
51 | This will prepare to commit in the `home:JohnDoe/my_forked_repo` OBS package by checking out the `feature/xyz` branch from `github.com/johndoe/my_forked_repo`, updating the version number of the RPM spec file, and producing a compressed archive of the sources.
52 |
53 | To actually perform the OBS commit, run:
54 | ```bash
55 | make exporter-obs-commit
56 | ```
57 |
58 | Note that actual releases may also involve an intermediate step that updates the changelog automatically, but this is only used when triggering the CI/CD via GitHub releases.
59 |
60 | The equivalent targets for the dashboard package are `make dashboards-obs-workdir` and `make dashboards-obs-commit`
61 |
62 | [SemVer]: https://semver.org
63 | [`osc`]: https://en.opensuse.org/openSUSE:OSC
64 |
65 | ### Note about SemVer usage
66 |
67 | Please be aware that RPM doesn't allow hyphens in the version number: `~` can be used as a replacement, although it's not entirely compliant with the SemVer spec.
68 |
69 | For more information, please refer to: https://github.com/semver/semver/issues/145
70 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | https://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | Copyright 2019-2020 SUSE LLC
180 |
181 | Licensed under the Apache License, Version 2.0 (the "License");
182 | you may not use this file except in compliance with the License.
183 | You may obtain a copy of the License at
184 |
185 | https://www.apache.org/licenses/LICENSE-2.0
186 |
187 | Unless required by applicable law or agreed to in writing, software
188 | distributed under the License is distributed on an "AS IS" BASIS,
189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
190 | See the License for the specific language governing permissions and
191 | limitations under the License.
192 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | # Include useful user documentation
2 | include README.md
3 | include docs/METRICS.md
4 | include LICENSE
5 | include requirements.txt
6 |
7 | # Exclude unitary test files
8 | global-exclude tests/*
9 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # this is what ends up in the RPM "Version" field and embedded in the --version CLI flag
2 | VERSION ?= $(shell .ci/get_version_from_git.sh)
3 |
4 | # build timestamp. NOTE(review): the old comment mentioned a "Go compile task", but DATE is not referenced anywhere in this Makefile — confirm before removing
5 | DATE = $(shell date --iso-8601=seconds)
6 |
7 | # if you want to release to OBS, this must be a remotely available Git reference
8 | REVISION ?= $(shell git rev-parse --abbrev-ref HEAD)
9 |
10 | # we only use this to comply with RPM changelog conventions at SUSE
11 | AUTHOR ?= shap-staff@suse.de
12 |
13 | # you can customize any of the following to build forks
14 | OBS_PROJECT ?= network:ha-clustering:sap-deployments:devel
15 | REPOSITORY ?= SUSE/hanadb_exporter
16 |
17 | default: deps test
18 |
19 | deps:
20 | python -m pip install --upgrade pip
21 | pip install tox
22 |
23 | test:
24 | tox -e py
25 |
26 | test-all:
27 | tox
28 |
29 | static-checks:
30 | tox -e pylint
31 |
32 | checks: test static-checks
33 |
34 | coverage: tests/coverage.xml tests/htmlcov tests/.coverage
35 | tests/coverage.xml tests/htmlcov tests/.coverage:
36 | tox -e coverage
37 |
38 | clean:
39 | rm -rf .tox tests/{coverage.xml,.coverage,htmlcov} build
40 |
41 | exporter-obs-workdir: build/obs/prometheus-hanadb_exporter
42 | build/obs/prometheus-hanadb_exporter:
43 | @mkdir -p $@
44 | osc checkout $(OBS_PROJECT) prometheus-hanadb_exporter -o $@
45 | rm -f $@/*.tar.gz
46 | cp -rv packaging/obs/prometheus-hanadb_exporter/* $@/
47 | # we interpolate environment variables in OBS _service file so that we control what is downloaded by the tar_scm source service
48 | sed -i 's~%%VERSION%%~$(VERSION)~' $@/_service
49 | sed -i 's~%%REVISION%%~$(REVISION)~' $@/_service
50 | sed -i 's~%%REPOSITORY%%~$(REPOSITORY)~' $@/_service
51 | cd $@; osc service runall
52 |
53 | exporter-obs-changelog: exporter-obs-workdir
54 | .ci/gh_release_to_obs_changeset.py $(REPOSITORY) -a $(AUTHOR) -t $(REVISION) -f build/obs/prometheus-hanadb_exporter/prometheus-hanadb_exporter.changes
55 |
56 | exporter-obs-commit: exporter-obs-workdir
57 | cd build/obs/prometheus-hanadb_exporter; osc addremove
58 | cd build/obs/prometheus-hanadb_exporter; osc commit -m "Update from git rev $(REVISION)"
59 |
60 | dashboards-obs-workdir: build/obs/grafana-sap-hana-dashboards
61 | build/obs/grafana-sap-hana-dashboards:
62 | @mkdir -p $@
63 | osc checkout $(OBS_PROJECT) grafana-sap-hana-dashboards -o $@
64 | rm -f $@/*.tar.gz
65 | cp -rv packaging/obs/grafana-sap-hana-dashboards/* $@/
66 | # we interpolate environment variables in OBS _service file so that we control what is downloaded by the tar_scm source service
67 | sed -i 's~%%REVISION%%~$(REVISION)~' $@/_service
68 | sed -i 's~%%REPOSITORY%%~$(REPOSITORY)~' $@/_service
69 | cd $@; osc service runall
70 |
71 | dashboards-obs-commit: dashboards-obs-workdir
72 | cd build/obs/grafana-sap-hana-dashboards; osc addremove
73 | cd build/obs/grafana-sap-hana-dashboards; osc commit -m "Update from git rev $(REVISION)"
74 |
75 | .PHONY: checks clean coverage dashboards-obs-commit default deps exporter-obs-changelog exporter-obs-commit static-checks test test-all
76 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SAP HANA Database exporter
2 |
3 | [](https://github.com/SUSE/hanadb_exporter/actions?query=workflow%3A%22Exporter+CI%22)
4 | [](https://github.com/SUSE/hanadb_exporter/actions?query=workflow%3A%22Dashboards+CI%22)
5 |
6 | Prometheus exporter written in Python, to export SAP HANA database metrics. The
7 | project is based on the official prometheus exporter: [prometheus_client](https://github.com/prometheus/client_python).
8 |
9 | The exporter is able to export the metrics from more than 1 database/tenant if the `multi_tenant` option is enabled in the configuration file (enabled by default).
10 |
11 | The labels `sid` (system identifier), `insnr` (instance number), `database_name` (database name) and `host` (machine hostname) will be exported for all the metrics.
12 |
13 |
14 | ## Prerequisites
15 |
16 | 1. A running and reachable SAP HANA database (single or multi container). Running the exporter in the
17 | same machine where the HANA database is running is recommended. Ideally each database
18 | should be monitored by one exporter.
19 |
20 | 2. A SAP HANA Connector, for that, you have two options:
21 | - [`dbapi` (SAP/official)](https://help.sap.com/viewer/1efad1691c1f496b8b580064a6536c2d/Cloud/en-US/39eca89d94ca464ca52385ad50fc7dea.html)
22 | - [`pyhdb` (unofficial/open source)](https://github.com/SAP/PyHDB)
23 |
24 | The installation of the connector is covered in the [Installation](#installation) section.
25 |
26 | 3. Some metrics are collected on the HANA monitoring views by the [SAP Host agent](https://help.sap.com/saphelp_nwpi711/helpdata/en/21/98c443122744efae67c0352033691d/frameset.htm). Make sure to have it installed and running to have access to all the monitoring metrics.
27 |
28 |
29 | ## Metrics file
30 |
31 | The exporter uses an additional file to know the metrics that are going to be exported. Here more information about the [metrics file](./docs/METRICS.md).
32 |
33 | ## Installation
34 |
35 | The project can be installed in many ways, including but not limited to:
36 |
37 | 1. [RPM](#rpm)
38 | 2. [Manual clone](#manual-clone)
39 |
40 | ### RPM
41 |
42 | On openSUSE or SUSE Linux Enterprise use `zypper` package manager:
43 | ```shell
44 | zypper install prometheus-hanadb_exporter
45 | ```
46 |
47 | Find the latest development repositories at [SUSE's Open Build Service](https://build.opensuse.org/package/show/network:ha-clustering:sap-deployments:devel/prometheus-hanadb_exporter).
48 |
49 | ### Manual clone
50 |
51 | > The exporter is developed to be used with Python3.\
52 | > The usage of a virtual environment is recommended.
53 |
54 | ```
55 | git clone https://github.com/SUSE/hanadb_exporter
56 | cd hanadb_exporter # project root folder
57 | virtualenv virt
58 | source virt/bin/activate
59 | # uncomment one of the next two options (to use hdbcli, you will need to have the HANA client folder where this python package is available)
60 | # pip install pyhdb
61 | # pip install path-to-hdbcli-N.N.N.tar.gz
62 | pip install .
63 | # pip install -e . # To install in development mode
64 | # deactivate # to exit from the virtualenv
65 | ```
66 |
67 | If you prefer, you can install the PyHDB SAP HANA connector as a RPM package doing (example for Tumbleweed, but available for other versions):
68 |
69 | ```
70 | # All the commands must be executed as root user
71 | zypper addrepo https://download.opensuse.org/repositories/network:/ha-clustering:/sap-deployments:/devel/openSUSE_Tumbleweed/network:ha-clustering:sap-deployments:devel.repo
72 | zypper ref
73 | zypper in python3-PyHDB
74 | ```
75 |
76 | ## Configuring the exporter
77 |
78 | Create the `config.json` configuration file.
79 | An example of `config.json` is available in [config.json.example](config.json.example). Here are the most
80 | important items in the configuration file:
81 | - `listen_address`: Address where the prometheus exporter will be exposed (0.0.0.0 by default).
82 | - `exposition_port`: Port where the prometheus exporter will be exposed (9668 by default).
83 | - `multi_tenant`: Export the metrics from other tenants. To use this the connection must be done with the System Database (port 30013).
84 | - `timeout`: Timeout to connect to the database. After this time the app will fail (even in daemon mode).
85 | - `hana.host`: Address of the SAP HANA database.
86 | - `hana.port`: Port where the SAP HANA database is exposed.
87 | - `hana.userkey`: Stored user key. This is the secure option if you don't want to have the password in the configuration file. The `userkey` and `user/password` are mutually exclusive, with the first being the default if both options are set.
88 | - `hana.user`: An existing user with access right to the SAP HANA database.
89 | - `hana.password`: Password of an existing user.
90 | - `hana.ssl`: Enable SSL connection (False by default). Only available for `dbapi` connector
91 | - `hana.ssl_validate_cert`: Enable SSL certification validation. This field is required by HANA cloud. Only available for `dbapi` connector
92 | - `hana.aws_secret_name`: The secret name containing the username and password. This is a secure option to use AWS secrets manager if SAP HANA database is stored on AWS. `aws_secret_name` and `user/password` are mutually exclusive; `aws_secret_name` is the default if both options are set.
93 | - `logging.config_file`: Python logging system configuration file (by default WARN and ERROR level messages will be sent to the syslog)
94 | - `logging.log_file`: Logging file (/var/log/hanadb_exporter.log by default)
95 |
96 | The logging configuration file follows the python standard logging system style: [Python logging](https://docs.python.org/3/library/logging.config.html).
97 |
98 | Using the default [configuration file](./logging_config.ini), it will redirect the logs to the file assigned in the [json configuration file](./config.json.example) and to the syslog (only logging level up to WARNING).
99 |
100 | ### Using the stored user key
101 |
102 | This is the recommended option if we want to keep the database secure (for development environments the `user/password` with `SYSTEM` user can be used as it's faster to setup).
103 | To use the `userkey` option the `dbapi` must be installed (usually stored in `/hana/shared/PRD/hdbclient/hdbcli-N.N.N.tar.gz` and installable with pip3).
104 | It cannot be used from a different client (the key is stored in the client itself). Attempting to do so will raise the `hdbcli.dbapi.Error: (-10104, 'Invalid value for KEY')` error.
105 | For that a new stored user key must be created with the user that is running python. For that (please, notice that the `hdbclient` is the same as the `dbapi` python package):
106 | ```
107 | /hana/shared/PRD/hdbclient/hdbuserstore set yourkey host:30013@SYSTEMDB hanadb_exporter pass
108 | ```
109 |
110 | ### Using AWS Secrets Manager
111 |
112 | If SAP HANA database is stored on AWS EC2 instance, this is a secure option to store the `user/password` without having them in the configuration file.
113 | To use this option:
114 | - Create a [secret](https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_create-basic-secret.html) in key/value pairs format, specify Key `username` and then for Value enter the database user. Add a second Key `password` and then for Value enter the password.
115 | For the secret name, enter a name for your secret, and pass that name in the configuration file as a value for `aws_secret_name` item. Secret json example:
116 |
117 | ```
118 | {
119 | "username": "database_user",
120 | "password": "database_password"
121 | }
122 | ```
123 | - Allow read-only access from EC2 IAM role to the secret by attaching a [resource-based policy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html) to the secret. Policy Example:
124 | ```
125 | {
126 | "Version" : "2012-10-17",
127 | "Statement" : [
128 | {
129 | "Effect": "Allow",
130 | "Principal": {"AWS": "arn:aws:iam::123456789012:role/EC2RoleToAccessSecrets"},
131 | "Action": "secretsmanager:GetSecretValue",
132 | "Resource": "*",
133 | }
134 | ]
135 | }
136 | ```
137 |
138 |
139 |
140 | Some tips:
141 | - Set `SYSTEMDB` as default database, this way the exporter will know where to get the tenants data.
142 | - Don't use the stored user key created for the backup as this is created using the sidadm user.
143 | - The usage of a user with access only to the monitoring tables is recommended instead of using SYSTEM user.
144 | - If a user with monitoring role is used the user must exist in all the databases (SYSTEMDB+tenants).
145 |
146 | ### Create a new user with monitoring role
147 | Run the next commands to create a user with monitoring roles (**the commands must be executed in all the databases**):
148 | ```
149 | su - prdadm
150 | hdbsql -u SYSTEM -p pass -d SYSTEMDB #(PRD for the tenant in this example)
151 | CREATE USER HANADB_EXPORTER_USER PASSWORD MyExporterPassword NO FORCE_FIRST_PASSWORD_CHANGE;
152 | CREATE ROLE HANADB_EXPORTER_ROLE;
153 | GRANT MONITORING TO HANADB_EXPORTER_ROLE;
154 | GRANT HANADB_EXPORTER_ROLE TO HANADB_EXPORTER_USER;
155 | ```
156 |
157 | ## Running the exporter
158 |
159 | Start the exporter by running the following command:
160 | ```
161 | hanadb_exporter -c config.json -m metrics.json
162 | # Or
163 | python3 hanadb_exporter/main.py -c config.json -m metrics.json
164 | ```
165 |
166 | If a `config.json` configuration file is stored in `/etc/hanadb_exporter` the exporter can be started with the next command too:
167 | ```
168 | hanadb_exporter --identifier config # Notice that the identifier matches with the config file without extension
169 | ```
170 |
171 | ### Running as a daemon
172 |
173 | The hanadb_exporter can be executed using `systemd`. For that, the best option is to install the project using the rpm package as described in [Installation](#installation).
174 |
175 | After that we need to create the configuration file as `/etc/hanadb_exporter/my-exporter.json` (the name of the file is relevant as we will use it to start the daemon).
176 | The [config.json.example](./config.json.example) can be used as example (the example file is stored in `/usr/etc/hanadb_exporter` folder too).
177 |
178 | The default [metrics file](./metrics.json) is stored in `/usr/etc/hanadb_exporter/metrics.json`. If a new `metrics.json` is stored in `/etc/hanadb_exporter` this will be used.
179 |
180 | The logging configuration file can be updated as well to customize changing the new configuration file `logging.config_file` entry (default one available in `/usr/etc/hanadb_exporter/logging_config.ini`).
181 |
182 | Now, the exporter can be started as a daemon. As we can have multiple `hanadb_exporter` instances running in one machine, the service is created using a template file, so extra information must be given to `systemd` (this is done adding the `@` keyword after the service name together with the name of the configuration file created previously in `/etc/hanadb_exporter/{name}.json`):
183 | ```
184 | # All the command must be executed as root user
185 | systemctl start prometheus-hanadb_exporter@my-exporter
186 | # Check the status with
187 | systemctl status prometheus-hanadb_exporter@my-exporter
188 | # Enable the exporter to be started at boot time
189 | systemctl enable prometheus-hanadb_exporter@my-exporter
190 | ```
191 |
192 | ## License
193 |
194 | See the [LICENSE](LICENSE) file for license rights and limitations.
195 |
196 | ## Authors
197 |
198 | - Kristoffer Gronlund (kgronlund@suse.com)
199 | - Xabier Arbulu Insausti (xarbulu@suse.com)
200 | - Ayoub Belarbi (abelarbi@suse.com)
201 | - Diego Akechi (dakechi@suse.com)
202 |
203 | ## Reviewers
204 |
205 | *Pull request* preferred reviewers for this project:
206 | - Kristoffer Gronlund (kgronlund@suse.com)
207 | - Xabier Arbulu Insausti (xarbulu@suse.com)
208 | - Ayoub Belarbi (abelarbi@suse.com)
209 |
210 | ## References
211 |
212 | https://prometheus.io/docs/instrumenting/writing_exporters/
213 |
214 | https://prometheus.io/docs/practices/naming/
215 |
216 | http://sap.optimieren.de/hana/hana/html/sys_statistics_views.html
217 |
218 | https://help.sap.com/viewer/1efad1691c1f496b8b580064a6536c2d/Cloud/en-US/39eca89d94ca464ca52385ad50fc7dea.html
219 |
--------------------------------------------------------------------------------
/bin/hanadb_exporter:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | hanadb_exporter command line entry point: thin wrapper that delegates to hanadb_exporter.main.run()
4 | """
5 |
6 | from hanadb_exporter import main
7 |
8 | if __name__ == "__main__":  # only start the exporter when executed as a script, not on import
9 | main.run()
10 |
--------------------------------------------------------------------------------
/bin/supportconfig-hanadb_exporter:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -u
3 |
4 | # supportconfig plugin for hanadb_exporter
5 | #
6 | # v1.0
7 | #
8 | # February 2024 v1.0 first release
9 |
10 | SVER='1.0.0'
11 | TITLE="SUSE supportconfig plugin for hanadb_exporter"
12 |
13 | function display_package_info() { # report RPM metadata and verification status for package $1
14 | echo -e "\n#==[ Command ]======================================#"
15 | echo -e "# rpm -qi ${1}"
16 | rpm -qi "${1}" # package metadata: version, vendor, build date, ...
17 |
18 | echo -e "\n#==[ Command ]======================================#"
19 | echo -e "# rpm -V ${1}"
20 | rpm -V "${1}" # verify installed files against the RPM database (detects local modifications)
21 | }
22 |
23 | function display_file_stat() { # show listing and inode metadata for file $1, if it exists
24 | echo -e "\n#==[ File ]===========================#"
25 | echo -e "# ls -ld ${1} ; stat ${1} \n"
26 |
27 | if [ -e "${1}" ] ; then
28 | ls -ld "${1}" # ownership and permissions
29 | echo
30 | stat "${1}" # full metadata: size, timestamps, inode, ...
31 | else
32 | echo "${1} does not exist!"
33 | fi
34 | }
35 |
36 | function display_file() { # dump the full content of file $1, if it exists
37 | echo -e "\n#==[ File Content ]===========================#"
38 | echo -e "# cat ${1}"
39 |
40 | if [ -e "${1}" ] ; then
41 | cat "${1}"
42 | else
43 | echo "${1} does not exist!"
44 | fi
45 | }
46 |
47 | function display_systemd_status() { # show systemctl status for unit (or glob pattern) $1, capturing stderr too
48 | echo -e "\n#==[ Command ]======================================#"
49 | echo -e "# systemctl status ${1}"
50 |
51 | systemctl status ''"${1}"'' 2>&1 # NOTE(review): the surrounding ''...'' pairs are empty strings and a no-op; plain "${1}" should behave identically — confirm
52 | }
53 |
54 | function display_cmd() { # resolve, echo and execute an arbitrary command line, capturing its output for the report
55 | ORG_CMDLINE="${*}" # join all args with "$*": using "$@" in a scalar assignment is incorrect (shellcheck SC2124)
56 | CMDBIN=${ORG_CMDLINE%% *} # first word = the command binary
57 | FULLCMD=$(\which $CMDBIN 2>/dev/null | awk '{print $1}') # resolve the bare command to an absolute path
58 | echo -e "\n#==[ Command ]======================================#"
59 | if [ -x "$FULLCMD" ]; then
60 | CMDLINE=$(echo $ORG_CMDLINE | sed -e "s!${CMDBIN}!${FULLCMD}!") # substitute the absolute path into the command line
61 | echo -e "# $CMDLINE"
62 | echo "$CMDLINE" | bash
63 | else
64 | echo -e "# $ORG_CMDLINE"
65 | echo "Command not found or not executable"
66 | fi
67 | }
68 |
69 | function display_log() { # dump each log file given as argument, skipping compressed archives
70 | local file
71 | echo -e "\n#==[ Log Files ]====================================#"
72 | for file in "${@}" ; do
73 | echo -e "\n# ${file}"
74 | SKIP_FILE=$(echo ${file} | grep -E "tbz$|bz2$|gz$|zip$") # grep -E matches the style used elsewhere in this script; egrep is deprecated
75 | if [ -n "$SKIP_FILE" ]; then
76 | echo -e "skipping..."
77 | continue
78 | fi
79 | cat "${file}"
80 | echo -e "######"
81 | done
82 | }
83 |
84 | # ---- Main ---- collect package info, service status, config files and logs for the support report
85 | echo -e "Supportconfig Plugin for $TITLE, v${SVER}"
86 |
87 | display_package_info prometheus-hanadb_exporter
88 | # use 'pattern' for systemctl status cmd
89 | display_systemd_status "*hanadb_exporter*"
90 |
91 | for file in /usr/etc/hanadb_exporter/* /etc/hanadb_exporter/*; do # vendor defaults and admin overrides
92 | [ -e "${file}" ] && { display_file_stat "${file}" ; display_file "${file}" ; echo ; }
93 | done
94 |
95 | # default log file
96 | display_log /var/log/hanadb_exporter*
97 | # get logfile name from config file
98 | if [ -f /etc/hanadb_exporter/config.json ]; then
99 | logfile_from_config=$(sed -n 's%[[:blank:]]*"log_file":[[:blank:]]*"\(.*\)"%\1%p' /etc/hanadb_exporter/config.json)
100 | [[ "$logfile_from_config" != "/var/log/hanadb_exporter.log" ]] && [ -f "$logfile_from_config" ] && display_log $logfile_from_config
101 | fi
102 | # log entries from syslog
103 | display_cmd "grep -E -i 'hanadb_exporter\[.*\]:' /var/log/messages"
104 |
105 | display_cmd "ss -tulpan | grep exporter"
106 |
107 | # Bye.
108 | exit 0
109 |
--------------------------------------------------------------------------------
/config.json.example:
--------------------------------------------------------------------------------
1 | {
2 | "listen_address": "0.0.0.0",
3 | "exposition_port": 9668,
4 | "multi_tenant": true,
5 | "timeout": 30,
6 | "hana": {
7 | "host": "localhost",
8 | "port": 30013,
9 | "user": "SYSTEM",
10 | "password": "PASSWORD",
11 | "ssl": false,
12 | "ssl_validate_cert": false
13 | },
14 | "logging": {
15 | "config_file": "./logging_config.ini",
16 | "log_file": "hanadb_exporter.log"
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/daemon/hanadb_exporter@.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=SAP HANA database metrics exporter
3 | Documentation=https://github.com/SUSE/hanadb_exporter
4 |
5 | [Service]
6 | Type=notify
7 | ExecStart=/usr/bin/hanadb_exporter --identifier %i --daemon
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
/dashboards/README.md:
--------------------------------------------------------------------------------
1 | # Grafana dashboards
2 |
3 | We provide dashboards for Grafana, leveraging the exporter.
4 |
5 |
6 | ## SAP HANA
7 |
8 | This dashboard shows the details of a SAP HANA instance.
9 |
10 | It supports both multiple instances and multi-tenancy.
11 |
12 | 
13 |
14 |
15 | ## Installation
16 |
17 | ### RPM
18 |
19 | On openSUSE and SUSE Linux Enterprise distributions, you can install the package via zypper in your Grafana host:
20 | ```
21 | zypper in grafana-sap-hana-dashboards
22 | systemctl restart grafana-server
23 | ```
24 |
25 | For the latest development version, please refer to the [development upstream project in OBS](https://build.opensuse.org/project/show/network:ha-clustering:sap-deployments:devel), which is automatically updated every time we merge changes in this repository.
26 |
27 | ### Manual
28 |
29 | Copy the [provider configuration file](https://build.opensuse.org/package/view_file/network:ha-clustering:sap-deployments:devel/grafana-sap-providers/provider-sles4sap.yaml?expand=1) in `/etc/grafana/provisioning/dashboards` and then the JSON files inside `/var/lib/grafana/dashboards/sles4sap`.
30 |
31 | Once done, restart the Grafana server.
32 |
33 |
34 | ## Development notes
35 |
36 | - Please make sure the `version` field in the JSON is incremented just once per PR.
37 | - Unlike the exporter, OBS Submit Requests are not automated for the dashboard package.
38 | Once PRs are merged, you will have to manually perform a Submit Request against `openSUSE:Factory`, after updating the `version` field in the `_service` file and adding an entry to the `grafana-sap-hana-dashboards.changes` file.
39 |
--------------------------------------------------------------------------------
/dashboards/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/hanadb_exporter/4780a8a6f3509a0fd2658c423df39168271f7d20/dashboards/screenshot.png
--------------------------------------------------------------------------------
/docs/METRICS.md:
--------------------------------------------------------------------------------
1 | # Metrics file
2 |
3 | This document explains how to create/update the used metrics file in order to provide to the hanadb_exporter the required information to run the queries and export the data.
4 |
5 | # JSON format
6 |
7 | If the metrics file uses JSON format ([metrics.json](../metrics.json)) these are the available options
8 | to create the correct structure:
9 |
10 | Each entry in the JSON file is formed by a SAP HANA SQL query (key) and the metrics/additional information (value). The additional information is composed by:
11 |
12 | * `enabled (boolean, optional)`: If the query is executed or not (`true` by default if the `enabled` entry is not set). If set to `false` the metrics for this query won't be executed.
13 | * `hana_version_range (list, optional)`: The SAP HANA database versions range where the query is available (`[1.0.0]` by default). If the current database version is not inside the provided range, the query won't be executed. If the list has only one element, all versions beyond this value (this included) will be queried.
14 | * `metrics (list)`: A list of metrics for this query. Each metric will need the next information;
15 |   * `name (str)`: The name used to export the metric.
16 | * `description (str)`: The description of the metric (available as `# HELP`).
17 | * `labels (list)`: List of labels used to split the records.
18 | * `value (str)`: The name of the column used to gather the exported value (must match with one of the columns of the query).
19 |   * `unit (str)`: Used unit for the exported value (`mb` for example).
20 | * `type (enum{gauge})`: Type of the exported metric (available options: `gauge`).
21 |
22 | Here an example of a query and some metrics:
23 |
24 | ```
25 | {
26 | "SELECT TOP 10 host, LPAD(port, 5) port, SUBSTRING(REPLACE_REGEXPR('\n' IN statement_string WITH ' ' OCCURRENCE ALL), 1,30) sql_string, statement_hash sql_hash, execution_count, total_execution_time + total_preparation_time total_elapsed_time FROM sys.m_sql_plan_cache ORDER BY total_elapsed_time, execution_count DESC;":
27 | {
28 | "enabled": true,
29 |     "hana_version_range": ["1.0"],
30 | "metrics": [
31 | {
32 | "name": "hanadb_sql_top_time_consumers",
33 | "description": "Top statements time consumers. Sum of the time consumed in all executions in Microseconds",
34 | "labels": ["HOST", "PORT", "SQL_STRING", "SQL_HASH"],
35 | "value": "TOTAL_ELAPSED_TIME",
36 | "unit": "mu",
37 | "type": "gauge"
38 | },
39 | {
40 | "name": "hanadb_sql_top_time_consumers",
41 | "description": "Top statements time consumers. Number of total executions of the SQL Statement",
42 | "labels": ["HOST", "PORT", "SQL_STRING", "SQL_HASH"],
43 | "value": "EXECUTION_COUNT",
44 | "unit": "count",
45 | "type": "gauge"
46 | }
47 | ]
48 | }
49 | }
50 | ```
51 |
--------------------------------------------------------------------------------
/hanadb_exporter/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | SAP HANA database data exporter
3 |
4 | :author: xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: xarbulu@suse.de
7 |
8 | :since: 2019-05-09
9 | """
10 |
11 | __version__ = "0.7.4"
12 |
--------------------------------------------------------------------------------
/hanadb_exporter/db_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | SAP HANA database manager
3 |
4 | :author: xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: xarbulu@suse.de
7 |
8 | :since: 2019-10-24
9 | """
10 |
11 | import logging
12 | import time
13 |
14 | from shaptools import hdb_connector
15 | from hanadb_exporter import utils
16 |
17 | try:
18 | import certifi
19 | CERTIFI_INSTALLED = True
20 | except ImportError:
21 | CERTIFI_INSTALLED = False
22 |
23 | RECONNECTION_INTERVAL = 15
24 |
25 |
class UserKeyNotSupportedError(ValueError):
    """
    Raised when a stored user key is requested with a connector that cannot handle it
    """
30 |
31 |
class DatabaseManager(object):
    """
    Manage the connection to a multi container HANA system.

    Opens a connection to the System database and, optionally, to every
    tenant database found through it. All the established connectors are
    exposed through :meth:`get_connectors`.
    """

    TENANT_DATA_QUERY =\
        """SELECT DATABASE_NAME,SQL_PORT FROM SYS_DATABASES.M_SERVICES
        WHERE COORDINATOR_TYPE='MASTER' AND SQL_PORT<>0"""

    def __init__(self):
        self._logger = logging.getLogger(__name__)
        self._system_db_connector = hdb_connector.HdbConnector()
        self._db_connectors = []

    def _get_tenants_port(self):
        """
        Get the tenant databases sql ports querying the System database

        Yields:
            tuple: (database name (str), sql port (int)) for every tenant
                (SYSTEMDB itself is excluded)
        """
        data = self._system_db_connector.query(self.TENANT_DATA_QUERY)
        formatted_data = utils.format_query_result(data)
        for tenant_data in formatted_data:
            if tenant_data['DATABASE_NAME'] != 'SYSTEMDB':
                yield tenant_data['DATABASE_NAME'], int(tenant_data['SQL_PORT'])

    def _connect_tenants(self, host, connection_data):
        """
        Connect to the tenants

        Args:
            host (str): Host of the HANA database
            connection_data (dict): Data retrieved from _get_connection_data
        """
        for database, tenant_port in self._get_tenants_port():
            conn = hdb_connector.HdbConnector()
            # If userkey is used database name must be added to connect to tenants
            if connection_data.get('userkey'):
                connection_data['databaseName'] = database
            try:
                conn.connect(
                    host, tenant_port, **connection_data)
                self._db_connectors.append(conn)
                self._logger.info('Connected succesfully to TENANT database %s', database)
            except hdb_connector.connectors.base_connector.ConnectionError as err:
                # a failing tenant connection must not stop the exporter:
                # the remaining databases are still exported
                self._logger.warning(
                    'Could not connect to TENANT database %s with error: %s', database, str(err))

    def _get_connection_data(self, userkey, user, password, **kwargs):
        """
        Check that provided user data is valid and build the connection parameters.
        A user/password pair or a userkey must be provided.

        Args:
            userkey (str): HANA stored user key (only supported by hdbcli)
            user (str): database user name
            password (str): database user password
            ssl (bool, opt): use a ssl secured connection
            ssl_validate_cert (bool, opt): validate the ssl certificate

        Returns:
            dict: keyword arguments for the connector connect() call

        Raises:
            UserKeyNotSupportedError: if userkey is used with the pyhdb connector
            ValueError: if neither userkey nor a user/password pair is provided
        """
        if userkey:
            if hdb_connector.API == 'pyhdb':
                raise UserKeyNotSupportedError(
                    'userkey usage is not supported with pyhdb connector, hdbcli must be installed')
            self._logger.info('stored user key %s will be used to connect to the database', userkey)
            if user or password:
                self._logger.warning(
                    'userkey will be used to create the connection. user/password are omitted')
        elif user and password:
            self._logger.info('user/password combination will be used to connect to the database')
        else:
            raise ValueError(
                'Provided user data is not valid. userkey or user/password pair must be provided')

        ssl = kwargs.get('ssl', False)
        if ssl:
            self._logger.info('Using ssl connection...')

        if ssl and CERTIFI_INSTALLED:
            # use the mozilla CA bundle shipped with certifi as trust store
            trust_store = certifi.where()
        elif ssl:
            self._logger.warning(
                'certifi package is not installed. Using the default ssl pem key...')

        return {
            'userkey': userkey,
            'user': user,
            'password': password,
            'RECONNECT': 'FALSE',
            'encrypt': ssl,
            'sslValidateCertificate': kwargs.get('ssl_validate_cert', False) if ssl else False,
            'sslTrustStore': trust_store if ssl and CERTIFI_INSTALLED else None
        }

    def start(self, host, port, **kwargs):
        """
        Start the database manager. This will open a connection with the System database and
        retrieve the current environment tenant databases data

        Args:
            host (str): Host of the HANA database
            port (int): Port of the System database (3XX13 when XX is the instance number)
            userkey (str): User stored key
            user (str): System database user name (SYSTEM usually)
            password (str): System database user password
            multi_tenant (bool): Connect to all tenants checking the data in the System database
            timeout (int, opt): Timeout in seconds to connect to the System database
            ssl (bool, opt): Enable SSL connection
            ssl_validate_cert (bool, opt): Validate SSL certificate. Required in HANA cloud

        Raises:
            ConnectionError: if the System database cannot be reached before the
                timeout expires, or if the provided userkey is invalid
        """
        connection_data = self._get_connection_data(
            kwargs.get('userkey', None),
            kwargs.get('user', ''),
            kwargs.get('password', ''),
            ssl=kwargs.get('ssl', False),
            ssl_validate_cert=kwargs.get('ssl_validate_cert', False)
        )

        current_time = time.time()
        timeout = current_time + kwargs.get('timeout', 600)
        while current_time <= timeout:
            try:
                # parameters are passed using kwargs to the connect method
                # pyhdb only uses 'user' and `password`
                # dbapi uses 'user', 'password', 'userkey' and other optional params
                self._system_db_connector.connect(host, port, **connection_data)
                self._db_connectors.append(self._system_db_connector)
                break
            except hdb_connector.connectors.base_connector.ConnectionError as err:
                self._logger.error(
                    'the connection to the system database failed. error message: %s', str(err))
                # This conditions is used to stop the exporter if the provided userkey is not valid
                if 'Invalid value for KEY' in str(err):
                    raise hdb_connector.connectors.base_connector.ConnectionError(
                        'provided userkey is not valid. Check if dbapi is installed correctly')
                time.sleep(RECONNECTION_INTERVAL)
                current_time = time.time()
        else:
            # while/else: executed only when the loop condition became false,
            # meaning the timeout was reached without a successful connection
            raise hdb_connector.connectors.base_connector.ConnectionError(
                'timeout reached connecting the System database')

        if kwargs.get('multi_tenant', True):
            self._connect_tenants(host, connection_data)

    def get_connectors(self):
        """
        Get the connectors (System database first, then the tenants)
        """
        return self._db_connectors
170 |
--------------------------------------------------------------------------------
/hanadb_exporter/main.py:
--------------------------------------------------------------------------------
1 | """
2 | SAP HANA database prometheus data exporter app
3 |
4 | :author: xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: xarbulu@suse.de
7 |
8 | :since: 2019-05-09
9 | """
10 |
11 | import sys
12 | import os
13 | import traceback
14 | import logging
15 | from logging.config import fileConfig
16 | import time
17 | import json
18 | import argparse
19 |
20 | from prometheus_client.core import REGISTRY
21 | from prometheus_client import start_http_server
22 |
23 | from hanadb_exporter import __version__
24 | from hanadb_exporter import prometheus_exporter
25 | from hanadb_exporter import db_manager
26 | from hanadb_exporter import utils
27 | from hanadb_exporter import secrets_manager
28 |
29 | LOGGER = logging.getLogger(__name__)
30 | # in new systems /etc/ folder is not used in favor of /usr/etc
31 | CONFIG_FILES_DIR = [
32 | '/etc/hanadb_exporter/',
33 | '/usr/etc/hanadb_exporter/'
34 | ]
35 | METRICS_FILES = [
36 | '/etc/hanadb_exporter/metrics.json',
37 | '/usr/etc/hanadb_exporter/metrics.json'
38 | ]
39 |
def parse_config(config_file):
    """
    Read the exporter configuration file and return its content as a dict

    Args:
        config_file (str): path of the json configuration file
    """
    with open(config_file, 'r') as file_handle:
        return json.load(file_handle)
47 |
48 |
def parse_arguments():
    """
    Build the command line parser and return the parsed arguments
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-c", "--config", help="Path to hanadb_exporter configuration file")
    arg_parser.add_argument(
        "-m", "--metrics", help="Path to hanadb_exporter metrics file")
    arg_parser.add_argument(
        "-d", "--daemon", action="store_true",
        help="Start the exporter as a systemd daemon. Only used when the the application "\
        "is managed by systemd")
    arg_parser.add_argument(
        "--identifier", help="Identifier of the configuration file from /etc/hanadb_exporter")
    arg_parser.add_argument(
        "-v", "--verbosity",
        help="Python logging level. Options: DEBUG, INFO, WARN, ERROR (INFO by default)")
    arg_parser.add_argument(
        "-V", "--version", action="store_true",
        help="Print the hanadb_exporter version")
    return arg_parser.parse_args()
72 |
73 |
def setup_logging(config):
    """
    Configure the logging system from the 'logging' section of the configuration

    Args:
        config (dict): parsed exporter configuration (must contain 'hana'
            and 'logging' entries)
    """
    hana_config = config.get('hana')
    file_suffix = 'hanadb_exporter_{}_{}'.format(
        hana_config.get('host'), hana_config.get('port', 30015))
    log_file = config.get('logging').get('log_file', '/var/log/{}'.format(file_suffix))

    fileConfig(config.get('logging').get('config_file'), defaults={'logfilename': log_file})

    # Redirect every uncaught exception to the logging system instead of stderr
    def handle_exception(*exc_info):  # pragma: no cover
        """
        Catch exceptions to log them
        """
        formatted = ''.join(traceback.format_exception(*exc_info))
        logging.getLogger('hanadb_exporter').error(formatted)

    sys.excepthook = handle_exception
94 |
95 |
def lookup_etc_folder(config_files_path):
    """
    Find predefined files in default locations (METRICS and CONFIG folder)
    This is used mainly because /etc location changed to /usr/etc in new systems
    return full filename path (e.g: /etc/hanadb_exporter/filename.json)

    Args:
        config_files_path (list): candidate file paths, checked in order
    """
    located = next(
        (candidate for candidate in config_files_path if os.path.isfile(candidate)), None)
    if located:
        return located
    raise ValueError(
        'configuration file does not exist in {}'.format(",".join(config_files_path)))
107 |
# Start up the server to expose the metrics.
def run():
    """
    Main execution.

    Parses the CLI arguments, loads the configuration (either an explicit
    file or an identifier resolved under /etc or /usr/etc), sets up logging,
    connects to the SAP HANA system (and optionally its tenants), registers
    the prometheus collectors and serves the metrics over HTTP forever.
    """
    args = parse_arguments()
    if args.version:
        # pylint:disable=C0325
        print("hanadb_exporter %s" % (__version__))
        return
    if args.config is not None:
        config = parse_config(args.config)
    elif args.identifier is not None:
        file_name = args.identifier + '.json'
        # determine if file is /etc or /usr/etc
        config_file = lookup_etc_folder([dirname + file_name for dirname in CONFIG_FILES_DIR])
        config = parse_config(config_file)

    else:
        raise ValueError('configuration file or identifier must be used')

    if config.get('logging', None):
        setup_logging(config)
    else:
        logging.basicConfig(level=args.verbosity or logging.INFO)

    if args.metrics:
        metrics = args.metrics
    else:
        # fall back to the metrics file shipped in /etc or /usr/etc
        metrics = lookup_etc_folder(METRICS_FILES)

    try:
        hana_config = config['hana']
        dbs = db_manager.DatabaseManager()
        user = hana_config.get('user', '')
        password = hana_config.get('password', '')
        userkey = hana_config.get('userkey', None)
        aws_secret_name = hana_config.get('aws_secret_name', '')

        if aws_secret_name:
            LOGGER.info(
                'AWS secret name is going to be used to read the database username and password')
            # credentials retrieved from AWS Secrets Manager override the
            # user/password entries of the configuration file
            db_credentials = secrets_manager.get_db_credentials(aws_secret_name)
            user = db_credentials["username"]
            password = db_credentials["password"]

        dbs.start(
            hana_config['host'], hana_config.get('port', 30013),
            user=user,
            password=password,
            userkey=userkey,
            multi_tenant=config.get('multi_tenant', True),
            timeout=config.get('timeout', 30),
            ssl=hana_config.get('ssl', False),
            ssl_validate_cert=hana_config.get('ssl_validate_cert', False))
    except KeyError as err:
        raise KeyError('Configuration file {} is malformed: {} not found'.format(args.config, err))

    if args.daemon:
        # notify systemd that the service finished its startup
        utils.systemd_ready()

    connectors = dbs.get_connectors()
    collector = prometheus_exporter.SapHanaCollectors(connectors=connectors, metrics_file=metrics)
    REGISTRY.register(collector)
    LOGGER.info('exporter successfully registered')

    LOGGER.info('starting to serve metrics')
    start_http_server(config.get('exposition_port', 9668), config.get('listen_address', '0.0.0.0'))
    while True:
        time.sleep(1)
178 |
179 | if __name__ == "__main__": # pragma: no cover
180 | run()
181 |
--------------------------------------------------------------------------------
/hanadb_exporter/prometheus_exporter.py:
--------------------------------------------------------------------------------
1 | """
2 | SAP HANA database prometheus data exporter
3 |
4 | :author: xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: xarbulu@suse.de
7 |
8 | :since: 2019-05-09
9 | """
10 |
11 | import logging
12 |
13 | from prometheus_client import core
14 | from shaptools import hdb_connector
15 | from hanadb_exporter import prometheus_metrics
16 | from hanadb_exporter import utils
17 |
18 |
class SapHanaCollectors(object):
    """
    SAP HANA database data exporter using multiple db connectors
    (it aggregates one SapHanaCollector per connector)
    """

    def __init__(self, connectors, metrics_file):
        self._logger = logging.getLogger(__name__)
        self._collectors = [
            SapHanaCollector(connector, metrics_file) for connector in connectors]

    def collect(self):
        """
        Collect metrics for each collector
        """
        for single_collector in self._collectors:
            for collected_metric in single_collector.collect():
                yield collected_metric
38 |
39 |
class SapHanaCollector(object):
    """
    SAP HANA database data exporter for a single database connection
    """

    # labels prepended to every exported metric
    METADATA_LABEL_HEADERS = ['sid', 'insnr', 'database_name']

    def __init__(self, connector, metrics_file):
        """
        Args:
            connector (obj): connected hdb_connector like instance
            metrics_file (str): path of the metrics definition file
        """
        self._logger = logging.getLogger(__name__)
        self._hdb_connector = connector
        # metrics_config contains the configuration api/json data
        self._metrics_config = prometheus_metrics.PrometheusMetrics(metrics_file)
        self.retrieve_metadata()

    @property
    def metadata_labels(self):
        """
        Get metadata labels data (values matching METADATA_LABEL_HEADERS)
        """
        return [self._sid, self._insnr, self._database_name]

    def retrieve_metadata(self):
        """
        Retrieve database metadata: sid, instance number, database name and hana version
        """
        query = \
            """SELECT
        (SELECT value
        FROM M_SYSTEM_OVERVIEW
        WHERE section = 'System'
        AND name = 'Instance ID') SID,
        (SELECT value
        FROM M_SYSTEM_OVERVIEW
        WHERE section = 'System'
        AND name = 'Instance Number') INSNR,
        m.database_name,
        m.version
        FROM m_database m;"""

        self._logger.info('Querying database metadata...')
        query_result = self._hdb_connector.query(query)
        formatted_result = utils.format_query_result(query_result)[0]
        self._hana_version = formatted_result['VERSION']
        self._sid = formatted_result['SID']
        self._insnr = formatted_result['INSNR']
        self._database_name = formatted_result['DATABASE_NAME']
        self._logger.info(
            'Metadata retrieved. version: %s, sid: %s, insnr: %s, database: %s',
            self._hana_version, self._sid, self._insnr, self._database_name)

    def _manage_gauge(self, metric, formatted_query_result):
        """
        Manage Gauge type metric:
        metric is the json.file object for example
        parse a SQL query and fullfill(formatted_query_result) the metric object from prometheus

        Args:
            metric (dict): a dictionary containing information about the metric
            formatted_query_result (nested list): query formated by _format_query_result method

        Returns:
            GaugeMetricFamily: the populated prometheus gauge object
        """
        # Add sid, insnr and database_name labels
        combined_label_headers = self.METADATA_LABEL_HEADERS + metric.labels
        metric_obj = core.GaugeMetricFamily(
            metric.name, metric.description, None, combined_label_headers, metric.unit)
        for row in formatted_query_result:
            labels = []
            metric_value = None
            for column_name, column_value in row.items():
                try:
                    labels.insert(metric.labels.index(column_name.lower()), column_value)
                except ValueError: # Received data is not a label, check for the lowercased value
                    if column_name.lower() == metric.value.lower():
                        metric_value = column_value
            if metric_value is None:
                self._logger.warning(
                    'Specified value in metrics.json for metric "%s": (%s) not found or it is '\
                    'invalid (None) in the query result',
                    metric.name, metric.value)
                continue
            elif len(labels) != len(metric.labels):
                # Log when a label(s) specified in metrics.json is not found in the query result
                self._logger.warning(
                    'One or more label(s) specified in metrics.json '
                    'for metric "%s" that are not found in the query result',
                    metric.name)
                continue
            else:
                # Add sid, insnr and database_name labels
                combined_labels = self.metadata_labels + labels
                metric_obj.add_metric(combined_labels, metric_value)
        self._logger.debug('%s \n', metric_obj.samples)
        return metric_obj

    def reconnect(self):
        """
        Reconnect if needed and retrieve new metadata

        hdb_connector reconnect already checks if the connection is working, but we need to
        recheck to run the retrieve_metadata method to update some possible changes
        """
        if not self._hdb_connector.isconnected():
            self._hdb_connector.reconnect()
            self.retrieve_metadata()

    def collect(self):
        """
        execute db queries defined by metrics_config/api file, and store them in
        a prometheus metric_object, which will be served over http for scraping e.g gauge, etc.
        """
        # Try to reconnect if the connection is lost. It will raise an exception in case of error
        self.reconnect()

        for query in self._metrics_config.queries:
            if not query.enabled:
                self._logger.info('Query %s is disabled', query.query)
            elif not utils.check_hana_range(self._hana_version, query.hana_version_range):
                self._logger.info('Query %s out of the provided hana version range: %s',
                                  query.query, query.hana_version_range)
            else:
                try:
                    query_result = self._hdb_connector.query(query.query)
                except hdb_connector.connectors.base_connector.QueryError as err:
                    self._logger.error('Failure in query: %s, skipping...', query.query)
                    self._logger.error(str(err))
                    continue  # Moving to the next iteration (query)
                formatted_query_result = utils.format_query_result(query_result)
                if not formatted_query_result:
                    self._logger.warning(
                        'Query %s ... has not returned any record', query.query)
                    continue
                for metric in query.metrics:
                    if metric.type == "gauge":
                        try:
                            metric_obj = self._manage_gauge(metric, formatted_query_result)
                        except ValueError as err:
                            self._logger.error(str(err))
                            # If a ValueError exception is caught, skip the metric and go on to
                            # complete the rest of the loop
                            continue
                    else:
                        raise NotImplementedError('{} type not implemented'.format(metric.type))
                    yield metric_obj
182 |
--------------------------------------------------------------------------------
/hanadb_exporter/prometheus_metrics.py:
--------------------------------------------------------------------------------
1 | """
2 | SAP HANA database prometheus data exporter metrics
3 |
4 | :author: xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: xarbulu@suse.de
7 |
8 | :since: 2019-05-09
9 | """
10 |
11 | import logging
12 | import collections
13 | import json
14 |
15 |
METRICMODEL = collections.namedtuple(
    'Metric',
    'name description labels value unit type enabled hana_version_range'
)


class Metric(METRICMODEL):
    """
    Prometheus metric definition loaded from the config/api
    (immutable structure inherited from namedtuple)
    """

    # pylint:disable=R0913
    # pylint:disable=W0622
    def __new__(cls, name, description, labels, value, unit, type,
                enabled=True, hana_version_range=None):
        if not value:
            raise ValueError('No value specified in metrics.json for {}'.format(name))
        version_range = hana_version_range or ['1.0.0']
        # labels and the value column are matched in lowercase
        lowered_labels = [label_name.lower() for label_name in labels]
        return super(Metric, cls).__new__(
            cls, name, description, lowered_labels, value.lower(),
            unit, type, enabled, version_range)
40 |
41 |
class Query(object):
    """
    Class to store the query and its metrics
    """

    def __init__(self):
        self.query = None
        self.metrics = []
        self.enabled = True
        self.hana_version_range = ['1.0.0']

    def parse(self, query, query_data):
        """
        Parse metrics by query

        Args:
            query (str): SQL query string
            query_data (dict): data associated to the query ('enabled',
                'hana_version_range' and 'metrics' entries)
        """
        self.query = query
        self.metrics = []
        self.enabled = query_data.get('enabled', True)
        # the metrics file and docs/METRICS.md define this entry as
        # 'hana_version_range' (it was wrongly read from 'hana_version'
        # before, silently ignoring the configured range)
        self.hana_version_range = query_data.get('hana_version_range', ['1.0.0'])
        for metric in query_data['metrics']:
            modeled_data = Metric(**metric)
            self.metrics.append(modeled_data)

    @classmethod
    def get_model(cls, query, metrics):
        """
        Get metric model data

        Args:
            query (str): SQL query string
            metrics (dict): raw query data from the metrics file
        """
        modeled_query = cls()
        modeled_query.parse(query, metrics)
        return modeled_query
73 |
74 |
class PrometheusMetrics(object):
    """
    Class to store the metrics data
    """

    def __init__(self, metrics_file):
        self.queries = self.load_metrics(metrics_file)

    @classmethod
    def load_metrics(cls, metrics_file):
        """
        Load metrics file as json and model every entry as a Query
        """
        logger = logging.getLogger(__name__)
        with open(metrics_file, 'r') as file_ptr:
            raw_data = json.load(file_ptr)

        parsed_queries = []
        try:
            for query, query_data in raw_data.items():
                parsed_queries.append(Query.get_model(query, query_data))
        except TypeError as err:
            logger.error('Malformed %s file in query %s ...', metrics_file, query[:50])
            logger.error(str(err))
            raise

        return parsed_queries
103 |
--------------------------------------------------------------------------------
/hanadb_exporter/secrets_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | AWS Secrets Manager helper used to retrieve the database credentials.
3 |
4 | :author: elturkym, schniber
5 |
6 | :since: 2021-07-15
7 | """
8 |
9 | import json
10 | import logging
11 |
12 | import boto3
13 | import requests
14 | from botocore.exceptions import ClientError
15 | from requests.exceptions import HTTPError
16 |
17 | EC2_INFO_URL = "http://169.254.169.254/latest/dynamic/instance-identity/document"
18 | TOKEN_URL = "http://169.254.169.254/latest/api/token"
19 | TOKEN_HEADER = {"X-aws-ec2-metadata-token-ttl-seconds": "21600"}
20 | LOGGER = logging.getLogger(__name__)
21 |
22 |
class SecretsManagerError(ValueError):
    """
    Raised when the secret details cannot be retrieved
    """
27 |
28 |
def get_db_credentials(secret_name):
    """
    Retrieve the database credentials from AWS Secrets Manager.

    The AWS region is discovered through the EC2 instance metadata service
    (IMDSv1 first, falling back to IMDSv2 token authentication on a 401).

    Args:
        secret_name (str): name (or ARN) of the secret storing the credentials

    Returns:
        dict: parsed secret content (expected to provide 'username' and
            'password' keys)

    Raises:
        SecretsManagerError: if the EC2 metadata service or the Secrets
            Manager request fails
    """
    LOGGER.info("retrieving AWS secret details")

    # a timeout prevents the exporter from hanging forever when the
    # link-local metadata endpoint is unreachable
    ec2_info_response = requests.get(EC2_INFO_URL, timeout=15)

    # In case the EC2 instance is making use of IMDSv2, calls to the EC2 instance
    # metadata data service will return 401 Unauthorized HTTP Return code.
    # In this case, python catches the error, generates an authentication token
    # before attempting the call to the EC2 instance metadata service again using
    # the IMDSv2 token authentication header.
    if ec2_info_response.status_code == 401:
        try:
            ec2_metadata_service_token = requests.put(
                TOKEN_URL, headers=TOKEN_HEADER, timeout=15
            ).content
        except HTTPError as e:
            raise SecretsManagerError("EC2 instance metadata service request failed") from e

        ec2_info_response = requests.get(
            EC2_INFO_URL,
            headers={"X-aws-ec2-metadata-token": ec2_metadata_service_token},
            timeout=15,
        )

    try:
        ec2_info_response.raise_for_status()
    except HTTPError as e:
        raise SecretsManagerError("EC2 information request failed") from e

    ec2_info = ec2_info_response.json()
    region_name = ec2_info["region"]

    # Create a Secrets Manager client
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name
    )

    # In this sample we only handle the specific exceptions for the 'GetSecretValue' API.
    # See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
    # We rethrow the exception by default.

    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError as e:
        raise SecretsManagerError("Couldn't retrieve secret details") from e
    else:
        # Decrypts secret using the associated KMS CMK.]
        secret = get_secret_value_response['SecretString']
        return json.loads(secret)
81 |
--------------------------------------------------------------------------------
/hanadb_exporter/utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Generic methods
3 |
4 | :author: xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: xarbulu@suse.de
7 |
8 | :since: 2019-07-04
9 | """
10 |
11 | import socket
12 | import os
13 |
14 | from distutils import version
15 |
class NotSystemdException(Exception):
    """
    Raised when the exporter is not being run as a systemd daemon
    """
20 |
# TODO: this method could go in shaptools itself, providing the query return formatted if
# it is requested (returning a list of dictionaries like this method)
def format_query_result(query_result):
    """
    Format query results to match column names with their values for each row
    Returns: list containing a dictionaries (column_name, value)

    Args:
        query_result (obj): QueryResult object
    """
    column_names = [meta[0] for meta in query_result.metadata]
    return [
        {column_names[position]: field for position, field in enumerate(record)}
        for record in query_result.records]
40 |
41 |
def check_hana_range(hana_version, availability_range):
    """
    Check if the current hana version is inside the available range

    Args:
        hana_version (str): Current hana version
        availability_range (list): List with one or two elements definining the
            available hana versions

    Returns:
        bool: True if the current hana version is inside the availability range

    Raises:
        ValueError: if the availability range does not have 1 or 2 elements
    """
    # distutils.version.LooseVersion was used here before; distutils is
    # deprecated (PEP 632) and removed in Python 3.12, so the comparison is
    # done with plain integer tuples. HANA versions are dot separated
    # numeric strings (e.g. 2.00.045.00), any non numeric component is
    # compared as 0.
    def _version_components(raw_version):
        # one comparable integer per dot separated component
        components = []
        for component in str(raw_version).split('.'):
            try:
                components.append(int(component))
            except ValueError:
                components.append(0)
        return components

    current_version = _version_components(hana_version)
    if len(availability_range) == 1:
        return current_version >= _version_components(availability_range[0])
    elif len(availability_range) == 2:
        return _version_components(availability_range[0]) <= current_version \
            <= _version_components(availability_range[1])
    raise ValueError('provided availability range does not have the correct number of elements')
61 |
def systemd_ready():
    """
    Notify the systemd deamon that the service is READY

    Raises:
        NotSystemdException: if the NOTIFY_SOCKET environment variable is
            not set (the process is not managed by systemd)
    """
    notify_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    addr = os.getenv('NOTIFY_SOCKET')
    if not addr:
        raise NotSystemdException("Exporter is not running as systemd deamon")

    if addr.startswith('@'):
        # abstract namespace socket: leading '@' maps to a NUL byte
        addr = '\0' + addr[1:]
    notify_socket.connect(addr)
    notify_socket.sendall(b'READY=1')
75 |
--------------------------------------------------------------------------------
/logging_config.ini:
--------------------------------------------------------------------------------
1 | # More configuration file format information in: https://docs.python.org/3/library/logging.config.html
2 | [loggers]
3 | keys=root,fileLogger,sysLogger
4 |
5 | [handlers]
6 | keys=fileHandler,sysHandler,stdoHandler
7 |
8 | [formatters]
9 | keys=customFormatter
10 |
11 | [logger_root]
12 | level=INFO
13 | handlers=fileHandler,sysHandler,stdoHandler
14 |
15 | # File logger configuration, used to redirect logs to a file (/var/log/hanadb_exporter.log by default)
16 | [logger_fileLogger]
17 | level=WARNING
18 | handlers=fileHandler
19 | qualname=shaptools,hanadb_exporter
20 | propagate=0
21 |
22 | # Logger to configure the syslog output
23 | [logger_sysLogger]
24 | level=WARNING
25 | handlers=sysHandler,stdoHandler
26 | qualname=shaptools,hanadb_exporter
27 | propagate=0
28 |
29 | # Handlers associated to the loggers
30 | [handler_fileHandler]
31 | class=FileHandler
32 | level=WARNING
33 | formatter=customFormatter
34 | args=('%(logfilename)s', 'a')
35 |
36 | [handler_sysHandler]
37 | class=handlers.SysLogHandler
38 | level=WARNING
39 | formatter=customFormatter
40 | args=('/dev/log',)
41 |
42 | # standard output handler
43 | [handler_stdoHandler]
44 | class=StreamHandler
45 | level=WARNING
46 | formatter=customFormatter
47 | args=(sys.stdout,)
48 |
49 | # formatters
50 |
51 | # this print timestamp also
52 | [formatter_customFormatter]
53 | format=%(asctime)s %(levelname)s %(name)s %(message)s
54 |
--------------------------------------------------------------------------------
/metrics.json:
--------------------------------------------------------------------------------
1 | {
2 | "SELECT host, ROUND(SUM(memory_size_in_total)/1024/1024) column_tables_used_mb FROM sys.m_cs_tables GROUP BY host;":
3 | {
4 | "enabled": true,
5 | "hana_version_range": ["1.0.0", "3.0.0"],
6 | "metrics": [
7 | {
8 | "name": "hanadb_column_tables_used_memory",
9 | "description": "Column tables total memory used in MB",
10 | "labels": ["HOST"],
11 | "value": "COLUMN_TABLES_USED_MB",
12 | "unit": "mb",
13 | "type": "gauge"
14 | }
15 | ]
16 | },
17 | "SELECT host, schema_name, ROUND(SUM(memory_size_in_total)/1024/1024) schema_memory_used_mb FROM sys.m_cs_tables GROUP BY host, schema_name;":
18 | {
19 | "enabled": true,
20 | "hana_version_range": ["1.0.0"],
21 | "metrics": [
22 | {
23 | "name": "hanadb_schema_used_memory",
24 | "description": "Total used memory by schema in MB",
25 | "labels": ["HOST", "SCHEMA_NAME"],
26 | "value": "SCHEMA_MEMORY_USED_MB",
27 | "unit": "mb",
28 | "type": "gauge"
29 | }
30 | ]
31 | },
32 | "SELECT MAX(TIMESTAMP) TIMESTAMP, HOST, MEASURED_ELEMENT_NAME CORE, SUM(MAP(CAPTION, 'User Time', TO_NUMBER(VALUE), 0)) USER_PCT, SUM(MAP(CAPTION, 'System Time', TO_NUMBER(VALUE), 0)) SYSTEM_PCT, SUM(MAP(CAPTION, 'Wait Time', TO_NUMBER(VALUE), 0)) WAITIO_PCT, SUM(MAP(CAPTION, 'Idle Time', 0, TO_NUMBER(VALUE))) BUSY_PCT, SUM(MAP(CAPTION, 'Idle Time', TO_NUMBER(VALUE), 0)) IDLE_PCT FROM sys.M_HOST_AGENT_METRICS WHERE MEASURED_ELEMENT_TYPE = 'Processor' GROUP BY HOST, MEASURED_ELEMENT_NAME;":
33 | {
34 | "enabled": true,
35 | "metrics": [
36 | {
37 | "name": "hanadb_cpu_user",
38 | "description": "Percentage of CPU time spent by HANA DB in user space, over the last minute (in seconds)",
39 | "labels": ["HOST", "CORE"],
40 | "value": "USER_PCT",
41 | "unit": "percent",
42 | "type": "gauge"
43 | },
44 | {
45 | "name": "hanadb_cpu_system",
46 | "description": "Percentage of CPU time spent by HANA DB in Kernel space, over the last minute (in seconds)",
47 | "labels": ["HOST", "CORE"],
48 | "value": "SYSTEM_PCT",
49 | "unit": "percent",
50 | "type": "gauge"
51 | },
52 | {
53 | "name": "hanadb_cpu_waitio",
54 | "description": "Percentage of CPU time spent by HANA DB in IO mode, over the last minute (in seconds)",
55 | "labels": ["HOST", "CORE"],
56 | "value": "WAITIO_PCT",
57 | "unit": "percent",
58 | "type": "gauge"
59 | },
60 | {
61 | "name": "hanadb_cpu_busy",
62 | "description": "Percentage of CPU time spent by HANA DB, over the last minute (in seconds)",
63 | "labels": ["HOST", "CORE"],
64 | "value": "BUSY_PCT",
65 | "unit": "percent",
66 | "type": "gauge"
67 | },
68 | {
69 | "name": "hanadb_cpu_idle",
70 | "description": "Percentage of CPU time not spent by HANA DB, over the last minute (in seconds)",
71 | "labels": ["HOST", "CORE"],
72 | "value": "IDLE_PCT",
73 | "unit": "percent",
74 | "type": "gauge"
75 | }
76 | ]
77 | },
78 | "SELECT MAX(timestamp) timestamp, host, measured_element_name interface, MAX(MAP(caption, 'Collision Rate', TO_NUMBER(value), 0)) coll_per_s, MAX(MAP(caption, 'Receive Rate', TO_NUMBER(value), 0)) recv_kb_per_s, MAX(MAP(caption, 'Transmit Rate', TO_NUMBER(value), 0)) trans_kb_per_s,MAX(MAP(caption, 'Packet Receive Rate', TO_NUMBER(value), 0)) recv_pack_per_s, MAX(MAP(caption, 'Packet Transmit Rate', TO_NUMBER(value), 0)) trans_pack_per_s, MAX(MAP(caption, 'Receive Error Rate', TO_NUMBER(value), 0)) recv_err_per_s, MAX(MAP(caption, 'Transmit Error Rate', TO_NUMBER(value), 0)) trans_err_per_s FROM sys.m_host_agent_metrics WHERE measured_element_type = 'NetworkPort' GROUP BY host, measured_element_name;":
79 | {
80 | "enabled": true,
81 | "metrics": [
82 | {
83 | "name": "hanadb_network_collisions_per",
84 | "description": "Network collisions per second",
85 | "labels": ["HOST", "INTERFACE"],
86 | "value": "COLL_PER_S",
87 | "unit": "seconds",
88 | "type": "gauge"
89 | },
90 | {
91 | "name": "hanadb_network_receive_rate_kb_per",
92 | "description": "Network received rate in kilobytes per seconds",
93 | "labels": ["HOST", "INTERFACE"],
94 | "value": "RECV_KB_PER_S",
95 | "unit": "seconds",
96 | "type": "gauge"
97 | },
98 | {
99 | "name": "hanadb_network_transmission_rate_kb_per",
100 | "description": "Network transmission rate in kilobytes per seconds",
101 | "labels": ["HOST", "INTERFACE"],
102 | "value": "TRANS_KB_PER_S",
103 | "unit": "seconds",
104 | "type": "gauge"
105 | },
106 | {
107 | "name": "hanadb_network_receive_requests_per",
108 | "description": "Network receive rate in requests (packets) per seconds",
109 | "labels": ["HOST", "INTERFACE"],
110 | "value": "RECV_PACK_PER_S",
111 | "unit": "seconds",
112 | "type": "gauge"
113 | },
114 | {
115 | "name": "hanadb_network_transmission_rate_requests_per",
116 | "description": "Network transmission rate in requests (packets) per seconds",
117 | "labels": ["HOST", "INTERFACE"],
118 | "value": "TRANS_PACK_PER_S",
119 | "unit": "seconds",
120 | "type": "gauge"
121 | },
122 | {
123 | "name": "hanadb_network_receive_rate_errors_per",
124 | "description": "Network receive rate of errors per seconds",
125 | "labels": ["HOST", "INTERFACE"],
126 | "value": "RECV_ERR_PER_S",
127 | "unit": "seconds",
128 | "type": "gauge"
129 | },
130 | {
131 | "name": "hanadb_network_transmission_rate_errors_per",
132 |        "description": "Network transmission rate of errors per seconds",
133 | "labels": ["HOST", "INTERFACE"],
134 | "value": "TRANS_ERR_PER_S",
135 | "unit": "seconds",
136 | "type": "gauge"
137 | }
138 | ]
139 | },
140 | "SELECT host, LPAD(port,5) port, file_name, file_type, used_size/1024/1024 used_size_mb, total_size/1024/1024 total_size_mb, (total_size - used_size)/1024/1024 available_size_mb, LPAD(TO_DECIMAL(MAP(total_size, 0, 0, ( 1 - used_size / total_size ) * 100), 10, 2), 8) frag_pct FROM sys.m_volume_files WHERE file_type = 'DATA';":
141 | {
142 | "enabled": true,
143 | "metrics": [
144 | {
145 | "name": "hanadb_disk_data_files_used_size",
146 | "description": "Used space per file and file type (in MB)",
147 | "labels": ["HOST", "PORT", "FILE_NAME", "FILE_TYPE"],
148 | "value": "USED_SIZE_MB",
149 | "unit": "mb",
150 | "type": "gauge"
151 | },
152 | {
153 | "name": "hanadb_disk_data_files_total_size",
154 | "description": "Total space per file and file type (in MB)",
155 | "labels": ["HOST", "PORT", "FILE_NAME", "FILE_TYPE"],
156 | "value": "TOTAL_SIZE_MB",
157 | "unit": "mb",
158 | "type": "gauge"
159 | },
160 | {
161 | "name": "hanadb_disk_data_files_available_size",
162 | "description": "Available space per file and file type (in MB)",
163 | "labels": ["HOST", "PORT", "FILE_NAME", "FILE_TYPE"],
164 | "value": "AVAILABLE_SIZE_MB",
165 | "unit": "mb",
166 | "type": "gauge"
167 | },
168 | {
169 | "name": "hanadb_disk_data_files_fragmentation",
170 | "description": "Percentage of unused space compared to total space in file",
171 | "labels": ["HOST", "PORT", "FILE_NAME", "FILE_TYPE"],
172 | "value": "FRAG_PCT",
173 | "unit": "percent",
174 | "type": "gauge"
175 | }
176 | ]
177 | },
178 | "SELECT md.host, md.usage_type, md.path, md.filesystem_type, TO_DECIMAL(md.total_device_size / 1024 / 1024, 10, 2) total_device_size_mb, TO_DECIMAL(md.total_size / 1024 / 1024, 10, 2) total_size_mb, TO_DECIMAL(md.used_size / 1024 / 1024, 10, 2) total_used_size_mb, TO_DECIMAL(du.used_size / 1024 / 1024, 10, 2) used_size_mb FROM sys.m_disk_usage du, sys.m_disks md WHERE du.host = md.host AND du.usage_type = md.usage_type;":
179 | {
180 | "enabled": true,
181 | "metrics": [
182 | {
183 | "name": "hanadb_disk_total_device_size",
184 | "description": "Total device size returned by the operating system (in MB). It will be repeated if the device is shared between usages_types.",
185 | "labels": ["HOST", "USAGE_TYPE", "PATH", "FILESYSTEM_TYPE"],
186 | "value": "TOTAL_DEVICE_SIZE_MB",
187 | "unit": "mb",
188 | "type": "gauge"
189 | },
190 | {
191 | "name": "hanadb_disk_total_size",
192 | "description": "Specifies the volume size in MB. It will be repeated if the volume is shared between usages_types.",
193 | "labels": ["HOST", "USAGE_TYPE", "PATH", "FILESYSTEM_TYPE"],
194 | "value": "TOTAL_SIZE_MB",
195 | "unit": "mb",
196 | "type": "gauge"
197 | },
198 | {
199 | "name": "hanadb_disk_total_used_size",
200 | "description": "Specifies the used volume size in MB. It will be repeated if the volume is shared between usages_types.",
201 | "labels": ["HOST", "USAGE_TYPE", "PATH", "FILESYSTEM_TYPE"],
202 | "value": "TOTAL_USED_SIZE_MB",
203 | "unit": "mb",
204 | "type": "gauge"
205 | },
206 | {
207 | "name": "hanadb_disk_used_size",
208 | "description": "Size of used disk space in MB based on usage_type",
209 | "labels": ["HOST", "USAGE_TYPE", "PATH", "FILESYSTEM_TYPE"],
210 | "value": "USED_SIZE_MB",
211 | "unit": "mb",
212 | "type": "gauge"
213 | }
214 | ]
215 | },
216 | "SELECT host, disk, queue_length, srv_ms + wait_ms latency_ms, srv_ms, wait_ms, io_per_s, tp_kbps FROM( SELECT MAX(TIMESTAMP) timestamp, host, measured_element_name disk, MAX(MAP(caption, 'Queue Length', TO_NUMBER(value), 0)) queue_length, MAX(MAP(caption, 'Service Time', TO_NUMBER(value), 0)) srv_ms, MAX(MAP(caption, 'Wait Time', TO_NUMBER(value), 0)) wait_ms, MAX(MAP(caption, 'I/O Rate', TO_NUMBER(value), 0)) io_per_s, MAX(MAP(caption, 'Total Throughput', TO_NUMBER(value), 0)) tp_kbps FROM sys.m_host_agent_metrics WHERE measured_element_type = 'Disk' GROUP BY host, measured_element_name);":
217 | {
218 | "enabled": true,
219 | "metrics": [
220 | {
221 | "name": "hanadb_disk_io_queue_length",
222 | "description": "I/O queue length (average queue length of the requests that were issued to the Disk Device)",
223 | "labels": ["HOST", "DISK"],
224 | "value": "QUEUE_LENGTH",
225 | "unit": "requests",
226 | "type": "gauge"
227 | },
228 | {
229 | "name": "hanadb_disk_io_latency",
230 | "description": "Total I/O latency (ms), calculated from the sum of service and wait time",
231 | "labels": ["HOST", "DISK"],
232 | "value": "LATENCY_MS",
233 | "unit": "ms",
234 | "type": "gauge"
235 | },
236 | {
237 | "name": "hanadb_disk_io_service_time",
238 | "description": "I/O service time in ms",
239 | "labels": ["HOST", "DISK"],
240 | "value": "SRV_MS",
241 | "unit": "ms",
242 | "type": "gauge"
243 | },
244 | {
245 | "name": "hanadb_disk_io_wait_time",
246 |        "description": "I/O wait time in ms",
247 | "labels": ["HOST", "DISK"],
248 | "value": "WAIT_MS",
249 | "unit": "ms",
250 | "type": "gauge"
251 | },
252 | {
253 | "name": "hanadb_disk_io_requests_per",
254 | "description": "I/O requests per second",
255 | "labels": ["HOST", "DISK"],
256 | "value": "IO_PER_S",
257 | "unit": "second",
258 | "type": "gauge"
259 | },
260 | {
261 | "name": "hanadb_disk_io_throughput",
262 | "description": "I/O throughput in KB/seconds",
263 | "labels": ["HOST", "DISK"],
264 | "value": "TP_KBPS",
265 | "unit": "kb_second",
266 | "type": "gauge"
267 | }
268 | ]
269 | },
270 | "SELECT m.host, LPAD(m.port, 5) port, m.service_name service, TO_DECIMAL(m.shared_memory_allocated_size / 1024 / 1024, 10, 2) shm_alloc_mb, TO_DECIMAL(m.shared_memory_used_size / 1024 / 1024, 10, 2) shm_used_mb, TO_DECIMAL(MAP(m.shared_memory_allocated_size, 0, 0, m.shared_memory_used_size / m.shared_memory_allocated_size * 100), 10, 2) shm_used_pct, TO_DECIMAL(m.heap_memory_allocated_size / 1024 / 1024, 10, 2) heap_alloc_mb, TO_DECIMAL(m.heap_memory_used_size / 1024 / 1024, 10, 2) heap_used_mb, TO_DECIMAL(MAP(m.heap_memory_allocated_size, 0, 0, m.heap_memory_used_size / m.heap_memory_allocated_size * 100), 10, 2) heap_used_pct, TO_DECIMAL(m.total_memory_used_size / 1024 / 1024, 10, 2) total_memory_used_mb, TO_DECIMAL(m.physical_memory_size / 1024 / 1024, 10, 2) total_phys_mem_mb, TO_DECIMAL(m.logical_memory_size / 1024 / 1024, 10, 2) total_logical_mem_mb, TO_DECIMAL(m.code_size / 1024 / 1024, 10, 2) code_size_mem_mb, TO_DECIMAL(m.stack_size / 1024 / 1024, 10, 2) stack_size_mem_mb, TO_DECIMAL(m.compactors_freeable_size / 1024 / 1024, 10, 2) compactors_freeable_size_mem_mb, TO_DECIMAL(m.compactors_allocated_size / 1024 / 1024, 10, 2) compactors_allocated_size_mem_mb, TO_DECIMAL(m.allocation_limit / 1024 / 1024, 10, 2) process_alloc_limit_mb, TO_DECIMAL(m.effective_allocation_limit / 1024 / 1024, 10, 2) effective_proc_alloc_limit_mb FROM sys.m_service_memory m;":
271 | {
272 | "enabled": true,
273 | "metrics": [
274 | {
275 | "name": "hanadb_memory_service_shared_allocated",
276 | "description": "Allocated Shared Memory part of the memory pool per service in MB",
277 | "labels": ["HOST", "PORT", "SERVICE"],
278 | "value": "SHM_ALLOC_MB",
279 | "unit": "mb",
280 | "type": "gauge"
281 | },
282 | {
283 | "name": "hanadb_memory_service_shared_used",
284 | "description": "Used Shared Memory part of the memory pool per service in MB",
285 | "labels": ["HOST", "PORT", "SERVICE"],
286 | "value": "SHM_USED_MB",
287 | "unit": "mb",
288 | "type": "gauge"
289 | },
290 | {
291 | "name": "hanadb_memory_service_shared_used",
292 | "description": "Percentage of used Shared Memory from the memory pool per service",
293 | "labels": ["HOST", "PORT", "SERVICE"],
294 | "value": "SHM_USED_PCT",
295 | "unit": "percent",
296 | "type": "gauge"
297 | },
298 | {
299 | "name": "hanadb_memory_service_heap_allocated",
300 | "description": "Allocated Heap Memory part of the memory pool per service in MB",
301 | "labels": ["HOST", "PORT", "SERVICE"],
302 | "value": "HEAP_ALLOC_MB",
303 | "unit": "mb",
304 | "type": "gauge"
305 | },
306 | {
307 | "name": "hanadb_memory_service_heap_used",
308 | "description": "Used Heap Memory part of the memory pool per service in MB",
309 | "labels": ["HOST", "PORT", "SERVICE"],
310 | "value": "HEAP_USED_MB",
311 | "unit": "mb",
312 | "type": "gauge"
313 | },
314 | {
315 | "name": "hanadb_memory_service_heap_used",
316 | "description": "Percentage of used Heap Memory part of the memory pool per service",
317 | "labels": ["HOST", "PORT", "SERVICE"],
318 | "value": "HEAP_USED_PCT",
319 | "unit": "percent",
320 | "type": "gauge"
321 | },
322 | {
323 | "name": "hanadb_memory_service_total_used",
324 | "description": "Total memory from the memory pool used per services in MB",
325 | "labels": ["HOST", "PORT", "SERVICE"],
326 | "value": "TOTAL_MEMORY_USED_MB",
327 | "unit": "mb",
328 | "type": "gauge"
329 | },
330 | {
331 | "name": "hanadb_memory_service_physical_total",
332 | "description": "Total physical resident memory size (operating system perspective) per service in MB",
333 | "labels": ["HOST", "PORT", "SERVICE"],
334 | "value": "TOTAL_PHYS_MEM_MB",
335 | "unit": "mb",
336 | "type": "gauge"
337 | },
338 | {
339 | "name": "hanadb_memory_service_virtual_total",
340 | "description": "Total Virtual memory size (operating system perspective) per service in MB",
341 | "labels": ["HOST", "PORT", "SERVICE"],
342 | "value": "TOTAL_LOGICAL_MEM_MB",
343 | "unit": "mb",
344 | "type": "gauge"
345 | },
346 | {
347 | "name": "hanadb_memory_service_code_size",
348 | "description": "Code size, including shared libraries, per service in MB",
349 | "labels": ["HOST", "PORT", "SERVICE"],
350 | "value": "CODE_SIZE_MEM_MB",
351 | "unit": "mb",
352 | "type": "gauge"
353 | },
354 | {
355 | "name": "hanadb_memory_service_stack_size",
356 | "description": "Stack size per service in MB",
357 | "labels": ["HOST", "PORT", "SERVICE"],
358 | "value": "STACK_SIZE_MEM_MB",
359 | "unit": "mb",
360 | "type": "gauge"
361 | },
362 | {
363 | "name": "hanadb_memory_service_compactors_freeable_size",
364 | "description": "Memory that can be freed during a memory shortage per service in MB",
365 | "labels": ["HOST", "PORT", "SERVICE"],
366 | "value": "COMPACTORS_FREEABLE_SIZE_MEM_MB",
367 | "unit": "mb",
368 | "type": "gauge"
369 | },
370 | {
371 | "name": "hanadb_memory_service_compactors_allocated_size",
372 | "description": "Part of the memory pool that can potentially (if unpinned) be freed during a memory shortage per service in MB",
373 | "labels": ["HOST", "PORT", "SERVICE"],
374 | "value": "COMPACTORS_ALLOCATED_SIZE_MEM_MB",
375 | "unit": "mb",
376 | "type": "gauge"
377 | },
378 | {
379 | "name": "hanadb_memory_service_alloc_limit",
380 | "description": "Maximum memory pool size (configurable value) per service in MB",
381 | "labels": ["HOST", "PORT", "SERVICE"],
382 | "value": "PROCESS_ALLOC_LIMIT_MB",
383 | "unit": "mb",
384 | "type": "gauge"
385 | },
386 | {
387 | "name": "hanadb_memory_service_effective_alloc_limit",
388 | "description": "Effective maximum memory pool size, considering the pool sizes of other processes (computed value) per service in MB",
389 | "labels": ["HOST", "PORT", "SERVICE"],
390 |        "value": "EFFECTIVE_PROC_ALLOC_LIMIT_MB",
391 | "unit": "mb",
392 | "type": "gauge"
393 | }
394 | ]
395 | },
396 | "SELECT host, ROUND((used_physical_memory + free_physical_memory) / 1024 / 1024, 2) host_physical_mem_mb, ROUND(used_physical_memory / 1024 / 1024, 2) host_resident_mem_mb, ROUND(free_physical_memory / 1024 / 1024, 2) host_free_physical_mem_mb, ROUND(free_swap_space / 1024 / 1024, 2) host_free_swap_mb, ROUND(used_swap_space / 1024 / 1024, 2) host_used_swap_mb, ROUND(allocation_limit / 1024 / 1024, 2) host_alloc_limit_mb, ROUND(instance_total_memory_used_size / 1024 / 1024, 2) host_total_used_mem_mb, ROUND(instance_total_memory_peak_used_size / 1024 / 1024, 2) host_total_peak_used_mem_mb, ROUND(instance_total_memory_allocated_size / 1024 / 1024, 2) host_total_alloc_mem_mb, ROUND(instance_code_size / 1024 / 1024, 2) host_code_size_mb, ROUND(instance_shared_memory_allocated_size / 1024 / 1024, 2) host_shr_mem_alloc_mb FROM sys.m_host_resource_utilization;":
397 | {
398 | "enabled": true,
399 | "metrics": [
400 | {
401 | "name": "hanadb_host_memory_physical_total",
402 | "description": "Total physical memory per host in MB",
403 | "labels": ["HOST"],
404 | "value": "HOST_PHYSICAL_MEM_MB",
405 | "unit": "mb",
406 | "type": "gauge"
407 | },
408 | {
409 | "name": "hanadb_host_memory_resident",
410 | "description": "Total resident (used physical) memory per host in MB",
411 | "labels": ["HOST"],
412 | "value": "HOST_RESIDENT_MEM_MB",
413 | "unit": "mb",
414 | "type": "gauge"
415 | },
416 | {
417 | "name": "hanadb_host_memory_physical_free",
418 | "description": "Free physical memory per host in MB",
419 | "labels": ["HOST"],
420 | "value": "HOST_FREE_PHYSICAL_MEM_MB",
421 | "unit": "mb",
422 | "type": "gauge"
423 | },
424 | {
425 | "name": "hanadb_host_memory_swap_free",
426 | "description": "Free swap memory per host in MB",
427 | "labels": ["HOST"],
428 | "value": "HOST_FREE_SWAP_MB",
429 | "unit": "mb",
430 | "type": "gauge"
431 | },
432 | {
433 | "name": "hanadb_host_memory_swap_used",
434 | "description": "Used swap memory per host in MB",
435 | "labels": ["HOST"],
436 | "value": "HOST_USED_SWAP_MB",
437 | "unit": "mb",
438 | "type": "gauge"
439 | },
440 | {
441 | "name": "hanadb_host_memory_alloc_limit",
442 | "description": "Memory allocation limit for all process per host in MB",
443 | "labels": ["HOST"],
444 | "value": "HOST_ALLOC_LIMIT_MB",
445 | "unit": "mb",
446 | "type": "gauge"
447 | },
448 | {
449 | "name": "hanadb_host_memory_used_total",
450 | "description": "Amount of memory from the memory pool that is currently being used by SAP HANA processes per host in MB",
451 | "labels": ["HOST"],
452 | "value": "HOST_TOTAL_USED_MEM_MB",
453 | "unit": "mb",
454 | "type": "gauge"
455 | },
456 | {
457 | "name": "hanadb_host_memory_used_peak",
458 | "description": "Peak memory from the memory pool used by SAP HANA processes since the instance started (this is a sample-based value) per host in MB",
459 | "labels": ["HOST"],
460 | "value": "HOST_TOTAL_PEAK_USED_MEM_MB",
461 | "unit": "mb",
462 | "type": "gauge"
463 | },
464 | {
465 | "name": "hanadb_host_memory_pool_size",
466 | "description": "Size of the memory pool for all SAP HANA processes per host in MB",
467 | "labels": ["HOST"],
468 | "value": "HOST_TOTAL_ALLOC_MEM_MB",
469 | "unit": "mb",
470 | "type": "gauge"
471 | },
472 | {
473 | "name": "hanadb_host_memory_code_size",
474 | "description": "Code size, including shared libraries of SAP HANA processes per host in MB",
475 | "labels": ["HOST"],
476 | "value": "HOST_CODE_SIZE_MB",
477 | "unit": "mb",
478 | "type": "gauge"
479 | },
480 | {
481 | "name": "hanadb_host_memory_shared_alloc",
482 | "description": "Shared memory size of SAP HANA processes per host in MB",
483 | "labels": ["HOST"],
484 | "value": "HOST_SHR_MEM_ALLOC_MB",
485 | "unit": "mb",
486 | "type": "gauge"
487 | }
488 | ]
489 | },
490 | "SELECT HOST, LPAD(PORT, 5) PORT, SERVICE_NAME SERVICE, SQL_TYPE, EXECUTIONS EXECUTIONS, ROUND(ELAPSED_MS) ELAPSED_MS, TO_DECIMAL(ELA_PER_EXEC_MS, 10, 2) ELA_PER_EXEC_MS, TO_DECIMAL(LOCK_PER_EXEC_MS, 10, 2) LOCK_PER_EXEC_MS, ROUND(MAX_ELA_MS) MAX_ELA_MS FROM( SELECT S.HOST, S.PORT, S.SERVICE_NAME, L.SQL_TYPE, CASE L.SQL_TYPE WHEN 'SELECT' THEN SUM(C.SELECT_EXECUTION_COUNT) WHEN 'SELECT FOR UPDATE' THEN SUM(C.SELECT_FOR_UPDATE_COUNT) WHEN 'INSERT/UPDATE/DELETE' THEN SUM(C.UPDATE_COUNT) WHEN 'READ ONLY TRANSACTION' THEN SUM(C.READ_ONLY_TRANSACTION_COUNT) WHEN 'UPDATE TRANSACTION' THEN SUM(C.UPDATE_TRANSACTION_COUNT) WHEN 'ROLLBACK' THEN SUM(C.ROLLBACK_COUNT) WHEN 'OTHERS' THEN SUM(C.OTHERS_COUNT) WHEN 'PREPARE' THEN SUM(C.TOTAL_PREPARATION_COUNT) END EXECUTIONS, CASE L.SQL_TYPE WHEN 'SELECT' THEN SUM(C.SELECT_TOTAL_EXECUTION_TIME) / 1000 WHEN 'SELECT FOR UPDATE' THEN SUM(C.SELECT_FOR_UPDATE_TOTAL_EXECUTION_TIME) / 1000 WHEN 'INSERT/UPDATE/DELETE' THEN SUM(C.UPDATE_TOTAL_EXECUTION_TIME) / 1000 WHEN 'READ ONLY TRANSACTION' THEN SUM(C.READ_ONLY_TRANSACTION_TOTAL_EXECUTION_TIME) / 1000 WHEN 'UPDATE TRANSACTION' THEN SUM(C.UPDATE_TRANSACTION_TOTAL_EXECUTION_TIME) / 1000 WHEN 'ROLLBACK' THEN SUM(C.ROLLBACK_TOTAL_EXECUTION_TIME) / 1000 WHEN 'OTHERS' THEN SUM(C.OTHERS_TOTAL_EXECUTION_TIME) / 1000 WHEN 'PREPARE' THEN SUM(C.TOTAL_PREPARATION_TIME) / 1000 END ELAPSED_MS, CASE L.SQL_TYPE WHEN 'SELECT' THEN MAP(SUM(C.SELECT_EXECUTION_COUNT), 0, 0, SUM(C.SELECT_TOTAL_EXECUTION_TIME) / 1000 / SUM(C.SELECT_EXECUTION_COUNT)) WHEN 'SELECT FOR UPDATE' THEN MAP(SUM(C.SELECT_FOR_UPDATE_COUNT), 0, 0, SUM(C.SELECT_FOR_UPDATE_TOTAL_EXECUTION_TIME) / 1000 / SUM(C.SELECT_FOR_UPDATE_COUNT)) WHEN 'INSERT/UPDATE/DELETE' THEN MAP(SUM(C.UPDATE_COUNT), 0, 0, SUM(C.UPDATE_TOTAL_EXECUTION_TIME) / 1000 / SUM(C.UPDATE_COUNT)) WHEN 'READ ONLY TRANSACTION' THEN MAP(SUM(C.READ_ONLY_TRANSACTION_COUNT), 0, 0, SUM(C.READ_ONLY_TRANSACTION_TOTAL_EXECUTION_TIME) / 1000 / 
SUM(C.READ_ONLY_TRANSACTION_COUNT)) WHEN 'UPDATE TRANSACTION' THEN MAP(SUM(C.UPDATE_TRANSACTION_COUNT), 0, 0, SUM(C.UPDATE_TRANSACTION_TOTAL_EXECUTION_TIME) / 1000 / SUM(C.UPDATE_TRANSACTION_COUNT)) WHEN 'ROLLBACK' THEN MAP(SUM(C.ROLLBACK_COUNT), 0, 0, SUM(C.ROLLBACK_TOTAL_EXECUTION_TIME) / 1000 / SUM(C.ROLLBACK_COUNT)) WHEN 'OTHERS' THEN MAP(SUM(C.OTHERS_COUNT), 0, 0, SUM(C.OTHERS_TOTAL_EXECUTION_TIME) / 1000 / SUM(C.OTHERS_COUNT)) WHEN 'PREPARE' THEN MAP(SUM(C.TOTAL_PREPARATION_COUNT), 0, 0, SUM(C.TOTAL_PREPARATION_TIME) / 1000 / SUM(C.TOTAL_PREPARATION_COUNT)) END ELA_PER_EXEC_MS, CASE L.SQL_TYPE WHEN 'SELECT' THEN 0 WHEN 'SELECT FOR UPDATE' THEN MAP(SUM(C.SELECT_FOR_UPDATE_COUNT), 0, 0, SUM(C.SELECT_FOR_UPDATE_TOTAL_LOCK_WAIT_TIME) / 1000 / SUM(C.SELECT_FOR_UPDATE_COUNT)) WHEN 'INSERT/UPDATE/DELETE' THEN MAP(SUM(C.UPDATE_COUNT), 0, 0, SUM(C.UPDATE_TOTAL_LOCK_WAIT_TIME) / 1000 / SUM(C.UPDATE_COUNT)) WHEN 'READ ONLY TRANSACTION' THEN 0 WHEN 'UPDATE TRANSACTION' THEN 0 WHEN 'ROLLBACK' THEN 0 WHEN 'OTHERS' THEN MAP(SUM(C.OTHERS_COUNT), 0, 0, SUM(C.OTHERS_TOTAL_LOCK_WAIT_TIME) / 1000 / SUM(C.OTHERS_COUNT)) WHEN 'PREPARE' THEN 0 END LOCK_PER_EXEC_MS, CASE L.SQL_TYPE WHEN 'SELECT' THEN MAX(C.SELECT_MAX_EXECUTION_TIME) / 1000 WHEN 'SELECT FOR UPDATE' THEN MAX(C.SELECT_FOR_UPDATE_MAX_EXECUTION_TIME) / 1000 WHEN 'INSERT/UPDATE/DELETE' THEN MAX(C.UPDATE_MAX_EXECUTION_TIME) / 1000 WHEN 'READ ONLY TRANSACTION' THEN MAX(C.READ_ONLY_TRANSACTION_MAX_EXECUTION_TIME) / 1000 WHEN 'UPDATE TRANSACTION' THEN MAX(C.UPDATE_TRANSACTION_MAX_EXECUTION_TIME) / 1000 WHEN 'ROLLBACK' THEN MAX(C.ROLLBACK_MAX_EXECUTION_TIME) / 1000 WHEN 'OTHERS' THEN MAX(C.OTHERS_MAX_EXECUTION_TIME) / 1000 WHEN 'PREPARE' THEN MAX(C.MAX_PREPARATION_TIME) / 1000 END MAX_ELA_MS FROM SYS.M_SERVICES S, ( SELECT 1 LINE_NO, 'SELECT' SQL_TYPE FROM DUMMY UNION ALL ( SELECT 2, 'SELECT FOR UPDATE' FROM DUMMY ) UNION ALL ( SELECT 3, 'INSERT/UPDATE/DELETE' FROM DUMMY ) UNION ALL ( SELECT 4, 'READ ONLY TRANSACTION' FROM 
DUMMY ) UNION ALL ( SELECT 5, 'UPDATE TRANSACTION' FROM DUMMY ) UNION ALL ( SELECT 6, 'ROLLBACK' FROM DUMMY ) UNION ALL ( SELECT 7, 'OTHERS' FROM DUMMY ) UNION ALL ( SELECT 8, 'PREPARE' FROM DUMMY ) ) L, SYS.M_CONNECTION_STATISTICS C WHERE C.HOST = S.HOST AND C.PORT = S.PORT GROUP BY S.HOST, S.PORT, S.SERVICE_NAME, L.SQL_TYPE, L.LINE_NO);":
491 | {
492 | "enabled": true,
493 | "metrics": [
494 | {
495 | "name": "hanadb_sql_service_executions",
496 | "description": "Total number of SQL statements executions by service and SQL type",
497 | "labels": ["HOST", "PORT", "SERVICE", "SQL_TYPE"],
498 | "value": "EXECUTIONS",
499 | "unit": "count",
500 | "type": "gauge"
501 | },
502 | {
503 | "name": "hanadb_sql_service_elapsed_time",
504 |        "description": "Total elapsed time of SQL statements executions by service and SQL type in milliseconds",
505 | "labels": ["HOST", "PORT", "SERVICE", "SQL_TYPE"],
506 | "value": "ELAPSED_MS",
507 | "unit": "ms",
508 | "type": "gauge"
509 | },
510 | {
511 | "name": "hanadb_sql_service_elap_per_exec_avg",
512 |        "description": "Average elapsed time per execution by service and SQL type in milliseconds",
513 | "labels": ["HOST", "PORT", "SERVICE", "SQL_TYPE"],
514 | "value": "ELA_PER_EXEC_MS",
515 | "unit": "ms",
516 | "type": "gauge"
517 | },
518 | {
519 | "name": "hanadb_sql_service_lock_per_exec",
520 |        "description": "Average lock wait time per execution by service and SQL type in milliseconds",
521 | "labels": ["HOST", "PORT", "SERVICE", "SQL_TYPE"],
522 | "value": "LOCK_PER_EXEC_MS",
523 | "unit": "ms",
524 | "type": "gauge"
525 | },
526 | {
527 | "name": "hanadb_sql_service_max_ela_time",
528 |        "description": "Maximum elapsed time per execution by service and SQL type in milliseconds",
529 | "labels": ["HOST", "PORT", "SERVICE", "SQL_TYPE"],
530 | "value": "MAX_ELA_MS",
531 | "unit": "ms",
532 | "type": "gauge"
533 | }
534 | ]
535 | },
536 | "SELECT TOP 10 host, LPAD(port, 5) port, SUBSTRING(REPLACE_REGEXPR('\n' IN statement_string WITH ' ' OCCURRENCE ALL), 1,30) sql_string, statement_hash sql_hash, execution_count, total_execution_time + total_preparation_time total_elapsed_time FROM sys.m_sql_plan_cache ORDER BY total_elapsed_time, execution_count DESC;":
537 | {
538 | "enabled": true,
539 | "metrics": [
540 | {
541 | "name": "hanadb_sql_top_time_consumers",
542 | "description": "Top statements time consumers. Sum of the time consumed in all executions in Microseconds",
543 | "labels": ["HOST", "PORT", "SQL_STRING", "SQL_HASH"],
544 | "value": "TOTAL_ELAPSED_TIME",
545 | "unit": "mu",
546 | "type": "gauge"
547 | },
548 | {
549 | "name": "hanadb_sql_top_time_consumers",
550 | "description": "Top statements time consumers. Number of total executions of the SQL Statement",
551 | "labels": ["HOST", "PORT", "SQL_STRING", "SQL_HASH"],
552 | "value": "EXECUTION_COUNT",
553 | "unit": "count",
554 | "type": "gauge"
555 | }
556 | ]
557 | },
558 | "SELECT TOP 10 host, LPAD(port, 5) port, SUBSTRING(REPLACE_REGEXPR('\n' IN statement_string WITH ' ' OCCURRENCE ALL), 1,30) sql_string, statement_hash sql_hash, execution_count, total_execution_memory_size FROM sys.m_sql_plan_cache ORDER BY total_execution_memory_size, execution_count DESC;":
559 | {
560 | "enabled": true,
561 | "metrics": [
562 | {
563 | "name": "hanadb_sql_top_mem_consumers",
564 | "description": "Top statements memory consumers. Specifies the total size of tracked actual memory consumption in bytes",
565 | "labels": ["HOST", "PORT", "SQL_STRING", "SQL_HASH"],
566 | "value": "TOTAL_EXECUTION_MEMORY_SIZE",
567 | "unit": "byte",
568 | "type": "gauge"
569 | },
570 | {
571 | "name": "hanadb_sql_top_mem_consumers",
572 | "description": "Top statements time consumers. Number of total executions of the SQL Statement",
573 | "labels": ["HOST", "PORT", "SQL_STRING", "SQL_HASH"],
574 | "value": "EXECUTION_COUNT",
575 | "unit": "count",
576 | "type": "gauge"
577 | }
578 | ]
579 | },
580 | "SELECT host, LPAD(port, 5) port, connection_type, MAP(connection_status,'','N/A', connection_status) connection_status, COUNT(1) total_connections FROM SYS.M_CONNECTIONS GROUP BY host, port, connection_status, connection_type;":
581 | {
582 | "enabled": true,
583 | "metrics": [
584 | {
585 | "name": "hanadb_connections_total",
586 | "description": "Number of connections grouped by Type and Status",
587 | "labels": ["HOST", "PORT", "CONNECTION_TYPE", "CONNECTION_STATUS"],
588 | "value": "TOTAL_CONNECTIONS",
589 | "unit": "count",
590 | "type": "gauge"
591 | }
592 | ]
593 | },
594 | "SELECT TOP 10 ct.host, LPAD(ct.port,5) port, ct.schema_name, ct.table_name, TO_DECIMAL(ct.memory_size_in_total / 1024 / 1024, 10, 2) memory_size_in_total_mb, TO_DECIMAL(ct.estimated_max_memory_size_in_total / 1024 / 1024, 10, 2) estimated_max_mem_total_mb, ct.record_count, TO_DECIMAL(tps.disk_size / 1024 / 1024, 10, 2) disk_size_mb FROM sys.m_cs_tables ct, sys.m_table_persistence_statistics tps WHERE ct.schema_name = tps.schema_name AND ct.table_name = tps.table_name ORDER BY ct.memory_size_in_total DESC;":
595 | {
596 | "enabled": true,
597 | "metrics": [
598 | {
599 | "name": "hanadb_table_cs_top_mem_total",
600 | "description": "Top 10 tables consuming more memory. The total memory size (MB) is the sum of memory size in the main, delta, and history parts",
601 | "labels": ["HOST", "PORT", "SCHEMA_NAME", "TABLE_NAME"],
602 | "value": "MEMORY_SIZE_IN_TOTAL_MB",
603 | "unit": "mb",
604 | "type": "gauge"
605 | },
606 | {
607 | "name": "hanadb_table_cs_top_mem_estimated_max",
608 | "description": "Top 10 tables consuming more memory. The estimated maximum memory consumption (MB), in total, for the fully loaded table (data for open transactions is not included)",
609 | "labels": ["HOST", "PORT", "SCHEMA_NAME", "TABLE_NAME"],
610 | "value": "ESTIMATED_MAX_MEM_TOTAL_MB",
611 | "unit": "mb",
612 | "type": "gauge"
613 | },
614 | {
615 | "name": "hanadb_table_cs_top_mem_record",
616 | "description": "Top 10 tables consuming more memory. The record count of the table",
617 | "labels": ["HOST", "PORT", "SCHEMA_NAME", "TABLE_NAME"],
618 | "value": "RECORD_COUNT",
619 | "unit": "count",
620 | "type": "gauge"
621 | },
622 | {
623 | "name": "hanadb_table_cs_top_mem_disk_size",
624 | "description": "Top 10 tables consuming more memory. The total disk size of all of the table parts",
625 | "labels": ["HOST", "PORT", "SCHEMA_NAME", "TABLE_NAME"],
626 | "value": "DISK_SIZE_MB",
627 | "unit": "mb",
628 | "type": "gauge"
629 | }
630 | ]
631 | },
632 | "SELECT host, LPAD(port, 5) port, site_name, secondary_site_name, secondary_host, LPAD(secondary_port, 5) secondary_port, replication_mode, MAP(secondary_active_status, 'YES', 1,0) secondary_active_status, MAP(UPPER(replication_status),'ACTIVE',0,'ERROR', 4, 'SYNCING',2, 'INITIALIZING',1,'UNKNOWN', 3, 99) replication_status, TO_DECIMAL(SECONDS_BETWEEN(SHIPPED_LOG_POSITION_TIME, LAST_LOG_POSITION_TIME), 10, 2) ship_delay_s, TO_DECIMAL((LAST_LOG_POSITION - SHIPPED_LOG_POSITION) * 64 / 1024 / 1024, 10, 2) async_buff_used_mb, secondary_reconnect_count, secondary_failover_count FROM sys.m_service_replication;":
633 | {
634 | "enabled": true,
635 | "metrics": [
636 | {
637 | "name": "hanadb_sr_ship_delay",
638 | "description": "System Replication log shipping delay in seconds",
639 | "labels": ["HOST", "PORT", "SITE_NAME", "SECONDARY_SITE_NAME", "SECONDARY_HOST", "SECONDARY_PORT", "REPLICATION_MODE"],
640 | "value": "SHIP_DELAY_S",
641 | "unit": "seconds",
642 | "type": "gauge"
643 | },
644 | {
645 | "name": "hanadb_sr_async_used_shipping_buffer",
646 | "description": "System Replication current filling level of asynchronous log shipping buffer (MB)",
647 | "labels": ["HOST", "PORT", "SITE_NAME", "SECONDARY_SITE_NAME", "SECONDARY_HOST", "SECONDARY_PORT", "REPLICATION_MODE"],
648 | "value": "ASYNC_BUFF_USED_MB",
649 | "unit": "mb",
650 | "type": "gauge"
651 | },
652 | {
653 | "name": "hanadb_sr_secondary_active",
654 | "description": "System Replication - Specifies the secondary active status. Values: 0 - INACTIVE and 1 - ACTIVE",
655 | "labels": ["HOST", "PORT", "SITE_NAME", "SECONDARY_SITE_NAME", "SECONDARY_HOST", "SECONDARY_PORT", "REPLICATION_MODE"],
656 | "value": "SECONDARY_ACTIVE_STATUS",
657 | "unit": "status",
658 | "type": "gauge"
659 | },
660 | {
661 | "name": "hanadb_sr_replication",
662 | "description": "System Replication status. Values: 0-ACTIVE, 1-INITIALIZING, 2-SYNCING, 3-UNKNOWN, 4-ERROR, 99-UNMAPPED ",
663 | "labels": ["HOST", "PORT", "SITE_NAME", "SECONDARY_SITE_NAME", "SECONDARY_HOST", "SECONDARY_PORT", "REPLICATION_MODE"],
664 | "value": "REPLICATION_STATUS",
665 | "unit": "status",
666 | "type": "gauge"
667 | },
668 | {
669 | "name": "hanadb_sr_secondary_reconnect",
670 | "description": "System Replication secondary reconnect count",
671 | "labels": ["HOST", "PORT", "SITE_NAME", "SECONDARY_SITE_NAME", "SECONDARY_HOST", "SECONDARY_PORT", "REPLICATION_MODE"],
672 | "value": "SECONDARY_RECONNECT_COUNT",
673 | "unit": "count",
674 | "type": "gauge"
675 | },
676 | {
677 | "name": "hanadb_sr_secondary_failover",
678 | "description": "System Replication secondary failover count",
679 | "labels": ["HOST", "PORT", "SITE_NAME", "SECONDARY_SITE_NAME", "SECONDARY_HOST", "SECONDARY_PORT", "REPLICATION_MODE"],
680 | "value": "SECONDARY_FAILOVER_COUNT",
681 | "unit": "count",
682 | "type": "gauge"
683 | }
684 | ]
685 | },
686 | "SELECT TOP 10 TO_VARCHAR(RT.TAKEOVER_START_TIME) START_TIME, MAP(RT.TAKEOVER_END_TIME, NULL, 'N/A', TO_VARCHAR(RT.TAKEOVER_END_TIME)) END_TIME, MAP(SECONDS_BETWEEN(RT.TAKEOVER_START_TIME, RT.TAKEOVER_END_TIME),NULL, -1,SECONDS_BETWEEN(RT.TAKEOVER_START_TIME, RT.TAKEOVER_END_TIME)) DURATION_S, RT.SOURCE_SITE_NAME SRC_SITE_NAME, RT.SOURCE_MASTER_NAMESERVER_HOST SRC_HOST, RT.SITE_NAME TGT_SITE_NAME, RT.MASTER_NAMESERVER_HOST TGT_HOST, RT.TAKEOVER_TYPE TYPE, RT.OPERATION_MODE, MAP(RT.REPLICATION_STATUS,'ACTIVE',0,'ERROR', 4, 'SYNCING',2, 'INITIALIZING',1,'UNKNOWN', 3, 99) REPLICATION_STATUS, TO_VARCHAR(RT.LOG_POSITION_TIME) LOG_POS_TIME, TO_VARCHAR(RT.SHIPPED_LOG_POSITION_TIME) SHIPPED_LOG_POS_TIME, RT.LOG_POSITION, RT.SHIPPED_LOG_POSITION FROM M_SYSTEM_REPLICATION_TAKEOVER_HISTORY RT;":
687 | {
688 | "enabled": true,
689 | "metrics": [
690 | {
691 | "name": "hanadb_sr_takeover_replication",
692 | "description": "System Replication status at takeover time. Values: 0-ACTIVE, 1-INITIALIZING, 2-SYNCING, 3-UNKNOWN, 4-ERROR, 99-UNMAPPED",
693 | "labels": ["START_TIME", "END_TIME", "SRC_SITE_NAME", "SRC_HOST", "TGT_SITE_NAME", "TGT_HOST", "OPERATION_MODE", "LOG_POS_TIME", "SHIPPED_LOG_POS_TIME"],
694 | "value": "REPLICATION_STATUS",
695 | "unit": "status",
696 | "type": "gauge"
697 | },
698 | {
699 | "name": "hanadb_sr_takeover_duration_time",
700 | "description": "System Replication takeover duration time. Returns -1 if cannot be calculated, together with END_TIME=N/A. It is a workaround to when HANA cannot collect this value",
701 | "labels": ["START_TIME", "END_TIME", "SRC_SITE_NAME", "SRC_HOST", "TGT_SITE_NAME", "TGT_HOST", "OPERATION_MODE", "LOG_POS_TIME", "SHIPPED_LOG_POS_TIME"],
702 | "value": "DURATION_S",
703 | "unit": "seconds",
704 | "type": "gauge"
705 | },
706 | {
707 | "name": "hanadb_sr_takeover_log_position",
708 | "description": "Specifies the master log position, that has been reached by takeover",
709 | "labels": ["START_TIME", "END_TIME", "SRC_SITE_NAME", "SRC_HOST", "TGT_SITE_NAME", "TGT_HOST", "OPERATION_MODE", "LOG_POS_TIME", "SHIPPED_LOG_POS_TIME"],
710 | "value": "LOG_POSITION",
711 | "unit": "bigint",
712 | "type": "gauge"
713 | },
714 | {
715 | "name": "hanadb_sr_takeover_shipped_log_position",
716 | "description": "Specifies the highest master log position that has been shipped before executing takeover",
717 | "labels": ["START_TIME", "END_TIME", "SRC_SITE_NAME", "SRC_HOST", "TGT_SITE_NAME", "TGT_HOST", "OPERATION_MODE", "LOG_POS_TIME", "SHIPPED_LOG_POS_TIME"],
718 | "value": "SHIPPED_LOG_POSITION",
719 | "unit": "bigint",
720 | "type": "gauge"
721 | }
722 | ]
723 | },
724 | "SELECT alert_host host, LPAD(alert_port,5) port, alert_details, alert_useraction,to_varchar(alert_timestamp) alert_timestamp, to_varchar(alert_rating) alert_rating FROM _SYS_STATISTICS.STATISTICS_CURRENT_ALERTS;":
725 | {
726 | "enabled": true,
727 | "metrics": [
728 | {
729 | "name": "hanadb_alerts_current",
730 | "description": "HANA Database current alerts rating",
731 | "labels": ["HOST", "PORT", "ALERT_DETAILS", "ALERT_USERACTION", "ALERT_TIMESTAMP"],
732 | "value": "ALERT_RATING",
733 | "unit": "rating",
734 | "type": "gauge"
735 | }
736 | ]
737 | }
738 | }
739 |
--------------------------------------------------------------------------------
/packaging/obs/grafana-sap-hana-dashboards/_service:
--------------------------------------------------------------------------------
1 |
2 |
3 | git://github.com/%%REPOSITORY%%.git
4 | git
5 | %%REVISION%%
6 | dashboards
7 | LICENSE
8 | 1.0.2+git.%ct.%h
9 | grafana-sap-hana-dashboards
10 |
11 |
12 | grafana-sap-hana-dashboards.spec
13 |
14 |
15 | *.tar
16 | gz
17 |
18 |
19 |
--------------------------------------------------------------------------------
/packaging/obs/grafana-sap-hana-dashboards/grafana-sap-hana-dashboards.changes:
--------------------------------------------------------------------------------
1 | -------------------------------------------------------------------
2 | Fri Jun 11 19:30:21 UTC 2021 - Bernd Schubert
3 |
4 | - Release 1.0.3
5 | * The HANA system replication history panel could not show data, because of
6 | the node_name variable does not match to the output string from the exporter.
7 | Changing "host=" value to "src_host=" solves the issue. Fix is tested with
8 | HANA SPS040 and HANA SPS050.
  (bsc#1187243)
10 |
11 | -------------------------------------------------------------------
12 | Wed Sep 16 13:35:38 UTC 2020 - Dario Maiocchi
13 |
14 | - Release 1.0.2
15 | * Remove useless macro and fix typo in the meanwhile
16 |
17 | -------------------------------------------------------------------
18 | Wed Aug 5 11:34:45 UTC 2020 - Stefano Torresi
19 |
20 | - Release 1.0.1
21 | * Remove "detail" word from file names for simplicity
22 | * Update title and description
23 |
24 | -------------------------------------------------------------------
25 | Wed Jul 8 11:33:47 UTC 2020 - Stefano Torresi
26 |
27 | - First release
28 |
--------------------------------------------------------------------------------
/packaging/obs/grafana-sap-hana-dashboards/grafana-sap-hana-dashboards.spec:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright 2020 SUSE LLC
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # https://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | #
16 | Name: grafana-sap-hana-dashboards
17 | # Version will be processed via set_version source service
18 | Version: 0
19 | Release: 0
20 | License: Apache-2.0
Summary:        Grafana Dashboards displaying metrics about SAP HANA databases
22 | Group: System/Monitoring
23 | Url: https://github.com/SUSE/hanadb_exporter
24 | Source: %{name}-%{version}.tar.gz
25 | BuildArch: noarch
26 | Requires: grafana-sap-providers
27 | BuildRequires: grafana-sap-providers
28 |
29 | %description
30 | Grafana Dashboards displaying metrics about SAP HANA databases.
31 |
32 | %prep
33 | %setup -q
34 |
35 | %build
36 |
37 | %install
38 | install -dm0755 %{buildroot}%{_localstatedir}/lib/grafana/dashboards/sles4sap
39 | install -m644 dashboards/*.json %{buildroot}%{_localstatedir}/lib/grafana/dashboards/sles4sap
40 |
41 | %files
42 | %defattr(-,root,root)
43 | %doc dashboards/README.md
44 | %license LICENSE
45 | %attr(0755,grafana,grafana) %dir %{_localstatedir}/lib/grafana/dashboards/sles4sap
46 | %attr(0644,grafana,grafana) %config %{_localstatedir}/lib/grafana/dashboards/sles4sap/*
47 |
48 | %changelog
49 |
--------------------------------------------------------------------------------
/packaging/obs/prometheus-hanadb_exporter/_service:
--------------------------------------------------------------------------------
1 |
2 |
3 | https://github.com/%%REPOSITORY%%.git
4 | git
5 | .git
6 | .ci
7 | .github
8 | dashboards
9 | Makefile
10 | packaging
11 | prometheus-hanadb_exporter
12 | %%VERSION%%
13 | %%REVISION%%
14 |
15 |
16 |
17 | *.tar
18 | gz
19 |
20 |
21 |
22 | prometheus-hanadb_exporter
23 |
24 |
25 |
--------------------------------------------------------------------------------
/packaging/obs/prometheus-hanadb_exporter/prometheus-hanadb_exporter.spec:
--------------------------------------------------------------------------------
1 | #
2 | # spec file for package prometheus-hanadb_exporter
3 | #
4 | # Copyright (c) 2022-2024 SUSE LLC
5 | #
6 | # All modifications and additions to the file contributed by third parties
7 | # remain the property of their copyright owners, unless otherwise agreed
8 | # upon. The license for this file, and modifications and additions to the
9 | # file, is the same license as for the pristine package itself (unless the
10 | # license for the pristine package is not an Open Source License, in which
11 | # case the license is the MIT License). An "Open Source License" is a
12 | # license that conforms to the Open Source Definition (Version 1.9)
13 | # published by the Open Source Initiative.
14 |
15 | # Please submit bugfixes or comments via https://bugs.opensuse.org/
16 | #
17 |
18 | %if 0%{?suse_version} < 1500
19 | %bcond_with test
20 | %else
21 | %bcond_without test
22 | %endif
23 |
24 | %define _prefix /usr
25 | %define oldsyscondir /etc
26 | %define _sysconfdir %{_prefix}/etc
27 | %define pythons python3
28 |
29 | %{?!python_module:%define python_module() python3-%{**}}
30 | Name: prometheus-hanadb_exporter
31 | Version: 0
32 | Release: 0
33 | Summary: SAP HANA database metrics exporter
34 | License: Apache-2.0
35 | Group: System/Monitoring
36 | URL: https://github.com/SUSE/hanadb_exporter
37 | Source: %{name}-%{version}.tar.gz
38 | %if %{with test}
39 | BuildRequires: %{python_module pytest}
40 | BuildRequires: %{python_module boto3}
41 | BuildRequires: %{python_module certifi}
42 | %endif
43 | BuildRequires: %{python_module setuptools}
44 | BuildRequires: python-rpm-macros
45 | Provides: hanadb_exporter = %{version}-%{release}
46 | BuildRequires: fdupes
47 | BuildRequires: systemd-rpm-macros
48 | %{?systemd_requires}
49 | Requires: %{python_module prometheus_client} >= 0.6.0
50 | Requires: %{python_module shaptools}
51 | Requires: %{python_module boto3}
52 | Recommends: %{python_module certifi}
53 | BuildArch: noarch
54 |
55 | %description
56 | SAP HANA database metrics exporter
57 |
58 | %define shortname hanadb_exporter
59 |
60 | %prep
61 | %setup -q -n %{name}-%{version}
62 |
63 | %build
64 | %python_build
65 |
66 | %install
67 | %python_install
68 | %python_expand %fdupes %{buildroot}%{$python_sitelib}
69 | # do not install tests
70 | %python_expand rm -r %{buildroot}%{$python_sitelib}/tests
71 |
72 | # Add daemon files
73 | mkdir -p %{buildroot}%{oldsyscondir}/%{shortname}
74 | mkdir -p %{buildroot}%{_sysconfdir}/%{shortname}
75 | install -D -m 644 daemon/%{shortname}@.service %{buildroot}%{_unitdir}/%{name}@.service
76 |
77 | install -D -m 0644 config.json.example %{buildroot}%{_docdir}/%{name}/config.json.example
78 | install -D -m 0644 metrics.json %{buildroot}%{_docdir}/%{name}/metrics.json
79 | install -D -m 0644 logging_config.ini %{buildroot}%{_docdir}/%{name}/logging_config.ini
80 | install -D -m 755 bin/supportconfig-hanadb_exporter %{buildroot}%{_prefix}/lib/supportconfig/plugins/%{shortname}
81 |
82 | %post
83 | %service_add_post %{name}@.service
84 | rm -rf %{_sysconfdir}/%{shortname}/*
85 | ln -s %{_docdir}/%{name}/config.json.example %{_sysconfdir}/%{shortname}/config.json.example
86 | ln -s %{_docdir}/%{name}/metrics.json %{_sysconfdir}/%{shortname}/metrics.json
87 | ln -s %{_docdir}/%{name}/logging_config.ini %{_sysconfdir}/%{shortname}/logging_config.ini
88 |
89 | %pre
90 | %service_add_pre %{name}@.service
91 |
92 | %preun
93 | %service_del_preun %{name}@.service
94 |
95 | %postun
96 | %service_del_postun %{name}@.service
97 |
98 | %if %{with test}
99 | %check
100 | pytest tests
101 | %endif
102 |
103 | %files
104 | %defattr(-,root,root,-)
105 | %if 0%{?sle_version:1} && 0%{?sle_version} < 120300
106 | %doc README.md docs/METRICS.md LICENSE
107 | %else
108 | %doc README.md docs/METRICS.md
109 | %license LICENSE
110 | %endif
111 | %{python_sitelib}/*
112 | %{_bindir}/%{shortname}
113 |
114 | %dir %{_sysconfdir}
115 | %dir %{oldsyscondir}/%{shortname}
116 | %dir %{_sysconfdir}/%{shortname}
117 | %{_docdir}/%{name}/config.json.example
118 | %{_docdir}/%{name}/metrics.json
119 | %{_docdir}/%{name}/logging_config.ini
120 | %{_unitdir}/%{name}@.service
121 | %dir %{_prefix}/lib/supportconfig
122 | %dir %{_prefix}/lib/supportconfig/plugins
123 | %{_prefix}/lib/supportconfig/plugins/%{shortname}
124 |
125 | %changelog
126 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | python_files = *_test.py
3 | testpaths = tests
4 | norecursedirs =
5 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | prometheus-client>=0.6.0
2 | git+https://github.com/SUSE/shaptools.git#egg=shaptools>=0.3.2
3 | pyhdb>=0.3.4
4 | certifi>=2018.1.18
5 | boto3
6 | requests
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | """
2 | Setup script.
3 |
4 | :author: xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: xarbulu@suse.de
7 |
8 | :since: 2018-11-13
9 | """
10 |
11 | import os
12 |
13 | from setuptools import find_packages
14 | try:
15 | from setuptools import setup
16 | except ImportError:
17 | from distutils.core import setup
18 |
19 | from hanadb_exporter import __version__
20 |
21 |
def read(fname):
    """
    Return the full content of *fname*, resolved relative to this file.

    Used at packaging time to load the README (long description), the
    LICENSE text and the requirements file.
    """
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(os.path.join(os.path.dirname(__file__), fname)) as source:
        return source.read()
29 |
def read_dependencies(fname):
    """
    Read the requirements file and return only plain requirement specifiers.

    Direct repository URLs (``git+...``) are excluded: they are not valid
    ``install_requires`` entries and are passed via ``dependency_links``
    instead (see read_links).
    """
    # NOTE: the previous implementation called list.remove() while iterating
    # the same list, which skips the element following each removal (two
    # consecutive git+ lines would leave the second one in). Building a new
    # filtered list avoids that bug.
    return [
        requirement for requirement in read(fname).splitlines()
        if not requirement.startswith("git+")
    ]
40 |
def read_links(fname):
    """
    Read the requirements file and return only the VCS repository links
    (``git+``, ``svn+`` or ``hg+`` prefixed entries) for dependency_links.
    """
    # NOTE: the previous implementation removed non-matching entries while
    # iterating the same list, which skips the element after each removal
    # and could leave plain requirements in the result. Filtering into a
    # new list avoids that bug.
    return [
        link for link in read(fname).splitlines()
        if link.startswith(("git+", "svn+", "hg+"))
    ]
51 |
# Package metadata, collected here and forwarded to setuptools.setup() below.
VERSION = __version__
NAME = "hanadb_exporter"
DESCRIPTION = "SAP HANA database data exporter"

AUTHOR = "xarbulu"
AUTHOR_EMAIL = "xarbulu@suse.de"
URL = ""

# NOTE(review): this embeds the complete LICENSE file text in the metadata
# field, which conventionally holds a short identifier such as "Apache-2.0"
# — confirm this is intended.
LICENSE = read('LICENSE')

CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: Developers",
    "License :: Other/Proprietary License",
    "Natural Language :: English",
    "Operating System :: Unix",
    "Operating System :: Microsoft :: Windows",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3 :: Only",
]

# Executable entry point installed into the user's PATH.
SCRIPTS = ['bin/hanadb_exporter']

# Regular requirements go to install_requires; VCS links to dependency_links.
DEPENDENCIES = read_dependencies('requirements.txt')
LINKS = read_links('requirements.txt')

PACKAGE_DATA = {}
DATA_FILES = []


# All keyword arguments for setuptools.setup(), gathered in one place.
SETUP_PARAMS = dict(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    long_description=read('README.md'),
    packages=find_packages(),
    package_data=PACKAGE_DATA,
    license=LICENSE,
    scripts=SCRIPTS,
    data_files=DATA_FILES,
    install_requires=DEPENDENCIES,
    dependency_links=LINKS,
    classifiers=CLASSIFIERS,
)
100 |
def main():
    """
    Setup.py entry point: forwards the collected metadata to setuptools.
    """

    setup(**SETUP_PARAMS)

if __name__ == "__main__":
    main()
110 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/hanadb_exporter/4780a8a6f3509a0fd2658c423df39168271f7d20/tests/__init__.py
--------------------------------------------------------------------------------
/tests/db_manager_test.py:
--------------------------------------------------------------------------------
1 | """
2 | Unitary tests for exporters/db_manager.py.
3 |
4 | :author: xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: xarbulu@suse.com
7 |
8 | :since: 2019-10-25
9 | """
10 |
11 | # pylint:disable=C0103,C0111,W0212,W0611
12 |
13 | import os
14 | import sys
15 | import pytest
16 |
17 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
18 |
19 | try:
20 | from unittest import mock
21 | except ImportError:
22 | import mock
23 |
24 | sys.modules['shaptools'] = mock.MagicMock()
25 | from hanadb_exporter import db_manager
26 |
27 |
28 | class TestDatabaseManager(object):
29 | """
30 | Unitary tests for hanadb_exporter/db_manager.py.
31 | """
32 |
    @mock.patch('hanadb_exporter.db_manager.hdb_connector.HdbConnector')
    def setup(self, mock_hdb):
        """
        Per-test setup: create the DatabaseManager under test with the
        HdbConnector class mocked out, and check it is instantiated once.
        """

        self._db_manager = db_manager.DatabaseManager()
        mock_hdb.assert_called_once_with()
41 |
    @mock.patch('hanadb_exporter.db_manager.utils.format_query_result')
    def test_get_tenants_port(self, mock_format_query):
        """
        _get_tenants_port must yield (database_name, sql_port) pairs for the
        tenant databases only, skipping the SYSTEMDB entry, and convert the
        port string to an int.
        """
        self._db_manager._system_db_connector = mock.Mock()
        self._db_manager._system_db_connector.query.return_value = 'result'
        ports = ['30040', '30041']
        dbs = ['PRD', 'QAS']
        mock_format_query.return_value = [
            {'DATABASE_NAME': dbs[0], 'SQL_PORT': ports[0]},
            {'DATABASE_NAME': dbs[1], 'SQL_PORT': ports[1]},
            {'DATABASE_NAME': 'SYSTEMDB', 'SQL_PORT': '30013'}]

        for i, data in enumerate(self._db_manager._get_tenants_port()):
            assert data[0] == dbs[i]
            assert data[1] == int(ports[i])
        assert i == 1 # Check only the ports 30040 and 30041 are yielded
57 |
    @mock.patch('hanadb_exporter.db_manager.hdb_connector.HdbConnector')
    def test_connect_tenants(self, mock_hdb):
        """
        _connect_tenants must create one connector per tenant port, connect
        each with the given connection data, and store them in order.
        """
        self._db_manager._get_tenants_port = mock.Mock(return_value=[
            ('db1', 1), ('db2', 2),('db3', 3)])

        mock_conn1 = mock.Mock()
        mock_conn2 = mock.Mock()
        mock_conn3 = mock.Mock()

        mock_hdb.side_effect = [mock_conn1, mock_conn2, mock_conn3]

        connection_data = {'mock_data': 'data'}

        self._db_manager._connect_tenants('10.10.10.10', connection_data)

        assert mock_hdb.call_count == 3

        mock_conn1.connect.assert_called_once_with('10.10.10.10', 1, **connection_data)
        mock_conn2.connect.assert_called_once_with('10.10.10.10', 2, **connection_data)
        mock_conn3.connect.assert_called_once_with('10.10.10.10', 3, **connection_data)

        assert self._db_manager._db_connectors == [mock_conn1, mock_conn2, mock_conn3]
81 |
    @mock.patch('hanadb_exporter.db_manager.hdb_connector.HdbConnector')
    def test_connect_tenants_userkey(self, mock_hdb):
        """
        When a userkey is present in the connection data, _connect_tenants
        must add the tenant's databaseName to each per-tenant connection.
        """
        self._db_manager._get_tenants_port = mock.Mock(return_value=[
            ('db1', 1), ('db2', 2),('db3', 3)])

        mock_conn1 = mock.Mock()
        mock_conn2 = mock.Mock()
        mock_conn3 = mock.Mock()

        mock_hdb.side_effect = [mock_conn1, mock_conn2, mock_conn3]

        connection_data = {'mock_data': 'data', 'userkey': 'userkey'}
        # Expected per-tenant data: the original dict plus databaseName.
        updated_connection_data = [
            {'mock_data': 'data', 'userkey': 'userkey', 'databaseName': 'db1'},
            {'mock_data': 'data', 'userkey': 'userkey', 'databaseName': 'db2'},
            {'mock_data': 'data', 'userkey': 'userkey', 'databaseName': 'db3'}
        ]

        self._db_manager._connect_tenants('10.10.10.10', connection_data)

        assert mock_hdb.call_count == 3

        mock_conn1.connect.assert_called_once_with('10.10.10.10', 1, **updated_connection_data[0])
        mock_conn2.connect.assert_called_once_with('10.10.10.10', 2, **updated_connection_data[1])
        mock_conn3.connect.assert_called_once_with('10.10.10.10', 3, **updated_connection_data[2])

        assert self._db_manager._db_connectors == [mock_conn1, mock_conn2, mock_conn3]
110 |
    @mock.patch('logging.Logger.warn')
    @mock.patch('hanadb_exporter.db_manager.hdb_connector.connectors.base_connector')
    @mock.patch('hanadb_exporter.db_manager.hdb_connector.HdbConnector')
    def test_connect_tenants_error_connecting(self, mock_hdb, mock_connector, mock_warn):
        """
        A tenant whose connect() raises ConnectionError must be skipped with
        a warning; the successfully connected tenants are still kept.
        """
        self._db_manager._get_tenants_port = mock.Mock(return_value=[
            ('db1', 1), ('db2', 2),('db3', 3)])

        mock_connector.ConnectionError = Exception
        mock_conn1 = mock.Mock()
        mock_conn2 = mock.Mock()
        mock_conn3 = mock.Mock()
        # Only the third tenant fails to connect.
        mock_conn3.connect.side_effect = mock_connector.ConnectionError('err')

        mock_hdb.side_effect = [mock_conn1, mock_conn2, mock_conn3]

        connection_data = {'mock_data': 'data', 'userkey': 'userkey'}
        updated_connection_data = [
            {'mock_data': 'data', 'userkey': 'userkey', 'databaseName': 'db1'},
            {'mock_data': 'data', 'userkey': 'userkey', 'databaseName': 'db2'},
            {'mock_data': 'data', 'userkey': 'userkey', 'databaseName': 'db3'}
        ]

        self._db_manager._connect_tenants('10.10.10.10', connection_data)

        assert mock_hdb.call_count == 3

        mock_conn1.connect.assert_called_once_with('10.10.10.10', 1, **updated_connection_data[0])
        mock_conn2.connect.assert_called_once_with('10.10.10.10', 2, **updated_connection_data[1])
        mock_conn3.connect.assert_called_once_with('10.10.10.10', 3, **updated_connection_data[2])

        # db3 is excluded from the stored connectors and reported via warn.
        assert self._db_manager._db_connectors == [mock_conn1, mock_conn2]
        mock_warn.assert_called_once_with(
            'Could not connect to TENANT database %s with error: %s', 'db3', str('err'))
145 |
146 | def test_get_connection_data_invalid_data(self):
147 |
148 | with pytest.raises(ValueError) as err:
149 | self._db_manager._get_connection_data(None, '', '')
150 | assert 'Provided user data is not valid. userkey or user/password pair must be provided' \
151 | in str(err.value)
152 |
153 | with pytest.raises(ValueError) as err:
154 | self._db_manager._get_connection_data(None, 'user', '')
155 | assert 'Provided user data is not valid. userkey or user/password pair must be provided' \
156 | in str(err.value)
157 |
158 | with pytest.raises(ValueError) as err:
159 | self._db_manager._get_connection_data(None, '', 'pass')
160 | assert 'Provided user data is not valid. userkey or user/password pair must be provided' \
161 | in str(err.value)
162 |
163 | @mock.patch('hanadb_exporter.db_manager.hdb_connector')
164 | def test_get_connection_data_not_supported(self, mock_api):
165 |
166 | mock_api.API = 'pyhdb'
167 | with pytest.raises(db_manager.UserKeyNotSupportedError) as err:
168 | self._db_manager._get_connection_data('userkey', '', '')
169 | assert 'userkey usage is not supported with pyhdb connector, hdbcli must be installed' \
170 | in str(err.value)
171 |
172 | @mock.patch('hanadb_exporter.db_manager.hdb_connector')
173 | @mock.patch('logging.Logger.warn')
174 | @mock.patch('logging.Logger.info')
175 | def test_get_connection_data_userkey(self, logger,logger_warn, mock_api):
176 |
177 | mock_api.API = 'dbapi'
178 | connection_data = self._db_manager._get_connection_data('userkey', '', '')
179 | assert connection_data == {
180 | 'userkey': 'userkey', 'user': '', 'password': '', 'RECONNECT': 'FALSE',
181 | 'encrypt': False, 'sslValidateCertificate': False, 'sslTrustStore': None}
182 | logger.assert_called_once_with(
183 | 'stored user key %s will be used to connect to the database', 'userkey')
184 | assert logger_warn.call_count == 0
185 |
186 | @mock.patch('hanadb_exporter.db_manager.hdb_connector')
187 | @mock.patch('logging.Logger.warn')
188 | @mock.patch('logging.Logger.info')
189 | def test_get_connection_data_userkey_warn(self, logger,logger_warn, mock_api):
190 |
191 | mock_api.API = 'dbapi'
192 | connection_data = self._db_manager._get_connection_data('userkey', 'user', '')
193 | assert connection_data == {
194 | 'userkey': 'userkey', 'user': 'user', 'password': '', 'RECONNECT': 'FALSE',
195 | 'encrypt': False, 'sslValidateCertificate': False, 'sslTrustStore': None}
196 | logger.assert_called_once_with(
197 | 'stored user key %s will be used to connect to the database', 'userkey')
198 | logger_warn.assert_called_once_with(
199 | 'userkey will be used to create the connection. user/password are omitted')
200 |
201 | @mock.patch('hanadb_exporter.db_manager.hdb_connector')
202 | @mock.patch('logging.Logger.info')
203 | def test_get_connection_data_pass(self, logger, mock_api):
204 | mock_api.API = 'dbapi'
205 | connection_data = self._db_manager._get_connection_data(None, 'user', 'pass')
206 | assert connection_data == {
207 | 'userkey': None, 'user': 'user', 'password': 'pass', 'RECONNECT': 'FALSE',
208 | 'encrypt': False, 'sslValidateCertificate': False, 'sslTrustStore': None}
209 | logger.assert_called_once_with(
210 | 'user/password combination will be used to connect to the database')
211 |
212 | @mock.patch('certifi.where')
213 | @mock.patch('hanadb_exporter.db_manager.hdb_connector')
214 | @mock.patch('logging.Logger.info')
215 | def test_get_connection_ssl(self, logger, mock_api, mock_where):
216 | mock_where.return_value = 'my.pem'
217 | mock_api.API = 'dbapi'
218 | connection_data = self._db_manager._get_connection_data(
219 | None, 'user', 'pass', ssl=True, ssl_validate_cert=True)
220 | assert connection_data == {
221 | 'userkey': None, 'user': 'user', 'password': 'pass', 'RECONNECT': 'FALSE',
222 | 'encrypt': True, 'sslValidateCertificate': True, 'sslTrustStore': 'my.pem'}
223 | logger.assert_has_calls([
224 | mock.call('user/password combination will be used to connect to the database'),
225 | mock.call('Using ssl connection...')
226 | ])
227 |
    @mock.patch('hanadb_exporter.db_manager.hdb_connector.connectors.base_connector')
    @mock.patch('logging.Logger.error')
    @mock.patch('time.sleep')
    @mock.patch('time.time')
    def test_start_timeout(self, mock_time, mock_sleep, mock_logger, mock_exception):
        """
        start() must retry the SYSTEMDB connection (sleeping 15s between
        attempts) until the timeout elapses, then raise ConnectionError.
        """
        self._db_manager._get_connection_data = mock.Mock()
        connection_data = {'mock_data': 'data'}
        self._db_manager._get_connection_data.return_value = connection_data

        mock_exception.ConnectionError = Exception
        # Simulated clock: with timeout=2 the loop stops after 3 attempts.
        mock_time.side_effect = [0, 1, 2, 3]
        self._db_manager._system_db_connector = mock.Mock()

        # Every connection attempt fails.
        self._db_manager._system_db_connector.connect.side_effect = [
            mock_exception.ConnectionError('err'),
            mock_exception.ConnectionError('err'),
            mock_exception.ConnectionError('err')]

        # Method under test
        with pytest.raises(mock_exception.ConnectionError) as err:
            self._db_manager.start(
                '10.10.10.10', 30013, user='user', password='pass', multi_tenant=False, timeout=2)

        assert 'timeout reached connecting the System database' in str(err.value)

        self._db_manager._system_db_connector.connect.assert_has_calls([
            mock.call('10.10.10.10', 30013, **connection_data),
            mock.call('10.10.10.10', 30013, **connection_data),
            mock.call('10.10.10.10', 30013, **connection_data)
        ])

        mock_sleep.assert_has_calls([
            mock.call(15),
            mock.call(15),
            mock.call(15)
        ])

        mock_logger.assert_has_calls([
            mock.call('the connection to the system database failed. error message: %s', 'err'),
            mock.call('the connection to the system database failed. error message: %s', 'err'),
            mock.call('the connection to the system database failed. error message: %s', 'err')
        ])

        # No connector is registered on failure.
        assert self._db_manager._db_connectors == []
273 |
    @mock.patch('hanadb_exporter.db_manager.hdb_connector.connectors.base_connector')
    @mock.patch('logging.Logger.error')
    @mock.patch('time.time')
    def test_start_invalid_key(self, mock_time, mock_logger, mock_exception):
        """
        An 'Invalid value for KEY' connection error must abort immediately
        (no retries, no tenant connections) with a descriptive message.
        """
        self._db_manager._get_connection_data = mock.Mock()
        connection_data = {'mock_data': 'data'}
        self._db_manager._get_connection_data.return_value = connection_data

        mock_exception.ConnectionError = Exception
        mock_time.side_effect = [0, 1, 2, 3]
        self._db_manager._system_db_connector = mock.Mock()
        self._db_manager._connect_tenants = mock.Mock()

        # The userkey-specific error message triggers the fail-fast path.
        self._db_manager._system_db_connector.connect.side_effect = [
            mock_exception.ConnectionError('Error: Invalid value for KEY')]

        # Method under test
        with pytest.raises(mock_exception.ConnectionError) as err:
            self._db_manager.start(
                '10.10.10.10', 30013, user='user', password='pass', multi_tenant=False, timeout=2)

        assert 'provided userkey is not valid. Check if dbapi is installed correctly' in str(err.value)

        self._db_manager._system_db_connector.connect.assert_called_once_with(
            '10.10.10.10', 30013, **connection_data)

        mock_logger.assert_called_once_with(
            'the connection to the system database failed. error message: %s',
            'Error: Invalid value for KEY')

        self._db_manager._connect_tenants.assert_not_called()
306 |
    @mock.patch('hanadb_exporter.db_manager.hdb_connector.connectors.base_connector')
    @mock.patch('logging.Logger.error')
    @mock.patch('time.sleep')
    @mock.patch('time.time')
    def test_start_correct(self, mock_time, mock_sleep, mock_logger, mock_exception):
        """
        start() must keep retrying until the SYSTEMDB connection succeeds;
        with multi_tenant=False no tenant connections are attempted.
        """
        self._db_manager._get_connection_data = mock.Mock()
        connection_data = {'mock_data': 'data'}
        self._db_manager._get_connection_data.return_value = connection_data

        mock_exception.ConnectionError = Exception
        mock_time.side_effect = [0, 1, 2, 3]
        self._db_manager._system_db_connector = mock.Mock()
        self._db_manager._connect_tenants = mock.Mock()

        # Two failed attempts, then success (side_effect None).
        self._db_manager._system_db_connector.connect.side_effect = [
            mock_exception.ConnectionError('err'),
            mock_exception.ConnectionError('err'),
            None]

        self._db_manager.start(
            '10.10.10.10', 30013, user='user', password='pass', multi_tenant=False, timeout=2)

        self._db_manager._system_db_connector.connect.assert_has_calls([
            mock.call('10.10.10.10', 30013, **connection_data),
            mock.call('10.10.10.10', 30013, **connection_data),
            mock.call('10.10.10.10', 30013, **connection_data)
        ])

        mock_sleep.assert_has_calls([
            mock.call(15),
            mock.call(15)
        ])

        mock_logger.assert_has_calls([
            mock.call('the connection to the system database failed. error message: %s', 'err'),
            mock.call('the connection to the system database failed. error message: %s', 'err')
        ])

        # Only the system connector is registered; tenants are skipped.
        assert self._db_manager._db_connectors == [self._db_manager._system_db_connector]
        self._db_manager._connect_tenants.assert_not_called()
348 |
349 | @mock.patch('hanadb_exporter.db_manager.hdb_connector.connectors.base_connector')
350 | @mock.patch('logging.Logger.error')
351 | @mock.patch('time.sleep')
352 | @mock.patch('time.time')
353 | def test_start_correct_multitenant(self, mock_time, mock_sleep, mock_logger, mock_exception):
354 |
355 | self._db_manager._get_connection_data = mock.Mock()
356 | connection_data = {'mock_data': 'data'}
357 | self._db_manager._get_connection_data.return_value = connection_data
358 |
359 | mock_exception.ConnectionError = Exception
360 | mock_time.side_effect = [0, 1, 2, 3]
361 | self._db_manager._system_db_connector = mock.Mock()
362 | self._db_manager._connect_tenants = mock.Mock()
363 |
364 | self._db_manager._system_db_connector.connect.side_effect = [
365 | mock_exception.ConnectionError('err'),
366 | mock_exception.ConnectionError('err'),
367 | None]
368 |
369 | self._db_manager.start(
370 | '10.10.10.10', 30013, user='user', password='pass', multi_tenant=True, timeout=2)
371 |
372 | self._db_manager._system_db_connector.connect.assert_has_calls([
373 | mock.call('10.10.10.10', 30013, **connection_data),
374 | mock.call('10.10.10.10', 30013, **connection_data),
375 | mock.call('10.10.10.10', 30013, **connection_data)
376 | ])
377 |
378 | mock_sleep.assert_has_calls([
379 | mock.call(15),
380 | mock.call(15)
381 | ])
382 |
383 | mock_logger.assert_has_calls([
384 | mock.call('the connection to the system database failed. error message: %s', 'err'),
385 | mock.call('the connection to the system database failed. error message: %s', 'err')
386 | ])
387 |
388 | assert self._db_manager._db_connectors == [self._db_manager._system_db_connector]
389 | self._db_manager._connect_tenants.assert_called_once_with('10.10.10.10', connection_data)
390 |
391 |
392 | def test_get_connectors(self):
393 | self._db_manager._db_connectors = 'conns'
394 | assert 'conns' == self._db_manager.get_connectors()
395 |
--------------------------------------------------------------------------------
/tests/main_test.py:
--------------------------------------------------------------------------------
1 | """
2 | Unitary tests for exporters/main.py.
3 |
4 | :author: abelarbi, xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: abelarbi@suse.de, xarbulu@suse.com
7 |
8 | :since: 2019-06-11
9 | """
10 |
11 | # pylint:disable=C0103,C0111,W0212,W0611
12 |
13 | import os
14 | import sys
15 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
16 |
17 | import logging
18 |
19 | try:
20 | from unittest import mock
21 | except ImportError:
22 | import mock
23 |
24 | import pytest
25 |
26 | sys.modules['shaptools'] = mock.MagicMock()
27 | sys.modules['prometheus_client'] = mock.MagicMock()
28 | sys.modules['prometheus_client.core'] = mock.MagicMock()
29 |
30 | from hanadb_exporter import __version__
31 | from hanadb_exporter import main
32 |
33 |
class TestMain(object):
    """
    Unitary tests for hanadb_exporter/main.py.
    """

    @mock.patch('json.load')
    @mock.patch('hanadb_exporter.main.open')
    def test_parse_config(self, mock_open, mock_load):
        """parse_config opens the file read-only and delegates to json.load."""
        main.parse_config('config.json')
        mock_open.assert_called_once_with('config.json', 'r')
        assert mock_load.call_count == 1

    @mock.patch('argparse.ArgumentParser')
    def test_parse_arguments(self, mock_parser):
        """All command line flags are registered and parse_args is returned."""
        mocked_parser = mock.Mock()
        mock_parser.return_value = mocked_parser
        mocked_parser.parse_args.return_value = 'parsed_arguments'

        parsed_arguments = main.parse_arguments()

        mock_parser.assert_called_once_with()
        mocked_parser.add_argument.assert_has_calls([
            mock.call(
                "-c", "--config", help="Path to hanadb_exporter configuration file"),
            mock.call(
                "-m", "--metrics", help="Path to hanadb_exporter metrics file"),
            mock.call(
                "-d", "--daemon", action="store_true", help="Start the exporter as a systemd daemon. Only used when the the application "\
                "is managed by systemd"),
            mock.call(
                "--identifier",
                help="Identifier of the configuration file from /etc/hanadb_exporter"),
            mock.call(
                "-v", "--verbosity",
                help="Python logging level. Options: DEBUG, INFO, WARN, ERROR (INFO by default)"),
            mock.call(
                "-V", "--version", action="store_true",
                help="Print the hanadb_exporter version")
        ])

        mocked_parser.parse_args.assert_called_once_with()

        assert parsed_arguments == 'parsed_arguments'

    @mock.patch('hanadb_exporter.main.parse_arguments')
    @mock.patch('builtins.print')
    def test_version(self, mock_print, mock_parse_arguments):
        """--version prints the exporter version and returns early."""
        mock_arguments = mock.Mock(version=True)
        mock_parse_arguments.return_value = mock_arguments
        main.run()
        mock_print.assert_called_once_with('hanadb_exporter %s' % (__version__))

    @mock.patch('hanadb_exporter.main.fileConfig')
    def test_setup_logging(self, mock_file_config):
        """The log file name falls back to a host/port-derived default."""
        config = {
            'hana': {
                'host': '123.123.123.123',
                'port': 1234
            },
            'logging': {
                'log_file': 'my_file',
                'config_file': 'my_config_file'
            }
        }

        # First call: explicit log_file is used
        main.setup_logging(config)

        # Second call: log_file removed, default path is derived from hana host/port
        config['logging'].pop('log_file')
        main.setup_logging(config)

        mock_file_config.assert_has_calls([
            mock.call('my_config_file', defaults={'logfilename': 'my_file'}),
            mock.call('my_config_file', defaults={'logfilename': '/var/log/hanadb_exporter_123.123.123.123_1234'})
        ])

    @mock.patch('os.path.isfile')
    def test_lookup_etc_folder(self, mock_isfile):
        """The first existing candidate path is returned."""
        mock_isfile.return_value = True
        metric_file = main.lookup_etc_folder(main.METRICS_FILES)
        assert metric_file == main.METRICS_FILES[0]

    @mock.patch('os.path.isfile')
    def test_lookup_etc_folder_error(self, mock_isfile):
        """A ValueError is raised when no candidate path exists."""
        mock_isfile.side_effect = [False, False]
        with pytest.raises(ValueError) as err:
            main.lookup_etc_folder(main.METRICS_FILES)
        assert 'configuration file does not exist in {}'.format(",".join(main.METRICS_FILES)) in str(err.value)

    @mock.patch('hanadb_exporter.utils.systemd_ready')
    @mock.patch('hanadb_exporter.main.LOGGER')
    @mock.patch('hanadb_exporter.main.parse_arguments')
    @mock.patch('hanadb_exporter.main.parse_config')
    @mock.patch('hanadb_exporter.main.setup_logging')
    @mock.patch('hanadb_exporter.main.db_manager.DatabaseManager')
    @mock.patch('hanadb_exporter.main.prometheus_exporter.SapHanaCollectors')
    @mock.patch('hanadb_exporter.main.REGISTRY.register')
    @mock.patch('hanadb_exporter.main.start_http_server')
    @mock.patch('logging.getLogger')
    @mock.patch('time.sleep')
    def test_run(
            self, mock_sleep, mock_get_logger, mock_start_server, mock_registry,
            mock_exporters, mock_db_manager, mock_setup_logging,
            mock_parse_config, mock_parse_arguments, mock_logger, mock_systemd):
        """Full run() flow with explicit config/metrics files and ssl enabled."""

        mock_arguments = mock.Mock(config='config', metrics='metrics', daemon=False, version=False)
        mock_parse_arguments.return_value = mock_arguments

        config = {
            'listen_address': '127.0.0.1',
            'hana': {
                'host': '10.10.10.10',
                'port': 1234,
                'user': 'user',
                'password': 'pass',
                'ssl': True,
                'ssl_validate_cert': True
            },
            'logging': {
                'log_file': 'my_file',
                'config_file': 'my_config_file'
            }
        }
        mock_parse_config.return_value = config

        db_instance = mock.Mock()
        db_instance.get_connectors.return_value = 'connectors'
        mock_db_manager.return_value = db_instance

        mock_collector = mock.Mock()
        mock_exporters.return_value = mock_collector

        # Break out of the infinite serving loop on the first sleep
        mock_sleep.side_effect = Exception

        with pytest.raises(Exception):
            main.run()

        mock_parse_arguments.assert_called_once_with()
        mock_parse_config.assert_called_once_with(mock_arguments.config)
        mock_setup_logging.assert_called_once_with(config)
        mock_db_manager.assert_called_once_with()
        db_instance.start.assert_called_once_with(
            '10.10.10.10', 1234, user='user', password='pass',
            userkey=None, multi_tenant=True, timeout=30, ssl=True, ssl_validate_cert=True)
        db_instance.get_connectors.assert_called_once_with()
        mock_exporters.assert_called_once_with(
            connectors='connectors', metrics_file='metrics')

        mock_registry.assert_called_once_with(mock_collector)
        mock_logger.info.assert_has_calls([
            mock.call('exporter successfully registered'),
            mock.call('starting to serve metrics')
        ])
        mock_start_server.assert_called_once_with(9668, '127.0.0.1')
        mock_sleep.assert_called_once_with(1)
        # Not started as a daemon, so systemd is never notified
        assert mock_systemd.call_count == 0

    @mock.patch('hanadb_exporter.utils.systemd_ready')
    @mock.patch('hanadb_exporter.main.LOGGER')
    @mock.patch('hanadb_exporter.main.lookup_etc_folder')
    @mock.patch('hanadb_exporter.main.parse_arguments')
    @mock.patch('hanadb_exporter.main.parse_config')
    @mock.patch('hanadb_exporter.main.setup_logging')
    @mock.patch('hanadb_exporter.main.db_manager.DatabaseManager')
    @mock.patch('hanadb_exporter.main.prometheus_exporter.SapHanaCollectors')
    @mock.patch('hanadb_exporter.main.REGISTRY.register')
    @mock.patch('hanadb_exporter.main.start_http_server')
    @mock.patch('logging.getLogger')
    @mock.patch('time.sleep')
    def test_run_defaults(
            self, mock_sleep, mock_get_logger, mock_start_server, mock_registry,
            mock_exporters, mock_db_manager, mock_setup_logging, mock_parse_config,
            mock_parse_arguments, mock_lookup_etc_folder, mock_logger, mock_systemd):
        """run() with --identifier and daemon mode: files come from /etc lookup."""

        mock_arguments = mock.Mock(
            config=None, metrics=None, identifier='config', daemon=True, version=False)
        mock_parse_arguments.return_value = mock_arguments

        mock_lookup_etc_folder.return_value = 'new_metrics'

        config = {
            'hana': {
                'host': '10.10.10.10',
                'port': 1234,
                'user': 'user',
                'password': 'pass'
            },
            'logging': {
                'log_file': 'my_file',
                'config_file': 'my_config_file'
            }
        }
        mock_parse_config.return_value = config

        db_instance = mock.Mock()
        db_instance.get_connectors.return_value = 'connectors'
        mock_db_manager.return_value = db_instance

        mock_collector = mock.Mock()
        mock_exporters.return_value = mock_collector

        # Break out of the infinite serving loop on the first sleep
        mock_sleep.side_effect = Exception

        with pytest.raises(Exception):
            main.run()

        mock_parse_arguments.assert_called_once_with()
        mock_parse_config.assert_called_once_with("new_metrics")
        mock_setup_logging.assert_called_once_with(config)
        mock_db_manager.assert_called_once_with()
        db_instance.start.assert_called_once_with(
            '10.10.10.10', 1234, user='user', password='pass',
            userkey=None, multi_tenant=True, timeout=30, ssl=False, ssl_validate_cert=False)
        db_instance.get_connectors.assert_called_once_with()
        mock_exporters.assert_called_once_with(
            connectors='connectors', metrics_file='new_metrics')

        mock_registry.assert_called_once_with(mock_collector)
        mock_logger.info.assert_has_calls([
            mock.call('exporter successfully registered'),
            mock.call('starting to serve metrics')
        ])
        mock_start_server.assert_called_once_with(9668, '0.0.0.0')
        mock_sleep.assert_called_once_with(1)
        # Daemon mode notifies systemd once
        mock_systemd.assert_called_once_with()

    @mock.patch('hanadb_exporter.main.parse_arguments')
    def test_run_invalid_args(self, mock_parse_arguments):
        """run() rejects invocations with neither --config nor --identifier."""

        mock_arguments = mock.Mock(config=None, identifier=None, version=False)
        mock_parse_arguments.return_value = mock_arguments

        with pytest.raises(ValueError) as err:
            main.run()

        assert 'configuration file or identifier must be used' in str(err.value)

    @mock.patch('hanadb_exporter.main.LOGGER')
    @mock.patch('hanadb_exporter.main.parse_arguments')
    @mock.patch('hanadb_exporter.main.parse_config')
    @mock.patch('hanadb_exporter.main.db_manager.DatabaseManager')
    @mock.patch('logging.getLogger')
    @mock.patch('logging.basicConfig')
    def test_run_malformed(
            self, mock_logging, mock_get_logger, mock_db_manager,
            mock_parse_config, mock_parse_arguments, mock_logger):
        """A config missing the hana 'host' key raises a descriptive KeyError."""

        mock_arguments = mock.Mock(
            config='config', metrics='metrics', verbosity='DEBUG', version=False)
        mock_parse_arguments.return_value = mock_arguments

        # 'host' key intentionally missing
        config = {
            'hana': {
                'port': 1234,
                'user': 'user',
                'password': 'pass'
            }
        }
        mock_parse_config.return_value = config

        with pytest.raises(KeyError) as err:
            main.run()

        mock_parse_arguments.assert_called_once_with()
        mock_parse_config.assert_called_once_with(mock_arguments.config)
        mock_logging.assert_called_once_with(level='DEBUG')
        mock_db_manager.assert_called_once_with()
        assert 'Configuration file {} is malformed: {} not found'.format(
            'config', '\'host\'') in str(err.value)

    @mock.patch('hanadb_exporter.utils.systemd_ready')
    @mock.patch('hanadb_exporter.main.LOGGER')
    @mock.patch('hanadb_exporter.main.parse_arguments')
    @mock.patch('hanadb_exporter.main.parse_config')
    @mock.patch('hanadb_exporter.main.setup_logging')
    @mock.patch('hanadb_exporter.main.db_manager.DatabaseManager')
    @mock.patch('hanadb_exporter.main.prometheus_exporter.SapHanaCollectors')
    @mock.patch('hanadb_exporter.main.REGISTRY.register')
    @mock.patch('hanadb_exporter.main.start_http_server')
    @mock.patch('logging.getLogger')
    @mock.patch('time.sleep')
    @mock.patch('hanadb_exporter.main.secrets_manager.get_db_credentials')
    def test_run_secret_manager(
            self, mock_secret_manager, mock_sleep, mock_get_logger, mock_start_server, mock_registry,
            mock_exporters, mock_db_manager, mock_setup_logging,
            mock_parse_config, mock_parse_arguments, mock_logger, mock_systemd):
        """aws_secret_name makes run() fetch credentials from AWS secrets manager."""

        mock_arguments = mock.Mock(config='config', metrics='metrics', daemon=False, version=False)
        mock_parse_arguments.return_value = mock_arguments
        mock_secret_manager.return_value = {
            'username': 'db_user',
            'password': 'db_pass'
        }

        config = {
            'hana': {
                'host': '10.10.10.10',
                'port': 1234,
                'aws_secret_name': 'db_secret',
                'user': 'user',
                'password': 'pass'
            },
            'logging': {
                'log_file': 'my_file',
                'config_file': 'my_config_file'
            }
        }

        mock_parse_config.return_value = config

        db_instance = mock.Mock()
        db_instance.get_connectors.return_value = 'connectors'
        mock_db_manager.return_value = db_instance

        mock_collector = mock.Mock()
        mock_exporters.return_value = mock_collector

        # Break out of the infinite serving loop on the first sleep
        mock_sleep.side_effect = Exception

        with pytest.raises(Exception):
            main.run()

        mock_parse_arguments.assert_called_once_with()
        mock_parse_config.assert_called_once_with(mock_arguments.config)
        mock_setup_logging.assert_called_once_with(config)
        mock_db_manager.assert_called_once_with()
        # The secret credentials override the ones in the config file
        db_instance.start.assert_called_once_with(
            '10.10.10.10', 1234, user='db_user', password='db_pass',
            userkey=None, multi_tenant=True, timeout=30, ssl=False, ssl_validate_cert=False)
        db_instance.get_connectors.assert_called_once_with()
        mock_exporters.assert_called_once_with(
            connectors='connectors', metrics_file='metrics')

        mock_registry.assert_called_once_with(mock_collector)
        mock_logger.info.assert_has_calls([
            mock.call('AWS secret name is going to be used to read the database username and password'),
            mock.call('exporter successfully registered'),
            mock.call('starting to serve metrics')
        ])
        mock_start_server.assert_called_once_with(9668, '0.0.0.0')
        mock_sleep.assert_called_once_with(1)
        assert mock_systemd.call_count == 0
375 |
--------------------------------------------------------------------------------
/tests/prometheus_exporter_test.py:
--------------------------------------------------------------------------------
1 | """
2 | Unitary tests for exporters/prometheus_exporter.py.
3 |
4 | :author: abelarbi
5 | :organization: SUSE Linux GmbH
6 | :contact: abelarbi@suse.de
7 |
8 | :since: 2019-06-11
9 | """
10 |
11 | # pylint:disable=C0103,C0111,W0212,W0611
12 |
13 | import os
14 | import sys
15 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
16 |
17 | import logging
18 |
19 | try:
20 | from unittest import mock
21 | except ImportError:
22 | import mock
23 |
24 | import pytest
25 |
26 | sys.modules['shaptools'] = mock.MagicMock()
27 | sys.modules['prometheus_client'] = mock.MagicMock()
28 |
29 | from hanadb_exporter import prometheus_exporter
30 |
class TestSapHanaCollectors(object):
    """
    Unitary tests for SapHanaCollectors.
    """

    @mock.patch('hanadb_exporter.prometheus_exporter.SapHanaCollector')
    def test_init(self, mock_collector):
        """One SapHanaCollector is created per provided connector."""
        connectors = [mock.Mock(), mock.Mock()]
        created = [mock.Mock(), mock.Mock()]
        mock_collector.side_effect = created

        collectors = prometheus_exporter.SapHanaCollectors(connectors, 'metrics.json')

        mock_collector.assert_has_calls(
            [mock.call(conn, 'metrics.json') for conn in connectors])
        assert collectors._collectors == created

    @mock.patch('hanadb_exporter.prometheus_exporter.SapHanaCollector')
    def test_collect(self, mock_collector):
        """collect() chains the metrics yielded by every child collector."""
        connectors = [mock.Mock(), mock.Mock()]
        metrics = ['metric1', 'metric2', 'metric3', 'metric4']

        coll1 = mock.Mock()
        coll1.collect.return_value = metrics[:2]
        coll2 = mock.Mock()
        coll2.collect.return_value = metrics[2:]
        mock_collector.side_effect = [coll1, coll2]

        collectors = prometheus_exporter.SapHanaCollectors(connectors, 'metrics.json')

        # Metrics must come out in child-collector order
        assert list(collectors.collect()) == metrics

        coll1.collect.assert_called_once_with()
        coll2.collect.assert_called_once_with()
80 |
81 |
82 | class TestSapHanaCollector(object):
83 | """
84 | Unitary tests for SapHanaCollector.
85 | """
86 |
87 | @mock.patch('hanadb_exporter.prometheus_exporter.SapHanaCollector.retrieve_metadata')
88 | @mock.patch('hanadb_exporter.prometheus_metrics.PrometheusMetrics')
89 | def setup(self, mock_metrics, mock_retrieve_metadata):
90 | """
91 | Test setUp.
92 | """
93 | self._mock_metrics_config = mock.Mock()
94 | mock_metrics.return_value = self._mock_metrics_config
95 | self._mock_connector = mock.Mock()
96 | self._collector = prometheus_exporter.SapHanaCollector(self._mock_connector, 'metrics.json')
97 |
98 | self._collector._sid = 'prd'
99 | self._collector._insnr = '00'
100 | self._collector._database_name = 'db_name'
101 | self._collector._hana_version = '2.0'
102 |
103 | mock_retrieve_metadata.assert_called_once_with()
104 |
105 | def test_metadata_labels(self):
106 | assert ['prd', '00', 'db_name'] == self._collector.metadata_labels
107 |
    @mock.patch('hanadb_exporter.utils.format_query_result')
    @mock.patch('logging.Logger.info')
    def test_retrieve_metadata(self, mock_logger, mock_format_query):
        """retrieve_metadata() queries the database and stores sid/insnr/db/version."""

        mock_result = mock.Mock()
        self._collector._hdb_connector.query = mock.Mock(return_value=mock_result)
        mock_format_query.return_value = [
            {'SID': 'ha1', 'INSNR': '10', 'DATABASE_NAME': 'DB_SYSTEM', 'VERSION': '1.2.3'}]

        self._collector.retrieve_metadata()

        mock_logger.assert_has_calls([
            mock.call('Querying database metadata...'),
            mock.call(
                'Metadata retrieved. version: %s, sid: %s, insnr: %s, database: %s',
                '1.2.3', 'ha1', '10', 'DB_SYSTEM')
        ])
        # The SQL below must match the production query character-for-character
        self._collector._hdb_connector.query.assert_called_once_with(
            """SELECT
(SELECT value
FROM M_SYSTEM_OVERVIEW
WHERE section = 'System'
AND name = 'Instance ID') SID,
(SELECT value
FROM M_SYSTEM_OVERVIEW
WHERE section = 'System'
AND name = 'Instance Number') INSNR,
m.database_name,
m.version
FROM m_database m;"""
        )
        mock_format_query.assert_called_once_with(mock_result)
        # Metadata attributes set in setup() must have been overwritten
        assert self._collector._sid == 'ha1'
        assert self._collector._insnr == '10'
        assert self._collector._database_name == 'DB_SYSTEM'
        assert self._collector._hana_version == '1.2.3'
144 |
145 | @mock.patch('hanadb_exporter.prometheus_exporter.core')
146 | @mock.patch('logging.Logger.debug')
147 | def test_manage_gauge(self, mock_logger, mock_core):
148 |
149 | mock_gauge_instance = mock.Mock()
150 | mock_gauge_instance.samples = 'samples'
151 | mock_core.GaugeMetricFamily = mock.Mock()
152 | mock_core.GaugeMetricFamily.return_value = mock_gauge_instance
153 |
154 | mock_metric = mock.Mock()
155 | mock_metric.name = 'name'
156 | mock_metric.description = 'description'
157 | mock_metric.labels = ['column1', 'column2']
158 | mock_metric.unit = 'mb'
159 | mock_metric.value = 'column3'
160 |
161 | formatted_query = [
162 | {'column1':'data1', 'column2':'data2', 'column3':'data3'},
163 | {'column1':'data4', 'column2':'data5', 'column3':'data6'},
164 | {'column1':'data7', 'column2':'data8', 'column3':'data9'}
165 | ]
166 |
167 | metric_obj = self._collector._manage_gauge(mock_metric, formatted_query)
168 |
169 | mock_core.GaugeMetricFamily.assert_called_once_with(
170 | 'name', 'description', None,
171 | ['sid', 'insnr', 'database_name', 'column1', 'column2'], 'mb')
172 |
173 | mock_gauge_instance.add_metric.assert_has_calls([
174 | mock.call(['prd', '00', 'db_name', 'data1', 'data2'], 'data3'),
175 | mock.call(['prd', '00', 'db_name', 'data4', 'data5'], 'data6'),
176 | mock.call(['prd', '00', 'db_name', 'data7', 'data8'], 'data9')
177 | ])
178 |
179 | mock_logger.assert_called_once_with('%s \n', 'samples')
180 | assert metric_obj == mock_gauge_instance
181 |
182 | @mock.patch('hanadb_exporter.prometheus_exporter.core')
183 | @mock.patch('logging.Logger.warn')
184 | @mock.patch('logging.Logger.debug')
185 | def test_manage_gauge_incorrect_label(self, logger_debug, logger_warn, mock_core):
186 |
187 | mock_gauge_instance = mock.Mock()
188 | mock_gauge_instance.samples = []
189 | mock_core.GaugeMetricFamily = mock.Mock()
190 | mock_core.GaugeMetricFamily.return_value = mock_gauge_instance
191 |
192 | mock_metric = mock.Mock()
193 | mock_metric.name = 'name'
194 | mock_metric.description = 'description'
195 | mock_metric.labels = ['column4', 'column5']
196 | mock_metric.unit = 'mb'
197 | mock_metric.value = 'column3'
198 |
199 | formatted_query = [
200 | {'column1': 'data1', 'column2': 'data2', 'column3': 'data3'}
201 | ]
202 |
203 | metric_obj = self._collector._manage_gauge(mock_metric, formatted_query)
204 |
205 | mock_core.GaugeMetricFamily.assert_called_once_with(
206 | 'name', 'description', None,
207 | ['sid', 'insnr', 'database_name', 'column4', 'column5'], 'mb')
208 |
209 | logger_warn.assert_called_once_with(
210 | 'One or more label(s) specified in metrics.json '
211 | 'for metric "%s" that are not found in the query result',
212 | 'name')
213 |
214 | assert mock_gauge_instance.call_count == 0
215 | logger_debug.assert_called_once_with('%s \n', [])
216 |
217 | @mock.patch('hanadb_exporter.prometheus_exporter.core')
218 | @mock.patch('logging.Logger.warn')
219 | @mock.patch('logging.Logger.debug')
220 | def test_manage_gauge_incorrect_value(self, logger_debug, logger_warn, mock_core):
221 |
222 | mock_gauge_instance = mock.Mock()
223 | mock_gauge_instance.samples = []
224 | mock_core.GaugeMetricFamily = mock.Mock()
225 | mock_core.GaugeMetricFamily.return_value = mock_gauge_instance
226 |
227 | mock_metric = mock.Mock()
228 | mock_metric.name = 'name'
229 | mock_metric.description = 'description'
230 | mock_metric.labels = ['column1', 'column2']
231 | mock_metric.unit = 'mb'
232 | mock_metric.value = 'column4'
233 |
234 | formatted_query = [
235 | {'column1': 'data1', 'column2': 'data2', 'column3': 'data3'},
236 | {'column1': 'data4', 'column4': None, 'column3': 'data6'}
237 | ]
238 |
239 | metric_obj = self._collector._manage_gauge(mock_metric, formatted_query)
240 |
241 | mock_core.GaugeMetricFamily.assert_called_once_with(
242 | 'name', 'description', None,
243 | ['sid', 'insnr', 'database_name', 'column1', 'column2'], 'mb')
244 |
245 | logger_warn.assert_has_calls([
246 | mock.call(
247 | 'Specified value in metrics.json for metric "%s": (%s) not found or it is '\
248 | 'invalid (None) in the query result',
249 | 'name', 'column4'),
250 | mock.call(
251 | 'Specified value in metrics.json for metric "%s": (%s) not found or it is '\
252 | 'invalid (None) in the query result',
253 | 'name', 'column4')
254 | ])
255 | assert mock_gauge_instance.call_count == 0
256 | logger_debug.assert_called_once_with('%s \n', [])
257 |
258 | def test_reconnect_connected(self):
259 | self._mock_connector.isconnected.return_value = True
260 | self._collector.reconnect()
261 | self._mock_connector.isconnected.assert_called_once_with()
262 | self._mock_connector.reconnect.assert_not_called()
263 |
264 | def test_reconnect_not_connected(self):
265 | self._mock_connector.isconnected.return_value = False
266 | self._collector.retrieve_metadata = mock.Mock()
267 | self._collector.reconnect()
268 | self._mock_connector.isconnected.assert_called_once_with()
269 | self._mock_connector.reconnect.assert_called_once_with()
270 | self._collector.retrieve_metadata.assert_called_once_with()
271 |
272 | @mock.patch('hanadb_exporter.utils.format_query_result')
273 | @mock.patch('hanadb_exporter.utils.check_hana_range')
274 | @mock.patch('logging.Logger.error')
275 | def test_collect_value_error(self, mock_logger, mock_hana_range, mock_format_query):
276 | """
277 | Test that when _manage_gauge is called and return ValueError (labels or value)
278 | are incorrect, that the ValueError is catched by collect() and a error is raised
279 | """
280 | self._collector.reconnect = mock.Mock()
281 | self._collector._manage_gauge = mock.Mock()
282 |
283 | self._collector._manage_gauge.side_effect = ValueError('test')
284 | mock_hana_range.return_value = True
285 |
286 | metrics1_1 = mock.Mock(type='gauge')
287 | metrics1 = [metrics1_1]
288 | query1 = mock.Mock(enabled=True, query='query1', metrics=metrics1, hana_version_range=['1.0'])
289 |
290 | self._collector._metrics_config.queries = [query1]
291 |
292 | for _ in self._collector.collect():
293 | continue
294 |
295 | self._collector.reconnect.assert_called_once_with()
296 | mock_logger.assert_called_once_with('test')
297 |
298 | @mock.patch('hanadb_exporter.utils.format_query_result')
299 | @mock.patch('hanadb_exporter.utils.check_hana_range')
300 | @mock.patch('logging.Logger.warning')
301 | @mock.patch('logging.Logger.info')
302 | def test_collect(self, mock_logger, mock_logger_warning, mock_hana_range, mock_format_query):
303 |
304 | self._collector.reconnect = mock.Mock()
305 | self._collector._manage_gauge = mock.Mock()
306 |
307 | self._mock_connector.query.side_effect = [
308 | 'result1', 'result2', '']
309 | mock_format_query.side_effect = [
310 | 'form_result1', 'form_result2', '']
311 |
312 | mock_hana_range.side_effect = [True, True, False, True]
313 |
314 | self._collector._manage_gauge.side_effect = [
315 | 'gauge1', 'gauge2', 'gauge3', 'gauge4', 'gauge5']
316 |
317 | metrics1_1 = mock.Mock(type='gauge')
318 | metrics1_2 = mock.Mock(type='gauge')
319 | metrics1 = [metrics1_1, metrics1_2]
320 | query1 = mock.Mock(enabled=True, query='query1', metrics=metrics1, hana_version_range=['1.0'])
321 | metrics2_1 = mock.Mock(type='gauge')
322 | metrics2_2 = mock.Mock(type='gauge')
323 | metrics2 = [metrics2_1, metrics2_2]
324 | query2 = mock.Mock(enabled=False, query='query2', metrics=metrics2, hana_version_range=['2.0'])
325 | metrics3_1 = mock.Mock(type='gauge')
326 | metrics3_2 = mock.Mock(type='gauge')
327 | metrics3_3 = mock.Mock(type='gauge')
328 | metrics3 = [metrics3_1, metrics3_2, metrics3_3]
329 | query3 = mock.Mock(enabled=True, query='query3', metrics=metrics3, hana_version_range=['3.0'])
330 | metrics4_1 = mock.Mock(type='gauge')
331 | metrics4_2 = mock.Mock(type='gauge')
332 | metrics4 = [metrics2_1, metrics2_2]
333 | query4 = mock.Mock(enabled=True, query='query4', metrics=metrics4, hana_version_range=['1.0.0', '2.0.0'])
334 | query5 = mock.Mock(enabled=True, query='query5', metrics=[], hana_version_range=['4.0'])
335 |
336 | self._collector._metrics_config.queries = [
337 | query1, query2, query3, query4, query5
338 | ]
339 |
340 | for index, element in enumerate(self._collector.collect()):
341 | assert element == 'gauge{}'.format(index+1)
342 |
343 | self._collector.reconnect.assert_called_once_with()
344 | self._mock_connector.query.assert_has_calls([
345 | mock.call('query1'),
346 | mock.call('query3'),
347 | mock.call('query5')])
348 |
349 | mock_format_query.assert_has_calls([
350 | mock.call('result1'),
351 | mock.call('result2'),
352 | mock.call('')
353 | ])
354 |
355 | mock_hana_range.assert_has_calls([
356 | mock.call('2.0', ['1.0']),
357 | mock.call('2.0', ['3.0']),
358 | mock.call('2.0', ['1.0.0', '2.0.0']),
359 | mock.call('2.0', ['4.0'])
360 | ])
361 |
362 | self._collector._manage_gauge.assert_has_calls([
363 | mock.call(metrics1_1, 'form_result1'),
364 | mock.call(metrics1_2, 'form_result1'),
365 | mock.call(metrics3_1, 'form_result2'),
366 | mock.call(metrics3_2, 'form_result2'),
367 | mock.call(metrics3_3, 'form_result2'),
368 | ])
369 |
370 | mock_logger.assert_has_calls([
371 | mock.call('Query %s is disabled', 'query2'),
372 | mock.call('Query %s out of the provided hana version range: %s',
373 | 'query4', ['1.0.0', '2.0.0'])
374 | ])
375 |
376 | mock_logger_warning.assert_called_once_with(
377 | 'Query %s ... has not returned any record', 'query5')
378 |
    @mock.patch('hanadb_exporter.utils.format_query_result')
    @mock.patch('hanadb_exporter.utils.check_hana_range')
    def test_collect_incorrect_type(self, mock_hana_range, mock_format_query):
        """A metric with an unknown type makes collect() raise NotImplementedError."""

        self._collector.reconnect = mock.Mock()
        self._collector._manage_gauge = mock.Mock()

        self._mock_connector.query.side_effect = [
            'result1', 'result2']
        mock_format_query.side_effect = [
            'form_result1', 'form_result2']

        mock_hana_range.side_effect = [True, True, True]

        self._collector._manage_gauge.side_effect = [
            'gauge1', 'gauge2', 'gauge3', 'gauge4', 'gauge5']

        metrics1_1 = mock.Mock(type='gauge')
        metrics1_2 = mock.Mock(type='gauge')
        metrics1 = [metrics1_1, metrics1_2]
        query1 = mock.Mock(enabled=True, query='query1', metrics=metrics1, hana_version_range=['1.0'])
        metrics2_1 = mock.Mock(type='gauge')
        metrics2_2 = mock.Mock(type='gauge')
        metrics2 = [metrics2_1, metrics2_2]
        query2 = mock.Mock(enabled=False, query='query2', metrics=metrics2, hana_version_range=['2.0'])
        metrics3_1 = mock.Mock(type='gauge')
        # The unsupported metric type that triggers the exception
        metrics3_2 = mock.Mock(type='other')
        metrics3_3 = mock.Mock(type='gauge')
        metrics3 = [metrics3_1, metrics3_2, metrics3_3]
        query3 = mock.Mock(enabled=True, query='query3', metrics=metrics3, hana_version_range=['3.0'])

        self._collector._metrics_config.queries = [
            query1, query2, query3
        ]

        # The generator raises as soon as it reaches the 'other'-typed metric
        with pytest.raises(NotImplementedError) as err:
            for index, element in enumerate(self._collector.collect()):
                assert element == 'gauge{}'.format(index+1)

        self._collector.reconnect.assert_called_once_with()
        assert '{} type not implemented'.format('other') in str(err.value)

        self._mock_connector.query.assert_has_calls([
            mock.call('query1'),
            mock.call('query3')])

        mock_format_query.assert_has_calls([
            mock.call('result1'),
            mock.call('result2')
        ])

        mock_hana_range.assert_has_calls([
            mock.call('2.0', ['1.0']),
            mock.call('2.0', ['3.0'])
        ])

        # metrics3_2 is never reached as a gauge; processing stops at the raise
        self._collector._manage_gauge.assert_has_calls([
            mock.call(metrics1_1, 'form_result1'),
            mock.call(metrics1_2, 'form_result1'),
            mock.call(metrics3_1, 'form_result2')
        ])
440 |
    @mock.patch('hanadb_exporter.utils.check_hana_range')
    @mock.patch('hanadb_exporter.prometheus_exporter.hdb_connector.connectors.base_connector')
    @mock.patch('logging.Logger.error')
    def test_collect_incorrect_query(self, mock_logger, mock_base_connector, mock_hana_range):
        """
        A database query failure must be logged and the query skipped;
        collect() itself must not raise.
        """
        self._collector.reconnect = mock.Mock()
        # Make the QueryError type the exporter catches match the plain
        # Exception raised by the mocked connector below.
        mock_base_connector.QueryError = Exception

        self._mock_connector.query.side_effect = Exception('error')
        mock_hana_range.return_value = True

        query1 = mock.Mock(enabled=True, query='query1', hana_version_range=['1.0'])

        self._collector._metrics_config.queries = [query1]

        # Drain the generator; the query failure must not propagate.
        for _ in self._collector.collect():
            continue

        self._collector.reconnect.assert_called_once_with()
        self._mock_connector.query.assert_called_once_with('query1')

        # '2.0' is presumably the collector's HANA version set up in the
        # fixture (not visible here) — TODO confirm against setup().
        mock_hana_range.assert_has_calls([
            mock.call('2.0', ['1.0']),
        ])

        # The failure is reported via Logger.error and execution continues.
        mock_logger.assert_has_calls([
            mock.call('Failure in query: %s, skipping...', 'query1'),
        ])
469 |
--------------------------------------------------------------------------------
/tests/prometheus_metrics_test.py:
--------------------------------------------------------------------------------
1 | """
2 | Unitary tests for exporters/prometheus_metrics.py.
3 |
4 | :author: abelarbi, xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: abelarbi@suse.de, xarbulu@suse.com
7 |
8 | :since: 2019-06-11
9 | """
10 |
11 | # pylint:disable=C0103,C0111,W0212,W0611
12 |
13 | import os
14 | import sys
15 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
16 |
17 | import logging
18 |
19 | try:
20 | from unittest import mock
21 | except ImportError:
22 | import mock
23 |
24 | import pytest
25 |
26 | from hanadb_exporter import prometheus_metrics
27 |
class TestMetric(object):
    """
    Unitary tests for Metric.
    """

    def test_metric_new(self):
        """
        Metric construction: mandatory fields, defaults for the optional
        ones, and TypeError on missing, misspelled or unexpected keywords.
        """
        correct_data = {
            'name': 'name',
            'description': 'description',
            'labels': [],
            'value': 'value',
            'unit': 'unit',
            'type': 'type'
        }
        modeled_metric = prometheus_metrics.Metric(**correct_data)
        assert modeled_metric.name == 'name'
        assert modeled_metric.description == 'description'
        assert modeled_metric.labels == []
        assert modeled_metric.value == 'value'
        assert modeled_metric.unit == 'unit'
        assert modeled_metric.type == 'type'
        # Optional fields fall back to their defaults.
        assert modeled_metric.enabled == True
        assert modeled_metric.hana_version_range == ['1.0.0']

        correct_data = {
            'name': 'name',
            'description': 'description',
            'labels': [],
            'value': 'value',
            'unit': 'unit',
            'type': 'type',
            'enabled': False,
            'hana_version_range': ['1.0.0', '2.0.0']
        }

        modeled_metric = prometheus_metrics.Metric(**correct_data)
        assert modeled_metric.name == 'name'
        assert modeled_metric.description == 'description'
        assert modeled_metric.labels == []
        assert modeled_metric.value == 'value'
        assert modeled_metric.unit == 'unit'
        assert modeled_metric.type == 'type'
        # Explicitly provided optional fields override the defaults.
        assert modeled_metric.enabled == False
        assert modeled_metric.hana_version_range == ['1.0.0', '2.0.0']

        # Mandatory field 'unit' missing: construction must fail.
        missing_data = {
            'name': 'name',
            'description': 'description',
            'labels': [],
            'value': 'value',
            'type': 'type',
            'enabled': False
        }
        with pytest.raises(TypeError):
            prometheus_metrics.Metric(**missing_data)

        # Misspelled keyword ('descriptio'): construction must fail.
        incorrect_data = {
            'name': 'name',
            'descriptio': 'description',
            'labels': [],
            'value': 'value',
            'unit': 'unit',
            'type': 'type',
            'enabled': False
        }
        with pytest.raises(TypeError):
            # Fixed: this previously re-tested missing_data by mistake,
            # leaving the misspelled-keyword path unexercised.
            prometheus_metrics.Metric(**incorrect_data)

        # Unexpected extra keyword ('extra'): construction must fail.
        additional_data = {
            'name': 'name',
            'description': 'description',
            'labels': [],
            'value': 'value',
            'unit': 'unit',
            'type': 'type',
            'extra': False
        }
        with pytest.raises(TypeError):
            # Fixed: this previously re-tested missing_data by mistake,
            # leaving the extra-keyword path unexercised.
            prometheus_metrics.Metric(**additional_data)

    def test_metric_new_error(self):
        """
        An empty 'value' is rejected with a descriptive ValueError.
        """
        empty_value_data = {
            'name': 'name',
            'description': 'description',
            'labels': [],
            'value': '',
            'unit': 'unit',
            'type': 'type'
        }

        with pytest.raises(ValueError) as err:
            prometheus_metrics.Metric(**empty_value_data)

        assert 'No value specified in metrics.json for {}'.format('name') in str(err.value)
122 |
class TestQuery(object):
    """
    Unitary tests for Query.
    """

    def setup(self):
        self._query = prometheus_metrics.Query()

    @mock.patch('hanadb_exporter.prometheus_metrics.Metric')
    def test_parse(self, mock_metric):
        """parse() models every raw metric dict and stores query/enabled."""
        raw_metric1 = {'data1': 'value1'}
        raw_metric2 = {'data2': 'value2'}
        raw_query = {'metrics': [raw_metric1, raw_metric2], 'enabled': False}
        mock_metric.side_effect = ['modeled_data1', 'modeled_data2']

        self._query.parse('query', raw_query)

        # Each raw dict is expanded as keyword arguments to Metric.
        expected_metric_calls = [
            mock.call(data1='value1'),
            mock.call(data2='value2')
        ]
        mock_metric.assert_has_calls(expected_metric_calls)
        assert self._query.query == 'query'
        assert self._query.enabled == False
        assert self._query.metrics == ['modeled_data1', 'modeled_data2']

    @mock.patch('hanadb_exporter.prometheus_metrics.Query.__new__')
    def test_get_model(self, mock_query):
        """get_model() creates a Query, delegates to parse() and returns it."""
        query_instance = mock.Mock()
        mock_query.return_value = query_instance

        modeled_query = prometheus_metrics.Query.get_model('query', ['metric1', 'metric2'])

        query_instance.parse.assert_called_once_with('query', ['metric1', 'metric2'])
        assert modeled_query is query_instance
155 |
156 |
class TestPrometheusMetrics(object):
    """
    Unitary tests for PrometheusMetrics.
    """

    @mock.patch('hanadb_exporter.prometheus_metrics.PrometheusMetrics.load_metrics')
    def test_init(self, mock_load):
        """The constructor loads the metrics file and stores the queries."""
        mock_load.return_value = 'queries'

        metrics = prometheus_metrics.PrometheusMetrics('metrics_file')

        mock_load.assert_called_once_with('metrics_file')
        assert metrics.queries == 'queries'

    @mock.patch('hanadb_exporter.prometheus_metrics.Query.get_model')
    @mock.patch('json.load')
    @mock.patch('hanadb_exporter.prometheus_metrics.open')
    def test_load_metrics(self, mock_open, mock_json_load, mock_get_model):
        """load_metrics() models every query entry found in the JSON file."""
        raw_query1 = mock.Mock()
        raw_query2 = mock.Mock()
        mock_json_load.return_value = {'query1': raw_query1, 'query2': raw_query2}
        mock_get_model.side_effect = ['data1', 'data2']

        queries = prometheus_metrics.PrometheusMetrics.load_metrics('metrics.json')

        mock_open.assert_called_once_with('metrics.json', 'r')
        mock_get_model.assert_has_calls([
            mock.call('query1', raw_query1),
            mock.call('query2', raw_query2)
        ], any_order=True)
        assert queries == ['data1', 'data2']

    @mock.patch('hanadb_exporter.prometheus_metrics.Query.get_model')
    @mock.patch('json.load')
    @mock.patch('hanadb_exporter.prometheus_metrics.open')
    @mock.patch('logging.Logger.error')
    def test_load_metrics_error(self, mock_logger, mock_open, mock_json_load, mock_get_model):
        """A malformed query entry is logged and its TypeError re-raised."""
        raw_query1 = mock.Mock()
        raw_query2 = mock.Mock()
        mock_json_load.return_value = {'query1': raw_query1, 'query2': raw_query2}
        # The second entry blows up during modeling.
        mock_get_model.side_effect = ['data1', TypeError('my-error')]

        with pytest.raises(TypeError) as err:
            prometheus_metrics.PrometheusMetrics.load_metrics('metrics.json')

        assert 'my-error' in str(err.value)
        mock_open.assert_called_once_with('metrics.json', 'r')
        mock_get_model.assert_has_calls([
            mock.call('query1', raw_query1),
            mock.call('query2', raw_query2)
        ], any_order=True)
        # Two error lines are emitted before re-raising.
        assert mock_logger.call_count == 2
212 |
--------------------------------------------------------------------------------
/tests/secrets_manager_test.py:
--------------------------------------------------------------------------------
1 | """
2 | Unitary tests for exporters/secrets_manager.py.
3 |
4 | :author: elturkym, schniber
5 |
6 | :since: 2021-07-15
7 | """
8 |
9 | import json
10 |
11 | try:
12 | from unittest import mock
13 | except ImportError:
14 | import mock
15 |
16 | import pytest
17 |
18 | from hanadb_exporter import secrets_manager
19 | from botocore.exceptions import ClientError
20 | from requests.exceptions import HTTPError
21 |
22 |
class TestSecretsManager(object):
    """
    Unitary tests for hanadb_exporter/secrets_manager.py.
    """

    @mock.patch('hanadb_exporter.secrets_manager.LOGGER')
    @mock.patch('hanadb_exporter.secrets_manager.requests')
    @mock.patch('hanadb_exporter.secrets_manager.boto3.session')
    def test_get_db_credentials(self, mock_boto3, mock_requests, mock_logger):
        """
        Happy path (IMDSv1): the AWS region is read from the EC2 instance
        identity document and the credentials come from Secrets Manager.
        """
        # EC2 instance-identity document request returns the region.
        mock_ec2_response = mock.Mock()
        mock_requests.get.return_value = mock_ec2_response
        mock_ec2_response.json.return_value = json.loads('{"region":"test_region"}')
        # boto3 Session -> secretsmanager client chain.
        mock_session = mock.Mock()
        mock_sm_client = mock.Mock()
        mock_boto3.Session.return_value = mock_session
        mock_session.client.return_value = mock_sm_client
        # Secrets Manager wraps the credentials as a JSON string under
        # the 'SecretString' key.
        mock_sm_client.get_secret_value.return_value = json.loads(
            '{"SecretString" : "{\\"username\\": \\"db_user\\", \\"password\\":\\"db_pass\\"}"}')

        actual_secret = secrets_manager.get_db_credentials("test_secret")

        mock_session.client.assert_called_once_with(service_name='secretsmanager', region_name='test_region')
        mock_sm_client.get_secret_value.assert_called_once_with(SecretId='test_secret')
        mock_logger.info.assert_has_calls([
            mock.call('retrieving AWS secret details')
        ])
        # The SecretString payload is decoded into a credentials dict.
        assert actual_secret['username'] == 'db_user'
        assert actual_secret['password'] == 'db_pass'

    @mock.patch('hanadb_exporter.secrets_manager.LOGGER')
    @mock.patch('hanadb_exporter.secrets_manager.requests')
    @mock.patch('hanadb_exporter.secrets_manager.boto3.session')
    def test_get_db_credentials_imdsv2(self, mock_boto3, mock_requests, mock_logger):
        """
        IMDSv2 fallback: a 401 from the metadata endpoint triggers a token
        request (PUT) and the identity-document GET is retried with the
        token header attached.
        """
        # First metadata GET is rejected (instance enforces IMDSv2).
        mock_ec2_unauthorized = mock.Mock()
        mock_ec2_unauthorized.status_code = 401

        mock_ec2_response = mock.Mock()
        mock_ec2_response.json.return_value = json.loads('{"region":"test_region_imdsv2"}')

        # First GET: 401; second GET (with token): identity document.
        mock_requests.get.side_effect = [mock_ec2_unauthorized, mock_ec2_response]

        # Session token returned by the IMDSv2 PUT request.
        mock_ec2_put = mock.Mock()
        mock_ec2_put.content = 'my-test-token'

        mock_requests.put.return_value = mock_ec2_put

        mock_session = mock.Mock()
        mock_sm_client = mock.Mock()
        mock_boto3.Session.return_value = mock_session
        mock_session.client.return_value = mock_sm_client
        mock_sm_client.get_secret_value.return_value = json.loads(
            '{"SecretString" : "{\\"username\\": \\"db_user\\", \\"password\\":\\"db_pass\\"}"}')

        actual_secret = secrets_manager.get_db_credentials("test_secret")

        mock_session.client.assert_called_once_with(service_name='secretsmanager', region_name='test_region_imdsv2')
        mock_sm_client.get_secret_value.assert_called_once_with(SecretId='test_secret')
        mock_logger.info.assert_has_calls([
            mock.call('retrieving AWS secret details')
        ])

        # The retried GET repeats the same URL with the token header.
        mock_requests.get.assert_has_calls([
            mock.call("http://169.254.169.254/latest/dynamic/instance-identity/document"),
            mock.call("http://169.254.169.254/latest/dynamic/instance-identity/document",
                      headers={'X-aws-ec2-metadata-token': 'my-test-token'})
        ])

        mock_requests.put.assert_called_with("http://169.254.169.254/latest/api/token",
                                             headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"})

        assert actual_secret['username'] == 'db_user'
        assert actual_secret['password'] == 'db_pass'

    @mock.patch('hanadb_exporter.secrets_manager.requests')
    def test_get_db_credentials_ec2_request_error(self, mock_requests):
        """
        An HTTP failure while querying EC2 metadata is surfaced as a
        SecretsManagerError.
        """
        ec2_info_response = mock.Mock()
        mock_requests.get.return_value = ec2_info_response
        ec2_info_response.raise_for_status.side_effect=HTTPError

        with pytest.raises(secrets_manager.SecretsManagerError) as err:
            secrets_manager.get_db_credentials("test_secret")
        assert 'EC2 information request failed' in str(err.value)

    @mock.patch('hanadb_exporter.secrets_manager.requests')
    @mock.patch('hanadb_exporter.secrets_manager.boto3.session')
    def test_get_db_credentials_secret_request_error(self, mock_boto3, mock_requests):
        """
        A boto3 ClientError from get_secret_value is wrapped in a
        SecretsManagerError.
        """
        mock_ec2_response = mock.Mock()
        mock_requests.get.return_value = mock_ec2_response
        mock_ec2_response.json.return_value = json.loads('{"region":"test_region"}')
        mock_session = mock.Mock()
        mock_sm_client = mock.Mock()
        mock_boto3.Session.return_value = mock_session
        mock_session.client.return_value = mock_sm_client
        mock_sm_client.get_secret_value.side_effect = ClientError({}, "test_operation")

        with pytest.raises(secrets_manager.SecretsManagerError) as err:
            secrets_manager.get_db_credentials("test_secret")
        assert 'Couldn\'t retrieve secret details' in str(err.value)
--------------------------------------------------------------------------------
/tests/utils_test.py:
--------------------------------------------------------------------------------
1 | """
2 | Unitary tests for utils.py.
3 |
4 | :author: xarbulu
5 | :organization: SUSE Linux GmbH
6 | :contact: xarbulu@suse.com
7 |
8 | :since: 2019-07-05
9 | """
10 |
11 | # pylint:disable=C0103,C0111,W0212,W0611
12 |
13 | import os
14 | import sys
15 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
16 |
17 | import logging
18 |
19 | try:
20 | from unittest import mock
21 | except ImportError:
22 | import mock
23 |
24 | import pytest
25 |
26 | sys.modules['prometheus_client'] = mock.MagicMock()
27 |
28 | from hanadb_exporter import utils
29 |
30 |
class TestUtils(object):
    """
    Unitary tests for utils.
    """

    def test_format_query_result(self):
        """
        format_query_result() pairs each metadata column name with the
        corresponding field of every record, producing one dict per row.
        """
        query_results = mock.Mock()
        # Only the first item of each metadata tuple (the column name)
        # is relevant for the formatting.
        query_results.metadata = [
            ('column1', 'other_data',), ('column2', 'other_data'), ('column3', 'other_data')]
        query_results.records = [
            ('data1', 'data2', 'data3'),
            ('data4', 'data5', 'data6'),
            ('data7', 'data8', 'data9')
        ]

        formatted_result = utils.format_query_result(query_results)

        assert formatted_result == [
            {'column1': 'data1', 'column2': 'data2', 'column3': 'data3'},
            {'column1': 'data4', 'column2': 'data5', 'column3': 'data6'},
            {'column1': 'data7', 'column2': 'data8', 'column3': 'data9'}
        ]

    def test_check_hana_range(self):
        """
        check_hana_range() with one-element ranges (minimum version only),
        two-element ranges (inclusive bounds) and invalid range lengths.
        """
        # One-element range: current version must be >= the minimum.
        # (A duplicated assertion was removed here.)
        assert utils.check_hana_range('1.0.0.0', ['1.0.0.1']) == False
        assert utils.check_hana_range('1.0.0.0', ['1.0.0']) == True
        assert utils.check_hana_range('1.0.0.1', ['1.0.0.0']) == True

        # Two-element range: version outside [minimum, maximum].
        assert utils.check_hana_range('1.0.0.0', ['1.0.0.1', '2.0.0']) == False
        assert utils.check_hana_range('2.0.1.0', ['1.0.0.1', '2.0.0.0']) == False
        assert utils.check_hana_range('1.0.0.0', ['1.0.1.0', '2.0.0.0']) == False
        assert utils.check_hana_range('1.0.1.0', ['1.0.1.1', '2.0.0.0']) == False
        assert utils.check_hana_range('1.0.0.1', ['1.0.1', '2.0.0.0']) == False

        # Two-element range: version inside [minimum, maximum].
        assert utils.check_hana_range('1.0.0.0', ['1.0.0', '2.0.0']) == True
        assert utils.check_hana_range('1.0.1', ['1.0.0.1', '2.0.0']) == True
        assert utils.check_hana_range('1.0.1', ['1.0.0.0', '2.0.0']) == True
        assert utils.check_hana_range('2.0.0.0', ['1.0.0.1', '2.0.0.0']) == True
        assert utils.check_hana_range('1.0.0.1', ['1.0.0', '2.0.0.0']) == True

        # The range must contain exactly one or two elements.
        with pytest.raises(ValueError) as err:
            utils.check_hana_range('1.0.0.0', [])

        assert 'provided availability range does not have the correct number of elements' in str(err.value)

        with pytest.raises(ValueError) as err:
            utils.check_hana_range('1.0.0.0', ['1.0.0.0', '2.0.0.0', '3.0.0.0'])

        assert 'provided availability range does not have the correct number of elements' in str(err.value)

    @mock.patch('os.getenv')
    @mock.patch('socket.socket')
    def test_systemd_ready_error(self, mock_socket, mock_getenv):
        """
        Without NOTIFY_SOCKET in the environment the exporter is not running
        under systemd, so NotSystemdException must be raised.
        """
        mock_getenv.return_value = None
        with pytest.raises(utils.NotSystemdException) as err:
            utils.systemd_ready()

        # 'deamon' typo deliberately kept: it must match the message
        # raised by utils.systemd_ready.
        assert 'Exporter is not running as systemd deamon' in str(err.value)

    @mock.patch('os.getenv')
    @mock.patch('socket.socket')
    def test_systemd_ready(self, mock_socket, mock_getenv):
        """
        With an abstract NOTIFY_SOCKET ('@...'), READY=1 is sent through the
        corresponding NUL-prefixed abstract unix socket address.
        """
        mock_getenv.return_value = '@notify'
        sock_instance = mock.Mock()
        mock_socket.return_value = sock_instance
        utils.systemd_ready()

        sock_instance.connect.assert_called_once_with('\0notify')
        sock_instance.sendall.assert_called_once_with(b'READY=1')
102 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
[tox]
# Environments run by a plain `tox` invocation: unit tests on Python 3.6
# and 3.8 plus static analysis with pylint.
envlist = py{36,38},pylint

[testenv]
# Common unit-test environment.
deps =
    pytest~=6.0
    boto3

commands =
    py.test tests -vv {posargs}

[testenv:coverage]
# Same test suite with coverage collection (configured via .coveragerc)
# and terminal/html/xml reports.
deps =
    pytest
    pytest-cov

commands =
    py.test tests -vv --cov=hanadb_exporter --cov-config .coveragerc --cov-report term-missing --cov-report html --cov-report xml {posargs}

[testenv:pylint]
# Static analysis of the package sources (runtime deps installed so
# pylint can resolve imports).
deps = pylint
    -rrequirements.txt

commands = pylint hanadb_exporter
--------------------------------------------------------------------------------