├── .circleci
└── config.yml
├── .gitignore
├── API.rst
├── LICENSE
├── README.rst
├── SECURITY.md
├── atlasapi
├── __init__.py
├── alerts.py
├── api_keys.py
├── atlas.py
├── atlas_types.py
├── atlas_users.py
├── cloud_backup.py
├── clusters.py
├── errors.py
├── events.py
├── events_event_types.py
├── lib.py
├── logs.py
├── maintenance_window.py
├── measurements.py
├── network.py
├── organizations.py
├── projects.py
├── settings.py
├── specs.py
├── teams.py
└── whitelist.py
├── atlascli
├── __init__.py
├── atlaserrors.py
├── atlaskey.py
├── cli.py
└── listcommand.py
├── bumpversion.sh
├── doc_corrections.diff
├── gendocs
├── Makefile
├── README.rst
├── atlasapi-atlas-nested.rst
├── atlasapi.rst
├── atlascli.rst
├── conf.py
├── index.rst
└── requirements.txt
├── requirements.txt
├── setup.py
├── tests
├── __init__.py
├── alerts_test.py
├── atlascll_test.py
├── database_users_test.py
├── monitoring_logs.py
├── test_api_keys.py
├── test_cloudbackup.py
├── test_clusters.py
├── test_events.py
├── test_getting_logs.py
├── test_maint_window.py
├── test_monitoring.py
├── test_network.py
├── test_organizations.py
├── test_projects.py
├── unittest.cfg
└── whitelist_test.py
├── unittest.cfg
└── utils
└── convert_java_classes_to_enum.py
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 | workflows:
3 | build_and_test:
4 | jobs:
5 | - build:
6 | filters:
7 | tags:
8 | only: /.*/
9 | # - deploy:
10 | # requires:
11 | # - build
12 | # filters:
13 | # tags:
14 | # only: /^v.*/
15 | # branches:
16 | # ignore: /.*/
17 | jobs:
18 | build:
19 | executor: python/default
20 | steps:
21 | - checkout
22 | - python/install-packages:
23 | pkg-manager: pip
24 | - python/install-packages:
25 | args: nose2
26 | pkg-manager: pip
27 | pypi-cache: true
28 | - run:
29 | command: |
30 |             nose2 -s tests/ --verbose --config tests/unittest.cfg -A basic
31 | name: Test
32 | - store_test_results:
33 | path: nose2-junit.xml
34 |
35 | docs:
36 | executor: python/default
37 | steps:
38 | - checkout
39 | - run:
40 | name: Build Documentation
41 | command: |
42 | cd gendocs && make clean && make html && cd ..
43 | rsync -crv --delete --exclude=README.rst gendocs/_build/html/ docs/
44 |
45 | # deploy:
46 | # executor: python/default
47 | # steps:
48 | # - checkout
49 | # - python/load-cache
50 | # - python/install-deps
51 | # - python/save-cache
52 | # - run:
53 | # name: init .pypirc
54 | # command: |
55 | # echo -e "[pypi]" >> ~/.pypirc
56 | # echo -e "username = levlaz" >> ~/.pypirc
57 | # echo -e "password = $PYPI_PASSWORD" >> ~/.pypirc
58 | # - run:
59 | # name: Build package
60 | # command: |
61 | # python3 setup.py clean
62 | # python3 setup.py sdist
63 | # python3 setup.py bdist_wheel
64 | # - run:
65 | # name: upload to pypi
66 | # command: |
67 | # . venv/bin/activate
68 | # twine upload dist/*
69 | orbs:
70 | python: circleci/python@2.0.3
71 | pylint: qventus/python-lint@0.0.6
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | *$py.class
4 | build/
5 | *.kate-swp
6 | dist
7 | *.egg-info
8 | .eggs
9 | gendocs/_build/
10 | /docs/
11 |
--------------------------------------------------------------------------------
/API.rst:
--------------------------------------------------------------------------------
1 | Atlas API Support
2 | =================
3 |
4 | Status about API implementation
5 |
6 | Database Users
7 | --------------
8 |
9 | Status : [100%] :heavy_check_mark:
10 |
11 | - Get All Database Users :ballot_box_with_check:
12 | - Get a Single Database User :ballot_box_with_check:
13 | - Create a Database User :ballot_box_with_check:
14 | - Update a Database User :ballot_box_with_check:
15 | - Delete a Database User :ballot_box_with_check:
16 |
17 |
18 |
19 |
20 | Custom MongoDB Roles
21 | ---------------------
22 |
23 | Status : [0%]
24 |
25 | - Get all custom MongoDB roles in the project.
26 | - Get the custom MongoDB role named {ROLE-NAME}.
27 | - Create a new custom MongoDB role in the project.
28 | - Update a custom MongoDB role in the project.
29 | - Delete a custom MongoDB role from the project.
30 |
31 | Projects
32 | --------
33 |
34 | Status : [66%]
35 |
36 | - Get All Projects :ballot_box_with_check:
37 | - Get One Project :ballot_box_with_check:
38 | - Create a Project :ballot_box_with_check:
39 |
40 | Clusters
41 | --------
42 |
43 | Status : [100%]
44 |
45 | - Get All Clusters :ballot_box_with_check:
46 | - Get a Single Cluster :ballot_box_with_check:
47 | - Create a Cluster :ballot_box_with_check:
48 | - Modify a Cluster :ballot_box_with_check:
49 | - Delete a Cluster :ballot_box_with_check:
50 | - Get Advanced Configuration Options for One Cluster :ballot_box_with_check:
51 | - Modify Advanced Configuration Options for One Cluster :ballot_box_with_check:
52 | - Test Failover :ballot_box_with_check:
53 |
54 | - (Helper) Modify cluster instance size :ballot_box_with_check:
55 | - (Helper) Pause/Unpause Cluster :ballot_box_with_check:
56 |
57 | Alerts
58 | ------
59 |
60 | Status : [50%]
61 |
62 | - Get All Alerts :ballot_box_with_check:
63 | - Get an Alert :ballot_box_with_check:
64 | - Acknowledge an Alert (include Unacknowledge) (BROKEN)
65 |
66 | Alert Configurations
67 | --------------------
68 |
69 | Status : [0%]
70 |
71 | VPC
72 | ---
73 |
74 | Status : [0%]
75 |
76 | Monitoring and Logs
77 | -------------------
78 |
79 | Processes
80 | +++++++++
81 |
82 | - Get all processes for the specified group. [Completed]
83 | - Get information for the specified process in the specified group.
84 |
85 |
86 | Hosts
87 | +++++
88 |
89 | - Get measurements for the specified host.
90 | - Get logfile for the specified host.
91 | - Get Loglines for the specified host.
92 |
93 | Databases
94 | +++++++++
95 |
96 | - Get the list of databases for the specified host.
97 | - Get measurements of the specified database for the specified host.
98 |
99 | Disks
100 | +++++
101 |
102 | - Get the list of disks or partitions for the specified host.
103 | - Get measurements of specified disk for the specified host.
104 |
105 |
106 | Logs
107 | ++++
108 |
109 | Status : [50%]
110 |
111 |
112 | - Get the log file for a host in the cluster. :ballot_box_with_check:
113 | - Get loglines for a host in the cluster. :ballot_box_with_check:
114 | - Get log files for all hosts in a cluster (#24)
115 | - Get log files for all hosts in a project (#25) :ballot_box_with_check:
116 |
117 |
118 | IP Whitelist
119 | ------------
120 |
121 | Status : [80%]
122 |
123 | - Get All Entries
124 | - Add a single entry
125 | - Delete an entry
126 | - Update an entry (missing)
127 |
128 | Events
129 | ++++++
130 |
131 | Status: [50%]
132 |
133 | - Get All Organization Events
134 | - Get One Organization Event
135 | - Get All Project Events
136 | - Get One Project Event
137 |
138 | Organizations
139 | --------------
140 |
141 | Status: [0%]
142 |
143 |
144 | Maintenance Windows
145 | --------------------
146 |
147 | Status: [60%]
148 |
149 | - Get Maintenance Window Settings :ballot_box_with_check:
150 | - Update Maintenance Window Settings :ballot_box_with_check:
151 | - Defer Maintenance for one week :ballot_box_with_check:
152 | - Commence Maintenance ASAP
153 | - Clear Maintenance Window
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
204 |
205 | ## Runtime Library Exception to the Apache 2.0 License: ##
206 |
207 |
208 | As an exception, if you use this Software to compile your source code and
209 | portions of this Software are embedded into the binary product as a result,
210 | you may redistribute such product without providing attribution as would
211 | otherwise be required by Sections 4(a), 4(b) and 4(d) of the License.
212 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Atlas API
2 | ==========
3 |
4 | Python Bindings for the Atlas Public API
5 |
6 | This project intends to create a fairly opinionated set of bindings for the Atlas Public API which makes interacting
7 | with Atlas using Python easier. The API makes extensive use of enums and other helper type objects to take some
8 | of the guess work of administering Atlas clusters with Python.
9 |
10 | In most cases objects will be returned based upon the structure of the json returned but the API Endpoints. These objects
11 | are defined either in the `specs.py` module or in a module named after the objects themselves (`alerts.py` for example).
12 |
13 |
14 | All calls to the Atlas API require API credentials, you can configure them in your Atlas project.
15 |
16 |
17 | `Atlas API `__
18 |
19 | `Configure Atlas API Access `__
20 |
21 | `Current state of the python-atlasapi support `__
22 |
23 |
24 | .. image:: https://img.shields.io/pypi/l/atlasapi.svg
25 | :target: https://pypi.org/project/atlasapi/
26 |
27 | .. image:: https://img.shields.io/pypi/status/atlasapi.svg
28 | :target: https://pypi.org/project/atlasapi/
29 |
30 | .. image:: https://img.shields.io/pypi/pyversions/atlasapi.svg
31 | :target: https://pypi.org/project/atlasapi/
32 |
33 |
34 | Documentation
35 | -------------
36 | .. image:: https://readthedocs.org/projects/python-atlasapi/badge/?version=latest
37 |    :target: https://python-atlasapi.readthedocs.io/en/latest/?badge=latest
38 |
39 | Found at https://python-atlasapi.readthedocs.io/
40 |
41 | Autobuilt on each commit.
42 |
43 | Installation
44 | ------------
45 |
46 | This package is available for Python 3.6+.
47 |
48 | .. image:: https://badge.fury.io/py/atlasapi.svg
49 | :target: https://pypi.org/project/atlasapi/
50 |
51 |
52 | You can install the latest released version from pypi.
53 |
54 | .. code:: bash
55 |
56 | pip3 install atlasapi
57 |
58 |
59 |
60 |
61 | Usage
62 | -----
63 |
64 | Get All Database Users
65 | ^^^^^^^^^^^^^^^^^^^^^^
66 |
67 | .. code:: python
68 |
69 | from atlasapi.atlas import Atlas
70 |
71 | a = Atlas("","","")
72 |
73 | # Low level Api
74 | details = a.DatabaseUsers.get_all_database_users(pageNum=1, itemsPerPage=100)
75 |
76 | # Iterable
77 | for user in a.DatabaseUsers.get_all_database_users(iterable=True):
78 | print(user["username"])
79 |
80 | Create a Database User
81 | ^^^^^^^^^^^^^^^^^^^^^^
82 |
83 | .. code:: python
84 |
85 | from atlasapi.atlas import Atlas
86 | from atlasapi.specs import DatabaseUsersPermissionsSpecs, RoleSpecs
87 |
88 | a = Atlas("","","")
89 |
90 | p = DatabaseUsersPermissionsSpecs("test", "password for test user")
91 | p.add_roles("test-db",
92 | [RoleSpecs.dbAdmin,
93 | RoleSpecs.readWrite])
94 | p.add_role("other-test-db", RoleSpecs.readWrite, "a_collection")
95 |
96 | details = a.DatabaseUsers.create_a_database_user(p)
97 |
98 | Update a Database User
99 | ^^^^^^^^^^^^^^^^^^^^^^
100 |
101 | .. code:: python
102 |
103 | from atlasapi.atlas import Atlas
104 | from atlasapi.specs import DatabaseUsersUpdatePermissionsSpecs, RoleSpecs
105 |
106 | a = Atlas("","","")
107 |
108 | # Update roles and password
109 | p = DatabaseUsersUpdatePermissionsSpecs("password for test user")
110 | p.add_role("test-db", RoleSpecs.read, "a_collection")
111 |
112 | details = a.DatabaseUsers.update_a_database_user("test", p)
113 |
114 | Delete a Database User
115 | ^^^^^^^^^^^^^^^^^^^^^^
116 |
117 | .. code:: python
118 |
119 | from atlasapi.atlas import Atlas
120 |
121 | a = Atlas("","","")
122 |
123 | details = a.DatabaseUsers.delete_a_database_user("test")
124 |
125 | Get a Single Database User
126 | ^^^^^^^^^^^^^^^^^^^^^^^^^^
127 |
128 | .. code:: python
129 |
130 | from atlasapi.atlas import Atlas
131 |
132 | a = Atlas("","","")
133 |
134 | details = a.DatabaseUsers.get_a_single_database_user("test")
135 |
136 | Clusters
137 | ^^^^^^^^
138 |
139 | .. code:: python
140 |
141 | from atlasapi.atlas import Atlas
142 | from atlasapi.clusters import AdvancedOptions
143 |
144 | a = Atlas("","","")
145 |
146 | # Is existing cluster ?
147 | a.Clusters.is_existing_cluster("cluster-dev")
148 |
149 | # Get All Clusters
150 | for cluster in a.Clusters.get_all_clusters(iterable=True):
151 | print(cluster["name"])
152 |
153 | # Get a Single Cluster
154 | details = a.Clusters.get_single_cluster("cluster-dev")
155 |
156 | # Delete a Cluster (dry run, raise ErrConfirmationRequested)
157 | details = a.Clusters.delete_cluster("cluster-dev")
158 |
159 | # Delete a Cluster (approved)
160 | details = a.Clusters.delete_cluster("cluster-dev", areYouSure=True)
161 |
162 | # Create a Simple Replica Set Cluster
163 |
164 | details = a.Clusters.create_basic_rs(name="cluster-dev")
165 |
166 | # Create a cluster
167 |
168 | provider_settings: ProviderSettings = ProviderSettings()
169 | regions_config = RegionConfig()
170 | replication_specs = ReplicationSpecs(regions_config={provider_settings.region_name: regions_config.__dict__})
171 |
172 | cluster_config = ClusterConfig(name='test2',
173 | providerSettings=provider_settings,
174 | replication_specs=replication_specs)
175 |
176 | output = a.Clusters.create_cluster(cluster_config)
177 |
178 |
179 | # Modify a cluster
180 | existing_config = a.Clusters.get_single_cluster_as_obj(cluster=TEST_CLUSTER_NAME)
181 |    existing_config.providerSettings.instance_size_name = InstanceSizeName.M10
182 |    existing_config.disk_size_gb = 13
183 |    new_config = a.Clusters.modify_cluster('pyAtlasAPIClustersTest', existing_config)
184 | pprint(new_config)
185 |
186 | # Modify cluster instance size
187 |
188 | a.Clusters.modify_cluster_instanct_size(cluster='pyAtlasAPIClustersTest',new_cluster_size=InstanceSizeName.M20)
189 |
190 | # Pause(unpause) a cluster
191 |
192 | a.Clusters.pause_cluster(cluster='pyAtlasAPIClustersTest', toggle_if_paused=True)
193 |
194 |
195 | # Get Advanced Options
196 | a.Clusters.get_single_cluster_advanced_options(cluster='pyAtlasAPIClustersTest')
197 |
198 | # Set Advanced Options
199 | options = AdvancedOptions(failIndexKeyTooLong=True)
200 | self.a.Clusters.modify_cluster_advanced_options(cluster='pyAtlasAPIClustersTest',
201 | advanced_options=options)
202 |
203 | Alerts
204 | ^^^^^^
205 |
206 | .. code:: python
207 |
208 | from atlasapi.atlas import Atlas
209 | from atlasapi.specs import AlertStatusSpec
210 |
211 | a = Atlas("","","")
212 |
213 | # Get All Alerts in OPEN status
214 | for alert in a.Alerts.get_all_alerts(AlertStatusSpec.OPEN, iterable=True):
215 | print(alert["id"])
216 |
217 | # Get an Alert
218 | details = a.Alerts.get_an_alert("597f221fdf9db113ce1755cd")
219 |
220 | # Acknowledge an Alert (BROKEN)
221 | # until (now + 6 hours)
222 | from datetime import datetime, timezone, timedelta
223 | now = datetime.now(timezone.utc)
224 | until = now + timedelta(hours=6)
225 | details = a.Alerts.acknowledge_an_alert("597f221fdf9db113ce1755cd", until, "Acknowledge reason")
226 |
227 | # forever (BROKEN)
228 | details = a.Alerts.acknowledge_an_alert_forever("597f221fdf9db113ce1755cd", "Acknowledge reason")
229 |
230 | # Unacknowledge an Alert (BROKEN)
231 | details = a.Alerts.unacknowledge_an_alert("597f221fdf9db113ce1755cd")
232 |
233 |
234 |
235 | Metrics (Measurements)
236 | ^^^^^^^^^^^^^^^^^^^^^^
237 | Examples coming soon.
238 |
239 | Logs
240 | ^^^^^^^^^^^^^^^^^^^
241 |
242 | .. code:: python
243 |
244 | from atlasapi.atlas import Atlas
245 | from atlasapi.specs import AlertStatusSpec
246 |
247 | atlas = Atlas("","","")
248 |
249 | atlas.Hosts.fill_host_list()
250 | test_host = atlas.Hosts.host_list[0]
251 | print(f'Will get a mongod log for {test_host.hostname}')
252 | out = atlas.Hosts.get_loglines_for_host(host_obj=test_host, log_name=AtlasLogNames.MONGODB)
253 | for each_line in out:
254 | print(each_line.__dict__)
255 |
256 |
257 | Whitelists
258 | ^^^^^^^^^^
259 | Examples coming soon.
260 |
261 | Maintenance Windows
262 | ^^^^^^^^^^^^^^^^^^^
263 |
264 | Examples coming soon.
265 |
266 |
267 |
268 |
269 |
270 | Error Types
271 | -----------
272 |
273 | About ErrAtlasGeneric
274 | ^^^^^^^^^^^^^^^^^^^^^
275 |
276 | All ErrAtlas* Exception class inherit from ErrAtlasGeneric.
277 |
278 | .. code:: python
279 |
280 | try:
281 | ...
282 | except ErrAtlasGeneric as e:
283 | c, details = e.getAtlasResponse()
284 |
285 | - 'c'
286 | HTTP return code (4xx or 5xx for an error, 2xx otherwise)
287 | - 'details'
288 | Response payload
289 |
290 | Exceptions
291 | ^^^^^^^^^^
292 |
293 | - ErrRole
294 | A role is not compatible with Atlas
295 | - ErrPagination
296 | An issue occurs during a "Get All" function with 'iterable=True'
297 | - ErrPaginationLimits
298 | Out of limit on 'pageNum' or 'itemsPerPage' parameters
299 | - ErrAtlasBadRequest
300 | Something was wrong with the client request.
301 | - ErrAtlasUnauthorized
302 | Authentication is required
303 | - ErrAtlasForbidden
304 | Access to the specified resource is not permitted.
305 | - ErrAtlasNotFound
306 | The requested resource does not exist.
307 | - ErrAtlasMethodNotAllowed
308 | The HTTP method is not supported for the specified resource.
309 | - ErrAtlasConflict
310 | This is typically the response to a request to create or modify a property of an entity that is unique when an existing entity already exists with the same value for that property.
311 | - ErrAtlasServerErrors
312 | Something unexpected went wrong.
313 | - ErrConfirmationRequested
314 | Confirmation requested to execute the call.
315 |
316 |
317 |
318 | Bugs or Issues
319 | --------------
320 |
321 | Please report bugs, issues or feature requests to `Github
322 | Issues `__
323 |
324 | Testing
325 | -------
326 |
327 | `Circle Ci `__
328 |
329 | develop
330 |
331 | .. image:: https://circleci.com/gh/mgmonteleone/python-atlasapi/tree/develop.svg?style=svg&circle-token=34ce5f4745b141a0ee643bd212d85359c0594884
332 | :target: https://circleci.com/gh/mgmonteleone/python-atlasapi/tree/develop
333 |
334 | master
335 |
336 | .. image:: https://circleci.com/gh/mgmonteleone/python-atlasapi/tree/master.svg?style=svg&circle-token=34ce5f4745b141a0ee643bd212d85359c0594884
337 | :target: https://circleci.com/gh/mgmonteleone/python-atlasapi/tree/master
338 |
339 | .. image:: https://readthedocs.org/projects/python-atlasapi/badge/?version=latest
340 | :target: https://python-atlasapi.readthedocs.io/en/latest/?badge=latest
341 | :alt: Documentation Status
342 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | | Version | Supported |
6 | | ------- | ------------------ |
7 | | 0.10.x | :white_check_mark: |
8 |
9 | ## Reporting a Vulnerability
10 |
11 | Please report any security issues with this package directly to mgm@mgm.dev.
12 |
13 |
--------------------------------------------------------------------------------
/atlasapi/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | # __init__.py
16 |
17 | # Version of the atlasapi package
18 | __version__ = "2.0.11"
19 |
--------------------------------------------------------------------------------
/atlasapi/alerts.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # Modifications copyright (C) 2019 Matthew Monteleone
15 |
import uuid
from datetime import datetime, timezone
from enum import Enum
from pprint import pprint
from typing import List, NewType, Optional, Union

import pytz
22 |
FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # Atlas API timestamp format (e.g. 2019-01-01T00:00:00Z)


class Alert(object):
    """A single Atlas alert, built from the raw dict returned by the API."""

    def __init__(self, data_dict: dict):
        # Fixed: the original code repeatedly reassigned ``alertConfigId`` with
        # the parsed "created"/"lastNotified"/... timestamps (copy/paste bug),
        # so the config id was lost and ``created`` was never stored.
        self.alertConfigId: Optional[str] = data_dict.get('alertConfigId', None)
        self.created: Optional[datetime] = self._parse_date(data_dict.get('created', None))
        self.lastNotified: Optional[datetime] = self._parse_date(data_dict.get('lastNotified', None))
        self.resolved: Optional[datetime] = self._parse_date(data_dict.get('resolved', None))
        self.updated: Optional[datetime] = self._parse_date(data_dict.get('updated', None))

        self.currentValue: dict = data_dict.get('currentValue', None)
        self.eventTypeName: str = data_dict.get('eventTypeName', None)
        self.groupId: str = data_dict.get('groupId', None)
        self.hostnameAndPort: str = data_dict.get('hostnameAndPort', None)
        self.id: str = data_dict.get('id', None)
        self.links: list = data_dict.get('links', None)
        self.metricName: str = data_dict.get('metricName', None)
        self.replicaSetName: str = data_dict.get('replicaSetName', None)
        self.status: str = data_dict.get('status', None)
        self.typeName: str = data_dict.get('typeName', None)

    @staticmethod
    def _parse_date(date_str: Optional[str]) -> Optional[datetime]:
        """Parse an Atlas timestamp into an aware UTC datetime, or None.

        Fixed: also catches TypeError, which strptime raises when the field is
        absent (None); the original caught only ValueError and crashed.
        timezone.utc replaces pytz.UTC — same UTC tzinfo, no third-party dep.
        """
        try:
            return datetime.strptime(date_str, FORMAT).astimezone(tz=timezone.utc)
        except (ValueError, TypeError):
            return None
--------------------------------------------------------------------------------
/atlasapi/api_keys.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from datetime import datetime
16 | from typing import Tuple, NewType, List, Optional
17 |
18 | from dateutil.parser import parse
19 | from enum import Enum
20 | from pprint import pprint
21 | from atlasapi.lib import logger
22 | from atlasapi.whitelist import WhitelistEntry
23 | import ipaddress
24 | import requests
25 | from awspublicranges.ranges import AwsIpRanges, AwsPrefix
26 |
27 |
class Roles(Enum):
    """Enum of the roles which can be assigned to an Atlas Public API Key.

    Values are the humanized form of the role name ('ORG_OWNER' -> 'Org owner').
    """
    ORG_OWNER = 'ORG_OWNER'.replace('_', ' ').capitalize()
    # Fixed: was 'ORG_OWNER', which made ORG_MEMBER a mere alias of ORG_OWNER
    # (duplicate values collapse into aliases in an Enum).
    ORG_MEMBER = 'ORG_MEMBER'.replace('_', ' ').capitalize()
    ORG_GROUP_CREATOR = 'ORG_GROUP_CREATOR'.replace('_', ' ').capitalize()
    ORG_BILLING_ADMIN = 'ORG_BILLING_ADMIN'.replace('_', ' ').capitalize()
    ORG_READ_ONLY = 'ORG_READ_ONLY'.replace('_', ' ').capitalize()

    GROUP_CHARTS_ADMIN = 'GROUP_CHARTS_ADMIN'.replace('_', ' ').capitalize()
    GROUP_CLUSTER_MANAGER = 'GROUP_CLUSTER_MANAGER'.replace('_', ' ').capitalize()
    GROUP_DATA_ACCESS_ADMIN = 'GROUP_DATA_ACCESS_ADMIN'.replace('_', ' ').capitalize()
    GROUP_DATA_ACCESS_READ_ONLY = 'GROUP_DATA_ACCESS_READ_ONLY'.replace('_', ' ').capitalize()
    GROUP_DATA_ACCESS_READ_WRITE = 'GROUP_DATA_ACCESS_READ_WRITE'.replace('_', ' ').capitalize()
    GROUP_OWNER = 'GROUP_OWNER'.replace('_', ' ').capitalize()
    # Fixed: was 'GROUP_OWNER', which made GROUP_READ_ONLY an alias of GROUP_OWNER.
    GROUP_READ_ONLY = 'GROUP_READ_ONLY'.replace('_', ' ').capitalize()
45 |
46 |
class ApiKeyRoles(object):
    """A single role assignment attached to an Atlas Programmatic API key."""

    def __init__(self, group_id: str, org_id: str, role_name: Roles):
        """API roles assigned to an API key.

        Args:
            group_id: Project (group) the role is scoped to, if any.
            org_id: Organization the role is scoped to, if any.
            role_name: The role name itself.
        """
        self.role_name: Roles = role_name
        self.org_id: str = org_id
        self.group_id: str = group_id

    @classmethod
    def fill_from_dict(cls, data_dict: dict):
        """Build an ApiKeyRoles from a raw Atlas response dict.

        :param data_dict: A dict as returned from Atlas
        :return: An ApiKeyRoles instance
        """
        return cls(
            data_dict.get('groupId', None),
            data_dict.get('orgId', None),
            data_dict.get('roleName', None),
        )
73 |
74 |
class ApiKey(object):
    def __init__(self, desc: str = None, id: str = None, private_key: str = None, public_key: str = None,
                 roles: List['ApiKeyRoles'] = None):
        """An Atlas Public API access key

        Includes the roles assigned to each key.

        Args:
            desc: Human-readable description of the key.
            id: Unique identifier of the key.
            private_key: The private half of the key pair.
            public_key: The public half of the key pair.
            roles: Role assignments attached to the key.
        """
        self.roles = roles
        self.public_key = public_key
        self.private_key = private_key
        self.id = id
        self.desc = desc

    @classmethod
    def fill_from_dict(cls, data_dict: dict):
        """
        Fills the object from an Atlas Dict

        :param data_dict: A dict as returned from Atlas
        :return: An ApiKey instance
        """
        desc = data_dict.get('desc', None)
        public_key = data_dict.get('publicKey', None)
        private_key = data_dict.get('privateKey', None)
        id = data_dict.get('id', None)
        # Fixed: a missing/None 'roles' key previously made the loop raise
        # TypeError; treat it as an empty role list instead.
        roles_raw: List[dict] = data_dict.get('roles') or []
        roles: List['ApiKeyRoles'] = [ApiKeyRoles.fill_from_dict(data_dict=each_role) for each_role in roles_raw]

        return cls(desc, id, private_key, public_key, roles)
113 |
--------------------------------------------------------------------------------
/atlasapi/atlas_types.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, NewType, List
2 | from datetime import datetime
3 |
# Convenience type aliases used across the API wrapper modules.
OptionalInt = NewType('OptionalInt', Optional[int])
OptionalStr = NewType('OptionalStr', Optional[str])
OptionalDateTime = NewType('OptionalDateTime', Optional[datetime])
OptionalBool = NewType('OptionalBool', Optional[bool])
# Fixed: the NewType label read 'ListofStr', mismatching the variable name.
ListOfStr = NewType('ListOfStr', List[str])
# Fixed: a "list of dict" alias should wrap List[dict], not List[str]
# (copy/paste bug); the label is also aligned with the variable name.
ListofDict = NewType('ListofDict', List[dict])
OptionalFloat = NewType('OptionalFloat', Optional[float])
--------------------------------------------------------------------------------
/atlasapi/atlas_users.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, List
2 |
3 |
class AtlasUser:
    """An Atlas (human) user account, as exchanged with the Atlas Users API."""

    def __init__(self, country: str, email_address: str, first_name: str, last_name: str, roles: Optional[List[dict]],
                 username: Optional[str], links: Optional[List[str]] = None, mobile_number: str = None,
                 password: str = None, team_ids: Optional[List[str]] = None, id: str = None):
        # Assignments grouped: identity, contact details, then access data.
        self.id: Optional[str] = id
        self.username: str = username
        self.first_name: str = first_name
        self.last_name: str = last_name
        self.email_address: str = email_address
        self.mobile_number: str = mobile_number
        self.country: str = country
        self.roles: Optional[List[dict]] = roles
        self.team_ids: Optional[List[str]] = team_ids
        self.links: Optional[List[str]] = links
        self.password: str = password

    @classmethod
    def from_dict(cls, data_dict: dict):
        """Build an AtlasUser from a raw Atlas response dict."""
        get = data_dict.get
        return cls(
            get("country", None),
            get("emailAddress", None),
            get("firstName", None),
            get("lastName", None),
            get("roles", []),
            get("username", None),
            get("links", None),
            get("mobileNumber", None),
            get("password", None),
            get("teamIds", None),
            get("id", None),
        )
36 |
--------------------------------------------------------------------------------
/atlasapi/cloud_backup.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | Cloud Backups Module
17 |
18 | Provides access to Cloud Backups and Cloud backup restore endpoints
19 | """
20 |
21 | from enum import Enum
22 | from typing import List, NewType, Optional
23 | from datetime import datetime
24 | from atlasapi.lib import ProviderName, ClusterType
25 | from dateutil.parser import parse
26 | from pprint import pprint
27 | from distutils.util import strtobool
28 | import logging
29 |
30 | logger = logging.getLogger(name='Atlas_cloud_backup')
31 |
32 |
def try_date(str_in: str) -> Optional[datetime]:
    """Best-effort parse of *str_in* into a datetime; None when unparseable."""
    try:
        return parse(str_in)
    except (ValueError, TypeError, AttributeError):
        logger.debug(f'Could not parse a date from : {str_in}')
        return None
40 |
41 |
def try_bool(str_in: str) -> bool:
    """Best-effort coercion of *str_in* to a bool.

    Accepts the same truthy/falsy strings as ``distutils.util.strtobool``;
    unparseable values fall back to False, and real booleans pass through.

    Fixed: no longer relies on distutils, which is deprecated (PEP 632) and
    removed from the stdlib in Python 3.12; also returns a real bool instead
    of strtobool's 0/1 ints, matching the annotated return type.
    """
    if isinstance(str_in, bool):
        return str_in
    try:
        token = str_in.strip().lower()
    except AttributeError:
        # Non-string, non-bool input (e.g. None) — same fallback as before.
        logger.debug(f'Could not parse a bool from : {str_in}')
        return False
    if token in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if token in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    logger.debug(f'Could not parse a bool from : {str_in}')
    return False
52 |
53 |
class SnapshotType(Enum):
    """Origin of a cloud backup snapshot (maps the Atlas 'snapshotType' field)."""
    ONDEMAND = "On Demand"
    SCHEDULED = "Scheduled"
    FALLBACK = "Fallback"
58 |
59 |
class SnapshotStatus(Enum):
    """Lifecycle state of a snapshot (maps the Atlas 'status' field)."""
    QUEUED = "Queued"
    INPROGRESS = "In Progress"
    COMPLETED = "Completed"
    FAILED = "Failed"
65 |
66 |
class DeliveryType(Enum):
    """How a snapshot restore is delivered.

    Member *names* (not values) match the Atlas 'deliveryType' field, so
    lookups are done with ``DeliveryType[raw_value]`` and payloads use
    ``.name``; the values are human-readable descriptions.
    """
    automated = "Automated restore to Atlas cluster"
    download = "manual download of archived data directory"
    pointInTime = "Automated point in time restore to Atlas Cluster"
71 |
72 |
class SnapshotRestore(object):
    """Request body for restoring a cloud backup snapshot."""

    def __init__(self, delivery_type: DeliveryType,
                 snapshot_id: str,
                 target_cluster_name: str = None,
                 target_group_id: str = None):
        self.deliveryType = delivery_type
        self.snapshot_id = snapshot_id
        self.target_cluster_name = target_cluster_name
        self.target_group_id = target_group_id

    @property
    def as_dict(self) -> dict:
        """The Atlas-API-shaped payload for this restore request."""
        return {
            'deliveryType': self.deliveryType.name,
            'snapshotId': self.snapshot_id,
            'targetClusterName': self.target_cluster_name,
            'targetGroupId': self.target_group_id,
        }
91 |
92 |
class SnapshotRestoreResponse(SnapshotRestore):
    """A restore job as reported back by the Atlas API."""

    def __init__(self, restore_id: str, delivery_type: DeliveryType, snapshot_id: str, target_cluster_name: str,
                 target_group_id: str, cancelled: bool = False,
                 created_at: datetime = None, expired: bool = False, expires_at: datetime = None,
                 finished_at: datetime = None, links: list = None, snapshot_timestamp: datetime = None,
                 target_deployment_item_name: str = None,
                 delivery_url: str = None):
        super().__init__(delivery_type, snapshot_id, target_cluster_name, target_group_id)
        self.restore_id = restore_id
        self.cancelled = cancelled
        self.created_at = created_at
        self.expired = expired
        self.expires_at = expires_at
        self.finished_at = finished_at
        self.links = links
        self.snapshot_timestamp = snapshot_timestamp
        self.target_deployment_item_name = target_deployment_item_name
        self.delivery_url = delivery_url

    @classmethod
    def from_dict(cls, data_dict):
        """Build a SnapshotRestoreResponse from a raw Atlas response dict."""
        try:
            delivery_type = DeliveryType[data_dict.get('deliveryType')]
        except KeyError:
            logger.warning(f'Got an unmapped deliveryType : {data_dict.get("deliveryType")}.')
            delivery_type = None
        return cls(
            restore_id=data_dict.get('id'),
            delivery_type=delivery_type,
            snapshot_id=data_dict.get('snapshotId'),
            target_cluster_name=data_dict.get('targetClusterName'),
            target_group_id=data_dict.get('targetGroupId'),
            cancelled=try_bool(data_dict.get('cancelled')),
            # createdAt/expiresAt/finishedAt do not always appear in live
            # responses; possible upstream documentation discrepancy.
            created_at=try_date(data_dict.get('createdAt')),
            expired=try_bool(data_dict.get('expired')),
            expires_at=try_date(data_dict.get('expiresAt')),
            finished_at=try_date(data_dict.get('finishedAt')),
            links=data_dict.get('links'),
            snapshot_timestamp=try_date(data_dict.get('timestamp')),
            # Not in the published docs; observed in responses. TODO: file docs ticket.
            target_deployment_item_name=data_dict.get('targetDeploymentItemName'),
            delivery_url=data_dict.get('deliveryUrl', None),
        )
147 |
148 |
class CloudBackupRequest(object):
    """Request body for taking an on-demand cloud backup snapshot."""

    def __init__(self, cluster_name: str, retention_days: int = 1, description: str = 'Created by pyAtlasAPI') -> None:
        self.cluster_name = cluster_name
        self.retentionInDays = retention_days
        self.description = description

    @property
    def as_dict(self):
        """The payload sent to the Atlas API (cluster_name is not part of the body)."""
        return {'description': self.description, 'retentionInDays': self.retentionInDays}
158 |
159 |
class CloudBackupSnapshot(object):
    """A single Cloud Backup snapshot, as returned by the Atlas API."""

    def __init__(self, id: Optional[str] = None,
                 cloud_provider: Optional[ProviderName] = None,
                 created_at: Optional[datetime] = None,
                 description: Optional[str] = None,
                 expires_at: Optional[datetime] = None,
                 links: Optional[List] = None,
                 masterkey_uuid: Optional[str] = None,
                 members: Optional[list] = None,
                 mongod_version: Optional[str] = None,
                 replica_set_name: Optional[str] = None,
                 snapshot_ids: Optional[list] = None,
                 snapshot_type: Optional[SnapshotType] = None,
                 status: Optional[SnapshotStatus] = None,
                 storage_size_bytes: Optional[int] = None,
                 type: Optional[ClusterType] = None):
        """
        Details of a Cloud Provider Snapshot.

        Args:
            id: Unique identifier of the snapshot.
            cloud_provider: Cloud provider that stores this snapshot. Atlas returns this parameter
                when "type": "replicaSet".
            created_at: When the snapshot was taken.
            description: Description of the snapshot. Atlas returns this parameter when "status": "onDemand".
            expires_at: When the snapshot will be deleted.
            links: One or more links to sub-resources and/or related resources. The relations between URLs are
                explained in the Web Linking Specification
            masterkey_uuid: Unique identifier of the AWS KMS Customer Master Key used to encrypt the snapshot.
                Atlas returns this value for clusters using Encryption at Rest via Customer KMS.
            members: List of snapshots and the cloud provider where the snapshots are stored.
                Atlas returns this parameter when "type": "shardedCluster".
            mongod_version: MongoDB server version of the snapshotted deployment.
            replica_set_name: Label given to the replica set from which Atlas took this snapshot.
                Atlas returns this parameter when "type": "replicaSet".
            snapshot_ids: Unique identifiers of the snapshots created for the shards and config server
                for a sharded cluster. Atlas returns this parameter when "type": "shardedCluster".
                These identifiers should match those given in the members[n].id parameters.
                This allows you to map a snapshot to its shard or config server name.
            snapshot_type: Type of snapshot. Atlas can return onDemand or scheduled.
            status: Current status of the snapshot. Atlas can return one of the following values:
                (queued, inProgress, completed, failed)
            storage_size_bytes: Size of the snapshot in bytes.
            type: Type of cluster. Atlas can return replicaSet or shardedCluster.


        """
        self.type: Optional[ClusterType] = type
        self.storage_size_bytes: Optional[int] = storage_size_bytes
        self.status: Optional[SnapshotStatus] = status
        self.snapshot_type: Optional[SnapshotType] = snapshot_type
        self.snapshot_ids: Optional[list] = snapshot_ids
        self.replica_set_name: Optional[str] = replica_set_name
        self.mongod_version: Optional[str] = mongod_version
        self.members: Optional[list] = members
        self.masterkey_uuid: Optional[str] = masterkey_uuid
        self.links: Optional[list] = links
        self.expires_at: Optional[datetime] = expires_at
        self.description: Optional[str] = description
        self.created_at: Optional[datetime] = created_at
        self.cloud_provider: Optional[ProviderName] = cloud_provider
        self.id: Optional[str] = id

    @classmethod
    def from_dict(cls, data_dict: dict):
        """Build a CloudBackupSnapshot from a raw Atlas response dict."""
        id = data_dict.get('id', None)
        try:
            cloud_provider = ProviderName[data_dict.get('cloudProvider')]
        except KeyError:
            # Missing/unknown provider in the payload -> fall back to TENANT.
            cloud_provider = ProviderName.TENANT
        created_at = try_date(data_dict.get('createdAt'))
        expires_at = try_date(data_dict.get('expiresAt'))
        description = data_dict.get('description')
        # NOTE(review): the next three lookups raise if the field is missing
        # (None.upper()) or unmapped — assumed always present in Atlas
        # responses; confirm against the API reference.
        snapshot_type = SnapshotType[data_dict.get('snapshotType').upper()]
        cluster_type = ClusterType[data_dict.get('type').upper()]
        snapshot_status = SnapshotStatus[data_dict.get('status').upper()]
        storage_size_bytes = data_dict.get('storageSizeBytes')
        replica_set_name = data_dict.get('replicaSetName')
        links = data_dict.get('links')
        masterkey = data_dict.get('masterKeyUUID')
        members = data_dict.get('members')
        mongod_version = data_dict.get('mongodVersion')
        snapshot_ids = data_dict.get('snapshotIds')
        return cls(id=id,
                   cloud_provider=cloud_provider,
                   created_at=created_at,
                   expires_at=expires_at,
                   description=description,
                   snapshot_type=snapshot_type,
                   type=cluster_type,
                   status=snapshot_status,
                   storage_size_bytes=storage_size_bytes,
                   replica_set_name=replica_set_name,
                   links=links,
                   masterkey_uuid=masterkey,
                   members=members,
                   mongod_version=mongod_version,
                   snapshot_ids=snapshot_ids
                   )
259 |
--------------------------------------------------------------------------------
/atlasapi/errors.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | Errors module
17 |
18 | Provides all specific Exceptions
19 | """
20 |
21 | from atlasapi.settings import Settings
22 | from pprint import pprint
23 | import logging
24 | from typing import Tuple
25 | logger = logging.getLogger('Error_Handler')
26 |
27 |
class ErrRole(Exception):
    """A role is not compatible with Atlas."""
    pass
31 |
32 |
class ErrPagination(Exception):
    """Raised when an issue occurs inside a "Get All" pagination loop."""

    # Fixed message; pagination failures carry no additional context.
    _MESSAGE = "Issue occurs during the pagination."

    def __init__(self):
        super().__init__(ErrPagination._MESSAGE)
38 |
39 |
class ErrPaginationLimits(Exception):
    """Out of limit on 'pageNum' or 'itemsPerPage' parameters

    Constructor

    Args:
        error_code (int): ERR_PAGE_NUM or ERR_ITEMS_PER_PAGE
    """
    ERR_PAGE_NUM = 0
    ERR_ITEMS_PER_PAGE = 1

    def __init__(self, error_code):
        if error_code == ErrPaginationLimits.ERR_PAGE_NUM:
            super().__init__("pageNum can't be smaller than 1")
        elif error_code == ErrPaginationLimits.ERR_ITEMS_PER_PAGE:
            super().__init__("itemsPerPage can't be smaller than %d and greater than %d" % (
                Settings.itemsPerPageMin, Settings.itemsPerPageMax))
        else:
            # Unknown code: surface it verbatim rather than hiding it.
            super().__init__(str(error_code))

    @staticmethod
    def checkAndRaise(pageNum, itemsPerPage):
        """Check and Raise an Exception if needed

        Fixed: previously defined without @staticmethod; it never used an
        instance and is only meaningful as a class-level helper.

        Args:
            pageNum (int): Page number
            itemsPerPage (int): Number of items per Page

        Raises:
            ErrPaginationLimits: If we are out of limits

        """
        if pageNum < 1:
            raise ErrPaginationLimits(ErrPaginationLimits.ERR_PAGE_NUM)

        if itemsPerPage < Settings.itemsPerPageMin or itemsPerPage > Settings.itemsPerPageMax:
            raise ErrPaginationLimits(ErrPaginationLimits.ERR_ITEMS_PER_PAGE)
76 |
77 |
class ErrAtlasGeneric(Exception):
    """Base class for errors returned by the Atlas API.

    Args:
        msg (str): Short description of the error
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, msg: str, c: int, details: dict):
        super().__init__(msg)
        self.code = c
        self.details = details

    def getAtlasResponse(self) -> Tuple[int, dict]:
        """Return the (HTTP code, response payload) pair for this error."""
        return self.code, self.details
102 |
103 |
class ErrMaintenanceError(ErrAtlasGeneric):
    """Atlas : Maintenance-related error.

    Raised by ErrAtlasBadRequest for maintenance errorCodes
    (ATLAS_MAINTENANCE_ALREADY_SCHEDULED, ATLAS_NUM_MAINTENANCE_DEFERRALS_EXCEEDED).

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        # Fixed: the fallback message previously interpolated
        # details.get("detail") into the default — but the default is only
        # used when "detail" is absent, so it always rendered "... None".
        super().__init__(details.get('detail', 'Atlas Maintenance Error'), c, details)
116 |
117 |
class ErrAtlasBadRequest(ErrAtlasGeneric):
    """Atlas : Bad Request

    Re-raises a more specific exception when the Atlas ``errorCode`` is one
    of the known special cases; otherwise logs and raises itself.

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        error_code = details.get('errorCode', None)
        if error_code == 'DUPLICATE_CLUSTER_NAME':
            raise ErrAtlasDuplicateClusterName(c, details)
        if error_code == 'RESOURCE_NOT_FOUND_FOR_JOB':
            raise ErrAtlasJobError(c, details)
        if error_code == 'CANNOT_CANCEL_AUTOMATED_RESTORE':
            raise ErrAtlasBackupError(c, details)
        if error_code in ('ATLAS_MAINTENANCE_ALREADY_SCHEDULED',
                          'ATLAS_NUM_MAINTENANCE_DEFERRALS_EXCEEDED'):
            raise ErrMaintenanceError(c, details)

        logger.critical(f"A generic error was raised")
        logger.critical(details)
        super().__init__(f"Something was wrong with the client request. ({details.get('detail', None)})"
                         f" [{details.get('errorCode', None)}]", c, details)
145 |
146 |
class ErrAtlasJobError(ErrAtlasGeneric):
    """Atlas : Job error

    Raised by ErrAtlasBadRequest when errorCode == RESOURCE_NOT_FOUND_FOR_JOB.

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        # Fixed: the fallback message said 'Duplicate Error' — a copy/paste
        # leftover from ErrAtlasDuplicateClusterName.
        super().__init__(details.get('detail', 'Atlas Job Error'), c, details)
159 |
160 |
class ErrAtlasDuplicateClusterName(ErrAtlasGeneric):
    """Atlas : Duplicate Clustername

    Raised by ErrAtlasBadRequest when errorCode == DUPLICATE_CLUSTER_NAME.

    Constructor

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        # Prefer the server-provided 'detail' text; fall back to a generic message.
        super().__init__(details.get('detail', 'Duplicate Error'), c, details)
173 |
174 |
class ErrAtlasBackupError(ErrAtlasGeneric):
    """Atlas : Atlas Backup

    Raised by ErrAtlasBadRequest when errorCode == CANNOT_CANCEL_AUTOMATED_RESTORE.

    Constructor

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        # Prefer the server-provided 'detail' text; fall back to a generic message.
        super().__init__(details.get('detail', 'Atlas Backup error'), c, details)
187 |
188 |
class ErrAtlasUnauthorized(ErrAtlasGeneric):
    """Atlas : Unauthorized

    The request lacked valid authentication credentials.

    Constructor

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        super().__init__("Authentication is required", c, details)
201 |
202 |
class ErrAtlasForbidden(ErrAtlasGeneric):
    """Atlas : Forbidden

    Re-raised as ErrAtlasForbiddenWL when the organization enforces an
    IP whitelist (errorCode ORG_REQUIRES_WHITELIST).

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        # Fixed: details['errorCode'] raised KeyError when the payload had no
        # errorCode field, masking the real forbidden error; use .get().
        if details.get('errorCode', None) == 'ORG_REQUIRES_WHITELIST':
            raise ErrAtlasForbiddenWL(c, details)
        else:
            super().__init__("Access to the specified resource is not permitted.", c, details)
218 |
219 |
class ErrAtlasForbiddenWL(ErrAtlasGeneric):
    """Atlas : Forbidden by WhiteList

    Raised by ErrAtlasForbidden when errorCode == ORG_REQUIRES_WHITELIST.

    Constructor

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        super().__init__("This organization requires access through a whitelist of ip ranges.", c, details)
232 |
233 |
class ErrAtlasNotFound(ErrAtlasGeneric):
    """Atlas : Not Found

    The requested resource does not exist (HTTP 404-style response).

    Constructor

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        super().__init__("The requested resource does not exist.", c, details)
246 |
247 |
class ErrAtlasMethodNotAllowed(ErrAtlasGeneric):
    """Atlas : Method Not Allowed

    The HTTP verb used is not supported by the target endpoint.

    Constructor

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        super().__init__("The HTTP method is not supported for the specified resource.", c, details)
260 |
261 |
class ErrAtlasConflict(ErrAtlasGeneric):
    """Atlas : Conflict

    An entity with the same unique property value already exists.

    Constructor

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        super().__init__(
            "This is typically the response to a request to create or modify a property of an entity that is unique "
            "when an existing entity already exists with the same value for that property.",
            c, details)
277 |
278 |
class ErrAtlasRestoreConflictError(ErrAtlasGeneric):
    """Atlas : RestoreConflictError

    The cluster's current restore state prevents the requested API action.

    Constructor

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        super().__init__(
            "This is an error when there are issues with cluster state during restore which prevents API actions",
            c, details)
293 |
294 |
class ErrAtlasServerErrors(ErrAtlasGeneric):
    """Atlas : Server Errors

    Catch-all for unexpected server-side failures.

    Args:
        c (int): HTTP code
        details (dict): Response payload
    """

    def __init__(self, c, details):
        super().__init__("Something unexpected went wrong.", c, details)
        # Fixed: pprint() dumped the payload to stdout from library code;
        # route it through the module logger instead.
        logger.critical(details)
308 |
309 |
class ErrConfirmationRequested(Exception):
    """Raised when an operation needs an explicit confirmation that was not given.

    Args:
        msg (str): Short description of the error
    """

    def __init__(self, msg):
        # The message is supplied entirely by the caller.
        super().__init__(msg)
321 |
--------------------------------------------------------------------------------
/atlasapi/events.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from typing import NewType, List, Optional, Union, Dict
16 | from dateutil.parser import parse
17 | import logging
18 | from datetime import datetime
19 |
20 | from atlasapi.events_event_types import AtlasEventTypes
21 | import ipaddress
22 | from copy import copy
23 |
24 | logger = logging.getLogger(name='Atlas_events')
25 |
26 |
class _AtlasBaseEvent(object):
    """Base wrapper for an Atlas event payload (fields common to all events)."""

    def __init__(self, value_dict: dict) -> None:
        self.created_date: Optional[datetime] = None
        try:
            self.created_date = parse(value_dict.get("created", None))
        except (ValueError, TypeError) as e:
            # Fixed: parse(None) raises TypeError (not ValueError) when the
            # "created" key is absent; catch both so a missing timestamp
            # degrades to None instead of crashing.
            logger.warning("Could not parse datetime value for created_date: {}".format(e))
        self.event_type = AtlasEventTypes[value_dict.get('eventTypeName', 'UNKNOWN')]  # type: AtlasEventTypes
        self.group_id = value_dict.get('groupId', None)  # type: str
        self.id = value_dict.get('id', None)  # type: str
        self.is_global_admin = value_dict.get('isGlobalAdmin', False)  # type: bool
        self.links = value_dict.get('links', None)  # type: list
        self.event_dict = value_dict  # type: dict
        self.additional_data = value_dict.get('raw', None)

    def as_dict(self):
        """Return a serializable plain-dict view of the event."""
        return_dict = copy(self.__dict__)
        del return_dict['event_dict']
        del return_dict['additional_data']
        # Fixed: isoformat(None) raised TypeError when created_date could not
        # be parsed; leave the None in place instead.
        if self.created_date is not None:
            return_dict['created_date'] = datetime.isoformat(self.created_date)
        return_dict['event_type'] = self.event_type.name
        return_dict['event_type_desc'] = self.event_type.value

        if return_dict.get('remote_address'):
            return_dict['remote_address'] = return_dict['remote_address'].__str__()
        return return_dict
55 |
56 |
class _AtlasUserBaseEvent(_AtlasBaseEvent):
    """Base wrapper for Atlas events attributed to a specific user."""

    def __init__(self, value_dict: dict) -> None:
        super().__init__(value_dict)
        self.user_id = value_dict.get('userId', None)  # type: str
        self.username = value_dict.get('username')  # type: str
        raw_address = value_dict.get('remoteAddress', None)
        try:
            self.remote_address = ipaddress.ip_address(raw_address)  # type: ipaddress
        except ValueError:
            logger.info('No IP address found')
            self.remote_address = None
67 |
68 |
class AtlasEvent(_AtlasBaseEvent):
    """A generic Atlas event with no type-specific fields beyond the base."""
    def __init__(self, value_dict: dict) -> None:
        super().__init__(value_dict)
72 |
73 |
class AtlasCPSEvent(_AtlasBaseEvent):
    def __init__(self, value_dict: dict) -> None:
        """
        Atlas Events for Cloud Provider Snapshot related events.

        Contains extra data points directly related to CPS events.
        !NOTE! The extra data points are extracted from the "raw" property which is not guaranteed to be stable.

        Args:
            value_dict: The original dict of values from the Atlas API
        """
        super().__init__(value_dict)
        # Fixed: always define the CPS-specific attributes; previously they
        # only existed when "raw" data was present, so access on events
        # without it raised AttributeError.
        self.snapshot_id: Optional[str] = None
        self.snapshot_completion_date: Optional[datetime] = None
        self.snapshot_scheduled_creation_date: Optional[datetime] = None
        self.cluster_name: Optional[str] = None
        self.cluster_id: Optional[str] = None
        if self.additional_data:
            self.snapshot_id = self.additional_data.get('snapshotId', None)
            self.cluster_name = self.additional_data.get('clusterName', None)
            self.cluster_id = self.additional_data.get('clusterId', None)
            snapshot_completion_date = None
            snapshot_scheduled_creation_date = None
            try:
                snapshot_completion_date = self.additional_data.get('snapshotCompletionDate', None)
                self.snapshot_completion_date = parse(snapshot_completion_date)
            except (ValueError, TypeError, AttributeError):
                logger.debug(f'Could not parse a CPS snapshot completion date: {snapshot_completion_date}')
            try:
                snapshot_scheduled_creation_date = self.additional_data.get('snapshotScheduledCreationDate', None)
                self.snapshot_scheduled_creation_date = parse(snapshot_scheduled_creation_date)
            except (ValueError, TypeError, AttributeError):
                logger.debug(
                    f'Could not parse a CPS snapshot scheduled creation date: {snapshot_scheduled_creation_date}')
105 |
106 |
class AtlasDataExplorerEvent(_AtlasUserBaseEvent):
    """Event generated by Data Explorer activity (browsing or altering data)."""

    def __init__(self, value_dict: dict) -> None:
        super().__init__(value_dict)
        self.database: Optional[str] = value_dict.get('database')
        self.collection: Optional[str] = value_dict.get('collection')
        self.op_type: Optional[str] = value_dict.get('opType')
113 |
114 |
class AtlasClusterEvent(_AtlasBaseEvent):
    """Event tied to a named Atlas cluster / replica set."""

    def __init__(self, value_dict: dict) -> None:
        super().__init__(value_dict)
        self.replica_set_name: Optional[str] = value_dict.get('replicaSetName')
        self.cluster_name: Optional[str] = value_dict.get('clusterName')
120 |
121 |
class AtlasHostEvent(_AtlasBaseEvent):
    """Event tied to a specific host (mongod/mongos process endpoint)."""

    def __init__(self, value_dict: dict) -> None:
        super().__init__(value_dict)
        self.hostname: Optional[str] = value_dict.get('hostname')
        self.port: Optional[int] = value_dict.get('port')
        self.replica_set_name: Optional[str] = value_dict.get('replicaSetName')
128 |
129 |
class AtlasFeatureEvent(_AtlasUserBaseEvent):
    """User-triggered event concerning an Atlas feature toggle/usage."""

    def __init__(self, value_dict: dict) -> None:
        super().__init__(value_dict)
        self.hostname: Optional[str] = value_dict.get('hostname')
        self.feature_name: Optional[str] = value_dict.get('featureName')
135 |
136 |
def atlas_event_factory(value_dict: dict) -> Union[
    AtlasEvent, AtlasDataExplorerEvent, AtlasClusterEvent, AtlasHostEvent, AtlasFeatureEvent, AtlasCPSEvent]:
    """Instantiate the most specific event class for an Atlas event payload.

    Dispatches on marker keys, from most to least specific: CPS events
    (eventTypeName prefixed "CPS_"), then feature, host, cluster and
    data-explorer events; anything else becomes a generic AtlasEvent.

    Args:
        value_dict: The original dict of values from the Atlas API.

    Returns:
        An instance of the most specific matching event class.
    """
    # "or ''" guards against payloads where eventTypeName is missing or
    # explicitly None, which previously raised TypeError ("in" on None).
    if 'CPS_' in (value_dict.get("eventTypeName") or ''):
        return AtlasCPSEvent(value_dict=value_dict)
    elif value_dict.get("featureName", None):
        return AtlasFeatureEvent(value_dict=value_dict)
    elif value_dict.get("hostname", None):
        return AtlasHostEvent(value_dict=value_dict)
    elif value_dict.get("clusterName", None):
        return AtlasClusterEvent(value_dict=value_dict)
    elif value_dict.get("database", None):
        return AtlasDataExplorerEvent(value_dict=value_dict)
    else:
        return AtlasEvent(value_dict=value_dict)
155 |
156 |
# Type alias: a list whose members may be raw Atlas event dicts or parsed event objects.
ListOfEvents = NewType('ListOfEvents', List[Union[Dict, _AtlasBaseEvent]])
158 |
--------------------------------------------------------------------------------
/atlasapi/lib.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from isodate import Duration, duration_isoformat, parse_datetime
16 | from isodate.isoerror import ISO8601Error
17 | from typing import Iterator
18 | import logging
19 | from enum import Enum
20 | from datetime import datetime
21 |
22 | logger = logging.getLogger('atlasapi.lib')
23 |
24 |
class AtlasLogNames(Enum):
    """
    The name of the log file that you want to retrieve:

    """
    MONGODB = "mongodb.gz"  # mongod process log
    MONGOS = "mongos.gz"  # mongos (sharded cluster router) process log
    MONGOD_AUDIT = "mongodb-audit-log.gz"  # mongod audit log
    MONGOS_AUDIT = "mongos-audit-log.gz"  # mongos audit log
34 |
35 |
class LogLine(object):
    """A single parsed line from an Atlas mongod/mongos log file.

    Attempts a full parse (date, level, facility, user, message); falls back
    to a short parse (date, message) for lines with fewer fields. ``type``
    records which parse succeeded ("Full" or "SHORT").
    """

    def __init__(self, raw_line):
        self.raw_line = raw_line
        # Pre-seed every parsed attribute so a parse failure (caught below)
        # leaves a fully formed object instead of one missing attributes,
        # which previously caused AttributeError for downstream consumers.
        self.date = None
        self.level = None
        self.facility = None
        self.user = None
        self.line = None
        self.type = None
        try:
            raw_line_data = self.raw_line.rstrip().split(maxsplit=4)
            self.date: datetime = parse_datetime(raw_line_data[0])
            self.level: str = raw_line_data[1]
            self.facility: str = raw_line_data[2]
            self.user: str = raw_line_data[3].replace('[', '').replace(']', '')
            self.line: str = raw_line_data[-1]
            self.type: str = "Full"
        except IndexError:
            # Fewer than five fields: fall back to "<timestamp> <message>".
            # NOTE(review): an ISO8601Error raised inside this handler is NOT
            # caught by the sibling handler below and will propagate.
            raw_line_data = raw_line.rstrip().split(maxsplit=1)
            self.date: datetime = parse_datetime(raw_line_data[0])
            self.line: str = raw_line_data[-1]
            self.type: str = "SHORT"
        except ISO8601Error:
            logger.error(f'Error Parsing line: {raw_line}')
55 |
56 |
class AtlasUnits(Enum):
    """Units reported by the Atlas measurements API for metric values."""
    SCALAR_PER_SECOND = 'SCALAR_PER_SECOND'
    SCALAR = 'SCALAR'
    PERCENT = 'PERCENT'
    MILLISECONDS = 'MILLISECONDS'
    BYTES = 'BYTES'
    GIGABYTES = 'GIGABYTES'
    BYTES_PER_SECOND = 'BYTES_PER_SECOND'
    MEGABYTES_PER_SECOND = 'MEGABYTES_PER_SECOND'
    GIGABYTES_PER_HOUR = 'GIGABYTES_PER_HOUR'
67 |
68 |
class AtlasGranularities(object):
    """Helper class to create ISO 8601 durations to pass to the API

    To add more possible granularities, add them here.

    """
    TEN_SECOND = duration_isoformat(Duration(seconds=10))  # 'PT10S'
    MINUTE = duration_isoformat(Duration(minutes=1))  # 'PT1M'
    FIVE_MINUTE = duration_isoformat(Duration(minutes=5))  # 'PT5M'
    HOUR = duration_isoformat(Duration(hours=1))  # 'PT1H'
    DAY = duration_isoformat(Duration(days=1))  # 'P1D'
80 |
81 |
82 |
class AtlasPeriods(object):
    """Helper class to create ISO 8601 durations to send to the Atlas period parameter.

    To add more periods, add them here.
    """
    # Each constant is an ISO 8601 duration string produced by isodate.
    MINUTES_15 = duration_isoformat(Duration(minutes=15))
    HOURS_1 = duration_isoformat(Duration(hours=1))
    HOURS_8 = duration_isoformat(Duration(hours=8))
    HOURS_24 = duration_isoformat(Duration(hours=24))
    HOURS_48 = duration_isoformat(Duration(hours=48))
    WEEKS_1 = duration_isoformat(Duration(weeks=1))
    WEEKS_4 = duration_isoformat(Duration(weeks=4))
    MONTHS_1 = duration_isoformat(Duration(months=1))
    MONTHS_2 = duration_isoformat(Duration(months=2))
    YEARS_1 = duration_isoformat(Duration(years=1))
    YEARS_2 = duration_isoformat(Duration(years=2))
99 |
100 |
# noinspection PyCallByClass
class _GetAll(object):
    """Mixin that enumerates the string constants of a class and of its nested classes.

    ``get_all`` walks class attributes up to three levels deep (class,
    nested class, doubly-nested class) and yields the values of "leaf"
    attributes.
    """
    is_leaf = False  # overridden to True by _GetAllLeaf for terminal classes

    @classmethod
    def get_all(cls) -> Iterator[str]:
        # An attribute is treated as a leaf constant when its name contains
        # no underscore and starts lowercase; a name starting uppercase is
        # recursed into as a nested class.
        # NOTE(review): the "'_' not in item" filter also skips leaf names
        # that themselves contain underscores (e.g. "bytes_read") -- confirm
        # this is intended before relying on get_all for completeness.
        out = cls.__dict__
        for item in out:
            if '_' not in item and not item[0].isupper():
                yield cls.__getattribute__(cls, item)
            elif '_' not in item and item[0].isupper():
                sub_out = cls.__getattribute__(cls, item).__dict__
                for sub_item in sub_out:
                    if '_' not in sub_item and not sub_item[0].isupper():
                        yield cls.__getattribute__(cls, item).__dict__.get(sub_item)
                    if '_' not in sub_item and sub_item[0].isupper():
                        sub_sub_out = cls.__getattribute__(cls, item).__dict__.get(sub_item).__dict__
                        for sub_sub_item in sub_sub_out:
                            if '_' not in sub_sub_item and not sub_sub_item[0].isupper():
                                yield sub_sub_out.get(sub_sub_item)
121 |
122 |
class _GetAllLeaf(_GetAll):
    """Marker subclass: a _GetAll container with no nested classes of its own."""
    is_leaf = True
125 |
126 |
class ProviderName(Enum):
    """Cloud providers on which Atlas clusters can run (TENANT = shared tier)."""
    AWS = 'Amazon Web Services'
    GCP = 'Google Cloud Platform'
    AZURE = 'Microsoft Azure'
    TENANT = 'Shared Tier'
132 |
133 |
class MongoDBMajorVersion(Enum):
    """MongoDB major versions recognized by this library; vX_x marks an unknown version."""
    v3_4 = '3.4'
    v3_6 = '3.6'
    v4_0 = '4.0'
    v4_2 = '4.2'
    v4_4 = '4.4'
    v5_0 = '5.0'
    vX_x = 'Unknown'
142 |
143 |
class ClusterType(Enum):
    """
    The types of clusters available in Atlas.

    GEOSHARDED is a Global write cluster sharded by geo information.

    SHARDED and SHARDEDCLUSTER share the same value, so SHARDEDCLUSTER is an
    alias of SHARDED under Enum aliasing rules.
    """
    REPLICASET = 'Replica Set'
    SHARDED = 'Sharded Cluster'
    SHARDEDCLUSTER = 'Sharded Cluster'
    GEOSHARDED = 'Global Cluster'
--------------------------------------------------------------------------------
/atlasapi/logs.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
4 |
--------------------------------------------------------------------------------
/atlasapi/maintenance_window.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | Maint Window Module
17 |
18 | The maintenanceWindow resource provides access to retrieve or update the current Atlas project maintenance window.
19 | To learn more about Maintenance Windows, see the Set Preferred Cluster Maintenance Start Time
20 | setting on the View/Modify Project Settings page.
21 |
22 | """
23 | from enum import Enum
24 | from pprint import pprint
25 | import logging
26 | from json import dumps
27 | from enum import IntEnum
28 |
29 | logger = logging.getLogger('maintenance_window')
30 |
31 |
class Weekdays(Enum):
    """Day-of-week codes used by the Atlas maintenance window API (1 = Sunday)."""
    SUNDAY = 1
    MONDAY = 2
    TUESDAY = 3
    WEDNESDAY = 4
    THURSDAY = 5
    FRIDAY = 6
    SATURDAY = 7
40 |
41 |
class MaintenanceWindow(object):
    def __init__(self, day_of_week: Weekdays = Weekdays.SUNDAY, hour_of_day: int = 23, number_of_deferrals: int = 1,
                 start_asap: bool = False):
        """
        Stores the definition of maint window configuration for a group/project.

        Args:
            day_of_week: The day of week that maint should run
            hour_of_day: the hour of the day (24 based) to run maint
            number_of_deferrals: how many times maintenance has been deferred
                (immutable through the update endpoint)
            start_asap: when True, maintenance starts as soon as possible
        """
        self.startASAP = start_asap
        self.numberOfDeferrals = number_of_deferrals
        self.hourOfDay = hour_of_day
        self.dayOfWeek = day_of_week

    @classmethod
    def from_dict(cls, data_dict: dict):
        """
        Creates a maint window definition from a dict.

        Args:
            data_dict: An atlas formated dict

        Returns:
            MaintenanceWindow: the parsed window definition

        Raises:
            ValueError: if 'dayOfWeek' is absent or not a valid Weekdays value
        """
        day_of_week: Weekdays = Weekdays(data_dict.get('dayOfWeek', None))
        hour_of_day = data_dict.get('hourOfDay', None)
        number_of_deferrals = data_dict.get('numberOfDeferrals', None)
        start_asap = data_dict.get('startASAP', None)

        return cls(day_of_week, hour_of_day, number_of_deferrals, start_asap)

    def as_dict(self) -> dict:
        """
        Returns the Maintenance object as a serializable dict.

        Converts enums. Works on a copy of __dict__; the previous
        implementation returned (and mutated) the instance dict itself, so
        serialization corrupted the object's own dayOfWeek attribute.

        Returns:
            dict: serializable representation of the window
        """
        out_dict = dict(self.__dict__)
        if isinstance(out_dict['dayOfWeek'], Weekdays):
            out_dict['dayOfWeek'] = out_dict['dayOfWeek'].value
        return out_dict

    def as_update_dict(self) -> dict:
        """
        Returns a dict with immutable and unset properties removed.

        Returns: dict
        """
        # Build a fresh dict instead of deleting keys from the dict being
        # iterated, which raised RuntimeError ("dictionary changed size
        # during iteration") whenever a value was None.
        return {key: val for key, val in self.as_dict().items()
                if key != 'numberOfDeferrals' and val is not None}
--------------------------------------------------------------------------------
/atlasapi/measurements.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, absolute_import, print_function, unicode_literals
2 |
3 | from datetime import datetime
4 | from statistics import mean, StatisticsError
5 | from typing import Optional, Tuple, List, Iterable, NewType
6 | import logging
7 | import humanfriendly as hf
8 | from dateutil.parser import parse
9 |
10 | from atlasapi.atlas_types import OptionalFloat
11 | from atlasapi.lib import _GetAll, AtlasPeriods, AtlasGranularities
12 |
13 | logger: logging.Logger = logging.getLogger('Atlas.measurements')
14 |
15 |
class StatisticalValues:
    """Basic statistics (count/mean/min/max) computed over a list of samples.

    None entries are stripped via clean_list() before computing. When the
    cleaned list is empty, mean() raises StatisticsError and all fields
    default to 0.
    """

    def __init__(self, data_list: list):
        # Clean once instead of re-cleaning the list for every statistic.
        cleaned = clean_list(data_list)
        try:
            self.samples: int = len(cleaned)
            self.mean: float = float(mean(cleaned))
            self.min: float = float(min(cleaned))
            self.max: float = float(max(cleaned))
        except StatisticsError:
            logger.warning('Could not compute statistical values.')
            self.samples: int = 0
            self.mean: float = 0
            self.min: float = 0
            self.max: float = 0
29 |
30 |
class StatisticalValuesFriendly:
    def __init__(self, data_list: list, data_type: str = None) -> None:
        """Returns human-readable values for stats

        Args:
            data_list: raw sample values (None entries are stripped first)
            data_type: The datatype either bytes or number; 'BYTES' renders
                with size suffixes, every other type (SCALAR,
                SCALAR_PER_SECOND, ...) as a formatted number.
        """
        # Clean once up front; the original re-ran clean_list per statistic.
        cleaned = clean_list(data_list)
        if data_type is None:
            data_type = 'SCALAR_PER_SECOND'
        # 'SCALAR' and the fallback branch were duplicates; a single
        # formatter selection covers both.
        formatter = hf.format_size if data_type == 'BYTES' else hf.format_number
        try:
            self.mean: str = formatter(mean(cleaned))
            self.min: str = formatter(min(cleaned))
            self.max: str = formatter(max(cleaned))
        except StatisticsError:
            logger.warning('Could not compute statistical values.')
            self.mean: str = 'No Value'
            self.min: str = 'No value'
            self.max: str = 'No value'
60 |
61 |
class AtlasMeasurementTypes(_GetAll):
    """
    Helper class for all available atlas measurements.

    All classes and embedded classes have a get_all class method that returns an iterator of all measurements
    and sub measurements.

    """
    connections = 'CONNECTIONS'

    class Asserts(_GetAll):
        regular = 'ASSERT_REGULAR'
        warning = 'ASSERT_WARNING'
        msg = 'ASSERT_MSG'
        user = 'ASSERT_USER'

    class Cache(_GetAll):
        bytes_read = 'CACHE_BYTES_READ_INTO'
        bytes_written = 'CACHE_BYTES_WRITTEN_FROM'
        dirty = 'CACHE_DIRTY_BYTES'
        used = 'CACHE_USED_BYTES'

    class Cursors(_GetAll):
        open = 'CURSORS_TOTAL_OPEN'
        timed_out = 'CURSORS_TOTAL_TIMED_OUT'

    class Db(_GetAll):
        storage = 'DB_STORAGE_TOTAL'
        data_size = 'DB_DATA_SIZE_TOTAL'

    class DocumentMetrics(_GetAll):
        returned = 'DOCUMENT_METRICS_RETURNED'
        inserted = 'DOCUMENT_METRICS_INSERTED'
        updated = 'DOCUMENT_METRICS_UPDATED'
        deleted = 'DOCUMENT_METRICS_DELETED'

    class ExtraInfo(_GetAll):
        page_faults = 'EXTRA_INFO_PAGE_FAULTS'

    class GlobalLockCurrentQueue(_GetAll):
        total = 'GLOBAL_LOCK_CURRENT_QUEUE_TOTAL'
        readers = 'GLOBAL_LOCK_CURRENT_QUEUE_READERS'
        writers = 'GLOBAL_LOCK_CURRENT_QUEUE_WRITERS'

    class Memory(_GetAll):
        resident = 'MEMORY_RESIDENT'
        virtual = 'MEMORY_VIRTUAL'
        mapped = 'MEMORY_MAPPED'

    class Network(_GetAll):
        bytes_id = 'NETWORK_BYTES_IN'  # initial typo, kept for backwards compatibility
        bytes_in = 'NETWORK_BYTES_IN'
        bytes_out = 'NETWORK_BYTES_OUT'
        num_requests = 'NETWORK_NUM_REQUESTS'

    class Opcounter(_GetAll):
        cmd = 'OPCOUNTER_CMD'
        query = 'OPCOUNTER_QUERY'
        update = 'OPCOUNTER_UPDATE'
        delete = 'OPCOUNTER_DELETE'
        getmore = 'OPCOUNTER_GETMORE'
        insert = 'OPCOUNTER_INSERT'

    class Repl(_GetAll):
        cmd = 'OPCOUNTER_REPL_CMD'
        update = 'OPCOUNTER_REPL_UPDATE'
        delete = 'OPCOUNTER_REPL_DELETE'
        insert = 'OPCOUNTER_REPL_INSERT'

    class Operations(_GetAll):
        scan_and_order = 'OPERATIONS_SCAN_AND_ORDER'

    class ExecutionTime(_GetAll):
        reads = 'OP_EXECUTION_TIME_READS'
        writes = 'OP_EXECUTION_TIME_WRITES'
        commands = 'OP_EXECUTION_TIME_COMMANDS'

    class Oplog(_GetAll):
        master_time = 'OPLOG_MASTER_TIME'
        rate = 'OPLOG_RATE_GB_PER_HOUR'

    class QueryExecutor(_GetAll):
        scanned = 'QUERY_EXECUTOR_SCANNED'
        scanned_objects = 'QUERY_EXECUTOR_SCANNED_OBJECTS'

    class QueryTargetingScanned(_GetAll):
        per_returned = 'QUERY_TARGETING_SCANNED_PER_RETURNED'
        objects_per_returned = 'QUERY_TARGETING_SCANNED_OBJECTS_PER_RETURNED'

    class TicketsAvailable(_GetAll):
        reads = 'TICKETS_AVAILABLE_READS'
        writes = 'TICKETS_AVAILABLE_WRITE'

    class CPU(_GetAll):
        class Process(_GetAll):
            user = 'PROCESS_CPU_USER'
            kernel = 'PROCESS_CPU_KERNEL'
            children_user = 'PROCESS_CPU_CHILDREN_USER'
            children_kernel = 'PROCESS_CPU_CHILDREN_KERNEL'

        class ProcessNormalized(_GetAll):
            user = 'PROCESS_NORMALIZED_CPU_USER'
            kernel = 'PROCESS_NORMALIZED_CPU_KERNEL'
            children_user = 'PROCESS_NORMALIZED_CPU_CHILDREN_USER'
            children_kernel = 'PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL'

        class System(_GetAll):
            user = 'SYSTEM_CPU_USER'
            kernel = 'SYSTEM_CPU_KERNEL'
            nice = 'SYSTEM_CPU_NICE'
            iowait = 'SYSTEM_CPU_IOWAIT'
            irq = 'SYSTEM_CPU_IRQ'
            softirq = 'SYSTEM_CPU_SOFTIRQ'
            guest = 'SYSTEM_CPU_GUEST'
            steal = 'SYSTEM_CPU_STEAL'

        class SystemNormalized(_GetAll):
            user = 'SYSTEM_NORMALIZED_CPU_USER'
            kernel = 'SYSTEM_NORMALIZED_CPU_KERNEL'
            nice = 'SYSTEM_NORMALIZED_CPU_NICE'
            iowait = 'SYSTEM_NORMALIZED_CPU_IOWAIT'
            irq = 'SYSTEM_NORMALIZED_CPU_IRQ'
            softirq = 'SYSTEM_NORMALIZED_CPU_SOFTIRQ'
            guest = 'SYSTEM_NORMALIZED_CPU_GUEST'
            steal = 'SYSTEM_NORMALIZED_CPU_STEAL'

    class Disk(_GetAll):
        class IOPS(_GetAll):
            read = 'DISK_PARTITION_IOPS_READ'
            read_max = 'MAX_DISK_PARTITION_IOPS_READ'
            write = 'DISK_PARTITION_IOPS_WRITE'
            write_max = 'MAX_DISK_PARTITION_IOPS_WRITE'
            total = 'DISK_PARTITION_IOPS_TOTAL'
            total_max = 'MAX_DISK_PARTITION_IOPS_TOTAL'

        class Util(_GetAll):
            util = 'DISK_PARTITION_UTILIZATION'
            util_max = 'MAX_DISK_PARTITION_UTILIZATION'

        class Latency(_GetAll):
            read = 'DISK_PARTITION_LATENCY_READ'
            read_max = 'MAX_DISK_PARTITION_LATENCY_READ'
            write = 'DISK_PARTITION_LATENCY_WRITE'
            write_max = 'MAX_DISK_PARTITION_LATENCY_WRITE'

        class Free(_GetAll):
            space_free = 'DISK_PARTITION_SPACE_FREE'
            space_free_max = 'MAX_DISK_PARTITION_SPACE_FREE'
            used = 'DISK_PARTITION_SPACE_USED'
            used_max = 'MAX_DISK_PARTITION_SPACE_USED'
            percent_fee = 'DISK_PARTITION_SPACE_PERCENT_FREE'  # initial typo, kept for backwards compatibility
            percent_free = 'DISK_PARTITION_SPACE_PERCENT_FREE'
            percent_free_max = 'MAX_DISK_PARTITION_SPACE_PERCENT_FREE'
            percent_used = 'DISK_PARTITION_SPACE_PERCENT_USED'
            percent_used_max = 'MAX_DISK_PARTITION_SPACE_PERCENT_USED'

    class Namespaces(_GetAll):
        """Metrics regarding namespaces (databases) on each host.

        As found in dbstats (https://www.mongodb.com/docs/manual/reference/command/dbStats/)
        """
        object_size = 'DATABASE_AVERAGE_OBJECT_SIZE'  # dbStats.avgObjSize Average size of each document in bytes. This is the dataSize divided by the number of documents. The scale argument does not affect the avgObjSize value.
        collection_count = 'DATABASE_COLLECTION_COUNT'
        data_size = 'DATABASE_DATA_SIZE'  # Total size of the uncompressed data held in the database. The dataSize decreases when you remove documents.
        storage_size = 'DATABASE_STORAGE_SIZE'  # Sum of the space allocated to all collections in the database for document storage, including free space. storageSize does not include space allocated to indexes. See indexSize for the total index size.
        index_size = 'DATABASE_INDEX_SIZE'  # Sum of the space allocated to all indexes in the database, including free index space.
        index_count = 'DATABASE_INDEX_COUNT'
        extent_count = 'DATABASE_EXTENT_COUNT'  # ?
        object_count = 'DATABASE_OBJECT_COUNT'  # Number of objects (specifically, documents) in the database across all collections.
        view_count = 'DATABASE_VIEW_COUNT'
231 |
232 |
# noinspection PyBroadException
class AtlasMeasurementValue(object):
    def __init__(self, value_dict: dict):
        """
        Class for holding a measurement value
        :type value_dict: dict
        :param value_dict: An Atlas standard Measurement value dictionary.
        """
        timestamp: int = value_dict.get('timestamp', None)
        value: float = value_dict.get('value', None)
        try:
            self.timestamp: datetime = parse(timestamp)
        except (ValueError, TypeError):
            # Fixed: the original message never interpolated the bad value.
            logger.warning('Could not parse "{}" as a datetime.'.format(timestamp))
            self.timestamp = None
        # Explicit None check replaces the original dead branch that assigned
        # None and then immediately attempted float(None) anyway.
        if value is None:
            logger.info('Value is none.')
            self.value = None
        else:
            try:
                self.value: float = float(value)
            except ValueError as e:
                self.value = None
                logger.warning('Could not parse the metric value "{}". Error was {}'.format(value, e))
            except TypeError:
                logger.info('Value is none.')
                self.value = None

    # noinspection PyBroadException
    @property
    def value_int(self) -> Optional[int]:
        """The value coerced to int, or None when coercion fails."""
        try:
            return int(self.value)
        except Exception:
            return None

    @property
    def value_float(self) -> Optional[float]:
        """The value coerced to float, or None when coercion fails."""
        try:
            return float(self.value)
        except Exception:
            return None

    def as_dict(self) -> dict:
        """A serializable dict of the value and its coerced forms."""
        return dict(timestamp=str(self.timestamp), value=self.value, value_int=self.value_int,
                    value_float=self.value_float)

    @property
    def as_tuple(self) -> Tuple[datetime, OptionalFloat]:
        """
        Returns a MeasurementValue as a tuple, timestamp first.
        :rtype: Tuple[datetime,OptionalFloat]
        :return: A tuple with a datetime and a float
        """
        return self.timestamp, self.value
286 |
287 |
class AtlasMeasurement(object):
    """A point in time container for an Atlas measurement.

    For a certain period, granularity and measurementType holds a list of measurementValues.

    Args:
        name (AtlasMeasurementTypes): The name of the measurement type
        units (Text): Descriptive text of units used.
        period (AtlasPeriods): The period the measurement covers
        granularity (AtlasGranularities): The granularity used for the measurement
        measurements (List[AtlasMeasurementValue]): A list of the actual measurement values
    """

    def __init__(self, name: AtlasMeasurementTypes, period: AtlasPeriods,
                 granularity: AtlasGranularities, units: str = None, measurements: List[AtlasMeasurementValue] = None):
        # Create the list here to avoid the shared-mutable-default pitfall.
        if measurements is None:
            measurements = list()
        self.name: AtlasMeasurementTypes = name
        self.units: str = units
        self.period: AtlasPeriods = period
        self.granularity: AtlasGranularities = granularity
        self._measurements: List[AtlasMeasurementValue] = measurements

    @property
    def measurements(self) -> Iterable[AtlasMeasurementValue]:
        """
        Getter for the measurements.

        Returns:
            Iterator[AtlasMeasurementValue]: An iterator containing values objects.
        """
        for item in self._measurements:
            yield item

    @measurements.setter
    def measurements(self, value):
        # Accept a single value or a list; lists are extended, not nested.
        if isinstance(value, list):
            self._measurements.extend(value)
        else:
            self._measurements.append(value)

    @measurements.deleter
    def measurements(self):
        self._measurements = []

    def measurements_as_tuples(self):
        """Yield each stored measurement as a (timestamp, value) tuple.

        Fixed: guards against an empty list, which previously raised
        IndexError when probing the first element.
        """
        if self._measurements and isinstance(self._measurements[0], AtlasMeasurementValue):
            for item in self._measurements:
                yield item.as_tuple

    @property
    def date_start(self):
        """The date of the first measurement.

        Returns:
            datetime: The date of the first measurement.
        """
        seq = [x.timestamp for x in self._measurements]
        return min(seq)

    @property
    def date_end(self):
        """The date of the last measurement

        Returns:
            datetime: The date of the last measurement.

        """
        seq = [x.timestamp for x in self._measurements]
        return max(seq)

    @property
    def measurements_count(self):
        """The count of measurements

        Returns:
            int: The count of measurements in the set
        """
        return len(self._measurements)

    @property
    def as_dict(self):
        """Returns the measurement as a dict, including the computed properties.

        Returns:
            dict:
        """
        return dict(measurements=self._measurements, date_start=self.date_start, date_end=self.date_end, name=self.name,
                    units=self.units, period=self.period, granularity=self.granularity,
                    measurements_count=self.measurements_count
                    )

    @property
    def measurement_stats(self) -> StatisticalValues:
        """Returns a statistical info for measurement data"""
        data_list = [each_measurement.value_float for each_measurement in self.measurements]
        return StatisticalValues(data_list=data_list)

    @property
    def measurement_stats_friendly(self) -> StatisticalValuesFriendly:
        """Returns statistical info for measurement data in friendly bytes format"""
        data_list = [each_measurement.value_float for each_measurement in self.measurements]
        return StatisticalValuesFriendly(data_list=data_list, data_type=self.units)

    def __hash__(self):
        return hash(self.name + '-' + self.period)

    def __eq__(self, other):
        """
        Measurements are considered duplicates if name and period are the same.

        Returns NotImplemented (instead of the previous implicit None) for
        foreign types so Python can fall back to the reflected comparison.
        """
        if isinstance(other, AtlasMeasurement):
            return (self.name == other.name) and (self.period == other.period)
        return NotImplemented
407 |
408 |
# Type aliases used by callers for annotation convenience.
ListOfAtlasMeasurementValues = NewType('ListOfAtlasMeasurementValues', List[Optional[AtlasMeasurementValue]])
OptionalAtlasMeasurement = NewType('OptionalAtlasMeasurement', Optional[AtlasMeasurement])
411 |
412 |
def clean_list(data_list: list) -> list:
    """Returns a list with any None values removed

    Fixed: the previous ``filter(None, ...)`` also discarded legitimate
    falsy samples such as 0 and 0.0, silently skewing the statistics
    computed from the cleaned list; now only None entries are dropped.

    Args:
        data_list (list): The list to be cleaned

    Returns (list): The list cleaned of None values.

    """
    return [item for item in data_list if item is not None]
423 |
--------------------------------------------------------------------------------
/atlasapi/network.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | Network module
17 |
18 | Module which handles the basic network operations with the Atlas API>
19 | """
20 |
21 | import requests
22 | from requests.auth import HTTPDigestAuth, HTTPBasicAuth
23 | from atlasapi.settings import Settings
24 | from atlasapi.errors import *
25 | import logging
26 | from json import dumps
27 | from io import BytesIO
28 | from typing import Union
29 |
30 | logger = logging.getLogger('network')
31 | logger.setLevel(logging.WARNING)
32 |
33 |
def merge(dict1, dict2):
    """Merge dict1 into dict2 in place and return dict2.

    Keys from dict1 overwrite same-named keys in dict2. Returning the mutated
    dict (instead of update()'s None, which the original returned) keeps the
    in-place behavior existing callers rely on while making the function
    usable in expressions.

    Args:
        dict1 (dict): source dict; its keys win on collision
        dict2 (dict): destination dict, mutated in place

    Returns:
        dict: dict2 after the merge
    """
    dict2.update(dict1)
    return dict2
36 |
37 |
38 | class Network:
39 | """Network constructor
40 |
41 | Args:
42 | user (str): user
43 | password (str): password
44 | """
45 |
    def __init__(self, user, password, AuthMethod: Union[HTTPDigestAuth, HTTPBasicAuth] = HTTPDigestAuth):
        # Credentials are stored as-is and handed to the auth class on each
        # request; AuthMethod selects digest (Atlas default) or basic auth.
        self.user: str = user
        self.password: str = password
        self.auth_method: Union[HTTPDigestAuth, HTTPBasicAuth] = AuthMethod
50 |
51 | def answer(self, c, details: Union[dict, BytesIO]):
52 | """Answer will provide all necessary feedback for the caller
53 |
54 | Args:
55 | c (int): HTTP Code
56 | details (dict): Response payload
57 |
58 | Returns:
59 | dict: Response payload
60 |
61 | Raises:
62 | ErrAtlasBadRequest
63 | ErrAtlasUnauthorized
64 | ErrAtlasForbidden
65 | ErrAtlasNotFound
66 | ErrAtlasMethodNotAllowed
67 | ErrAtlasConflict
68 | ErrAtlasServerErrors
69 |
70 | """
71 | if c in [Settings.SUCCESS, Settings.CREATED, Settings.ACCEPTED, Settings.NO_CONTENT]:
72 | return details
73 | elif c == Settings.BAD_REQUEST:
74 | raise ErrAtlasBadRequest(c, details)
75 | elif c == Settings.UNAUTHORIZED:
76 | raise ErrAtlasUnauthorized(c, details)
77 | elif c == Settings.FORBIDDEN:
78 | raise ErrAtlasForbidden(c, details)
79 | elif c == Settings.NOTFOUND:
80 | raise ErrAtlasNotFound(c, details)
81 | elif c == Settings.METHOD_NOT_ALLOWED:
82 | raise ErrAtlasMethodNotAllowed(c, details)
83 | elif c == Settings.CONFLICT:
84 | raise ErrAtlasConflict(c, details)
85 | else:
86 | # Settings.SERVER_ERRORS
87 | raise ErrAtlasServerErrors(c, details)
88 |
    def get_file(self, uri):
        """Get request which returns a binary file

        Streams the response in 1 KiB chunks into an in-memory BytesIO and
        passes it through answer() for status handling.

        Args:
            uri (str): URI

        Returns:
            Binary File: API response as file

        Raises:
            Exception: Network issue
        """
        r = None

        try:
            file_obj = BytesIO()
            # NOTE: redirects are disabled here (unlike get()); a 3xx status
            # falls through to answer() and raises.
            r = requests.get(uri,
                             allow_redirects=False,
                             stream=True,
                             timeout=Settings.file_request_timeout,
                             headers={},
                             auth=self.auth_method(self.user, self.password))
            logger.debug("Auth information = {} {}".format(self.user, self.password))

            for chunk in r.iter_content(chunk_size=1024):
                # writing one chunk at a time to file
                if chunk:
                    logger.debug("Writing 1 Kbyte chunk to the file like object")
                    file_obj.write(chunk)
            logger.info("---- Completed downloading the file. ----")
            return self.answer(r.status_code, file_obj)

        except Exception:
            logger.warning('Request: {}'.format(r.request.__dict__))
            logger.warning('Response: {}'.format(r.__dict__))
            raise
        finally:
            if r:
                r.connection.close()
128 |
129 | def get(self, uri):
130 | """Get request
131 |
132 | Args:
133 | uri (str): URI
134 |
135 | Returns:
136 | Json: API response
137 |
138 | Raises:
139 | Exception: Network issue
140 | """
141 | r = None
142 |
143 | try:
144 | r = requests.get(uri,
145 | allow_redirects=True,
146 | timeout=Settings.requests_timeout,
147 | headers={},
148 | auth=self.auth_method(self.user, self.password))
149 | logger.debug("Auth information = {} {}".format(self.user, self.password))
150 |
151 | return self.answer(r.status_code, r.json())
152 | except Exception as e:
153 | logger.warning('Request: {}'.format(r.request.__dict__))
154 | logger.warning('Response: {}'.format(r.__dict__))
155 | raise e
156 | finally:
157 | if r:
158 | r.connection.close()
159 |
160 | def get_big(self, uri, params: dict = None):
161 | """Get request (max results)
162 |
163 | This is a temporary fix until we re-factor pagination.
164 |
165 | Args:
166 | params: dict of parameters that should be sent on the path
167 | uri (str): URI
168 |
169 | Returns:
170 | Json: API response
171 |
172 | Raises:
173 | Exception: Network issue
174 | """
175 | r = None
176 | if params:
177 | logger.debug(f"Recieved the following parameters to the get_big method : {params}")
178 | merge({'itemsPerPage': Settings.itemsPerPage}, params)
179 | logger.debug(f"The parameters are now {params}")
180 | else:
181 | params = {'itemsPerPage': Settings.itemsPerPage}
182 |
183 | try:
184 | logger.debug(f"The parameters object is {params}")
185 | r = requests.get(uri,
186 | allow_redirects=True,
187 | params=params,
188 | timeout=Settings.requests_timeout,
189 | headers={},
190 | auth=self.auth_method(self.user, self.password))
191 | logger.debug("Auth information = {} {}".format(self.user, self.password))
192 |
193 | return self.answer(r.status_code, r.json())
194 | except Exception as e:
195 | raise e
196 | finally:
197 | if r:
198 | r.connection.close()
199 |
200 | def post(self, uri, payload):
201 | """Post request
202 |
203 | Args:
204 | uri (str): URI
205 | payload (dict): Content to post
206 |
207 | Returns:
208 | Json: API response
209 |
210 | Raises:
211 | Exception: Network issue
212 | """
213 | r = None
214 |
215 | try:
216 | r = requests.post(uri,
217 | json=payload,
218 | allow_redirects=True,
219 | timeout=Settings.requests_timeout,
220 | headers={"Content-Type": "application/json"},
221 | auth=self.auth_method(self.user, self.password))
222 | return self.answer(r.status_code, r.json())
223 | except:
224 | raise
225 | finally:
226 | if r:
227 | r.connection.close()
228 |
229 | def patch(self, uri, payload):
230 | """Patch request
231 |
232 | Args:
233 | uri (str): URI
234 | payload (dict): Content to patch
235 |
236 | Returns:
237 | Json: API response
238 |
239 | Raises:
240 | Exception: Network issue
241 | """
242 | r = None
243 | try:
244 | r = requests.patch(uri,
245 | json=payload,
246 | allow_redirects=True,
247 | timeout=Settings.requests_timeout,
248 | headers={"Content-Type": "application/json"},
249 | auth=self.auth_method(self.user, self.password))
250 |
251 | try:
252 | output = r.json()
253 | except:
254 | logger.warning("PATCH doesnt return data!")
255 | output = {}
256 |
257 | return self.answer(r.status_code, output)
258 | except Exception as e:
259 |
260 | raise e
261 | finally:
262 | if r:
263 | r.connection.close()
264 |
265 | def delete(self, uri):
266 | """Delete request
267 |
268 | Args:
269 | uri (str): URI
270 |
271 | Returns:
272 | Json: API response
273 |
274 | Raises:
275 | Exception: Network issue
276 | """
277 | r = None
278 |
279 | try:
280 | r = requests.delete(uri,
281 | allow_redirects=True,
282 | timeout=Settings.requests_timeout,
283 | headers={},
284 | auth=self.auth_method(self.user, self.password))
285 | return self.answer(r.status_code, {"deleted": True})
286 | except Exception as e:
287 | raise e
288 | finally:
289 | if r:
290 | r.connection.close()
291 |
--------------------------------------------------------------------------------
/atlasapi/organizations.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 |
class Organization:
    """A single Atlas organization record."""

    def __init__(self, name: str, is_deleted: bool = False, links: Optional[list] = None, id: Optional[str] = None):
        """Store the organization fields as plain attributes.

        Args:
            name: Human-readable organization name.
            is_deleted: Whether Atlas reports the organization as deleted.
            links: Related-resource links returned by the API.
            id: Unique identifier of the organization.
        """
        self.name = name
        self.is_deleted = is_deleted
        self.links = links
        self.id = id

    @classmethod
    def from_dict(cls, data_dict: dict):
        """Build an Organization from an Atlas API response dict."""
        return cls(
            name=data_dict.get("name"),
            is_deleted=data_dict.get("isDeleted", False),
            links=data_dict.get("links", []),
            id=data_dict.get("id", None),
        )
15 |
--------------------------------------------------------------------------------
/atlasapi/projects.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from isodate import isodatetime
3 | from datetime import datetime
4 | from typing import Optional
5 |
6 |
class Project:
    def __init__(self, name: str, org_id: str, created_date: Optional[datetime] = None,
                 cluster_count: Optional[int] = None, id: Optional[str] = None, links: list = None,
                 with_default_alert_settings: Optional[bool] = True,
                 project_owner_id: str = None) -> None:
        """A single Atlas Project/Group

        Groups and projects are synonymous terms. Your {GROUP-ID} is the same as your project ID. For existing groups,
        your group/project ID remains the same. The resource and corresponding endpoints use the term groups.

        Args:
            id (str): The unique identifier of the project. You can use this value for populating the {GROUP-ID}
                parameter of other Atlas Administration API endpoints.
            name (str): The name of the project. You can use this value for populating the {GROUP-NAME} parameter of the /groups/byName/{GROUP-NAME} endpoint.
            links (list): One or more uniform resource locators that link to sub-resources and/or related resources. The Web Linking Specification explains the relation-types between URLs.
            org_id (str): The unique identifier of the Atlas organization to which the project belongs.
            created_date (Optional[datetime]): The ISO-8601-formatted timestamp of when Atlas created the project.
            cluster_count (int): The number of Atlas clusters deployed in the project.
            with_default_alert_settings (bool): Flag that indicates whether to create the new project with the default alert settings enabled. This parameter defaults to true.
            project_owner_id (str): Unique 24-hexadecimal digit string identifying the Atlas user to be granted the Project Owner role on the new project.
        """
        self.project_owner_id: Optional[str] = project_owner_id
        self.with_default_alert_settings: bool = with_default_alert_settings
        self.cluster_count: Optional[int] = cluster_count
        self.created_date: Optional[datetime] = created_date
        self.org_id: str = org_id
        self.links: Optional[list] = links
        self.name: str = name
        self.id: Optional[str] = id

    @classmethod
    def for_create(cls, name: str, org_id: str, with_default_alert_settings: bool = True, project_owner_id: str = None):
        """
        Creates a new Project object for use in creating a new project.

        Only name and org_id are required.

        Args:
            project_owner_id (str): Unique 24-hexadecimal digit string that identifies the Atlas user account to be granted the Project Owner role on the specified project. If you set this parameter, it overrides the default value of the oldest Organization Owner.
            name (str): The name of the project. You can use this value for populating the {GROUP-NAME} parameter of the /groups/byName/{GROUP-NAME} endpoint.
            org_id (str): The unique identifier of the Atlas organization to which the project belongs.
            with_default_alert_settings (bool): Flag that indicates whether to create the new project with the default alert settings enabled. This parameter defaults to true.

        Returns:
            Project: the new instance. (Fix: was wrongly documented as None.)
        """
        return cls(name=name, org_id=org_id, with_default_alert_settings=with_default_alert_settings,
                   project_owner_id=project_owner_id)

    @classmethod
    def from_dict(cls, data_dict):
        """
        Creates a Project object from a passed dict, in the format of the Atlas API.

        Args:
            data_dict (dict): A dictionary in the format of the Atlas API.

        Returns:
            Project: the new instance. (Fix: was wrongly documented as None.)
        """
        raw_created = data_dict.get("created")
        # Fix: "created" may be absent from the payload; only parse when it
        # is present instead of crashing inside the ISO-8601 parser.
        created_date = isodatetime.parse_datetime(raw_created) if raw_created else None
        return cls(id=data_dict.get("id"), name=data_dict.get("name"),
                   links=data_dict.get("links", []), org_id=data_dict.get("orgId"),
                   created_date=created_date, cluster_count=data_dict.get("clusterCount"))

    @property
    def create_dict(self) -> dict:
        """
        A dictionary in the format the Atlas API "create project" endpoint expects.

        Returns: A dictionary in the format the Atlas API "create project" endpoint expects.
        """
        return dict(name=self.name, orgId=self.org_id,
                    withDefaultAlertsSettings=self.with_default_alert_settings)
81 |
82 |
class ProjectSettings:
    def __init__(self, is_collect_db_stats: Optional[bool] = None, is_data_explorer: Optional[bool] = None,
                 is_performance_advisor: Optional[bool] = None, is_realtime_perf: Optional[bool] = None,
                 is_schema_advisor: Optional[bool] = None):
        """Holds Project/Group settings.

        Args:
            is_collect_db_stats (Optional[bool]): Whether statistics-in-cluster-metrics collection is enabled for the project.
            is_data_explorer (Optional[bool]): Whether Data Explorer is enabled, allowing the database to be queried through an easy-to-use interface.
            is_performance_advisor (Optional[bool]): Whether Performance Advisor and Profiler are enabled, allowing log analysis for performance recommendations.
            is_realtime_perf (Optional[bool]): Whether the Real Time Performance Panel is enabled, showing live metrics from the MongoDB database.
            is_schema_advisor (Optional[bool]): Whether Schema Advisor is enabled, providing data-model optimization recommendations.
        """
        self.is_collect_db_stats: Optional[bool] = is_collect_db_stats
        self.is_data_explorer: Optional[bool] = is_data_explorer
        self.is_performance_advisor: Optional[bool] = is_performance_advisor
        self.is_realtime_perf: Optional[bool] = is_realtime_perf
        self.is_schema_advisor: Optional[bool] = is_schema_advisor

    @classmethod
    def from_dict(cls, data_dict: dict):
        """Build a ProjectSettings from an Atlas API response dict (absent flags become False)."""
        return cls(
            is_collect_db_stats=bool(data_dict.get("isCollectDatabaseSpecificsStatisticsEnabled", False)),
            is_data_explorer=bool(data_dict.get("isDataExplorerEnabled", False)),
            is_performance_advisor=bool(data_dict.get("isPerformanceAdvisorEnabled", False)),
            is_realtime_perf=bool(data_dict.get("isRealtimePerformancePanelEnabled", False)),
            is_schema_advisor=bool(data_dict.get("isSchemaAdvisorEnabled", False)),
        )
111 |
--------------------------------------------------------------------------------
/atlasapi/settings.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | Settings module
17 |
18 | Provides few constants, APIs endpoints.
19 | """
20 | import os
21 | from os import getenv
22 |
23 |
class Settings:
    """Package-wide constants: Atlas API endpoint URL templates, pagination
    defaults, request timeouts, and HTTP status codes.

    The URL templates in ``api_resources`` mix two placeholder styles —
    old-style ``%s``/``%d`` formatting and ``{NAME}`` fields for
    ``str.format``; callers must use whichever style the chosen template uses.
    """
    # Atlas APIs
    BASE_URL = getenv('BASE_URL', 'https://cloud.mongodb.com')
    URI_STUB = getenv('URI_STUB', '/api/atlas/v1.0')

    # Nested mapping: {resource family: {human-readable action: URL template}}
    api_resources = {
        "Project": {
            "Get One Project": URI_STUB + "/groups/{GROUP_ID}"
        },
        "Monitoring and Logs": {
            "Get all processes for group": "/api/atlas/v1.0/groups/{group_id}/processes?pageNum={"
                                           "page_num}&itemsPerPage={items_per_page}",
            # NOTE(review): "%s:&s" below looks like a typo for "%s:%d"
            # (host:port) — confirm against callers before changing.
            "Get information for process in group": "/api/atlas/v1.0/groups/%s/processes/%s:&s?pageNum=%d"
                                                    "&itemsPerPage=%d",
            "Get measurement for host": "/api/atlas/v1.0/groups/{group_id}/processes/{host}:{"
                                        "port}/measurements?granularity={granularity}&period={period}&m={measurement}",
            "Get list of databases for host": "/api/atlas/v1.0/groups/{GROUP-ID}/processes/{HOST}:{PORT}/databases",
            "Get measurements of database for host.": "/api/atlas/v1.0/groups/{GROUP-ID}/processes/{HOST}:{"
                                                      "PORT}/databases/{DATABASE-NAME}/measurements",
            "Get list of disks or partitions for host.": "/api/atlas/v1.0/groups/{GROUP-ID}/processes/{HOST}:{"
                                                         "PORT}/disks",
            "Get measurements of for host": "/api/atlas/v1.0/groups/{GROUP-ID}/processes/{HOST}:{PORT}/disks/{"
                                            "DISK-NAME}/measurements",
            "Get the log file for a host in the cluster": "/api/atlas/v1.0/groups/{group_id}/clusters/{"
                                                          "host}/logs/{logname}",
            "Get Available Disks for Process": "/api/atlas/v1.0/groups/{group_id}/processes/"
                                               "{host}:{port}/disks",
            "Get Measurements of a Disk for Process": "/api/atlas/v1.0/groups/{group_id}/processes/{host}:{port}/disks/"
                                                      "{disk_name}/measurements",
            "Get Measurements of a Database for Process": "/api/atlas/v1.0/groups/{group_id}/processes/{host}:{port}/"
                                                          "databases/{database_name}/measurements",
            "Get Available Databases for Process": "/api/atlas/v1.0/groups/{group_id}/processes/"
                                                   "{host}:{port}/databases"
        },
        "Events": {
            "Get All Project Events": URI_STUB + "/groups/{group_id}/events?includeRaw=true&pageNum={page_num}"
                                                 "&itemsPerPage={items_per_page}",
            "Get Project Events Since Date": URI_STUB + "/groups/{group_id}/events?includeRaw=true&pageNum={"
                                                        "page_num}&itemsPerPage={items_per_page}&minDate={"
                                                        "min_date}"
        },
        "Clusters": {
            "Get All Clusters": URI_STUB + "/groups/%s/clusters?pageNum=%d&itemsPerPage=%d",
            "Get a Single Cluster": URI_STUB + "/groups/%s/clusters/%s",
            "Delete a Cluster": URI_STUB + "/groups/%s/clusters/%s",
            "Create a Cluster": URI_STUB + "/groups/{GROUP_ID}/clusters/",
            "Modify a Cluster": URI_STUB + "/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}",
            "Test Failover": URI_STUB + "/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}/restartPrimaries",
            "Advanced Configuration Options": URI_STUB + "/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}/processArgs",

        },
        "Database Users": {
            "Get All Database Users": "/api/atlas/v1.0/groups/%s/databaseUsers?pageNum=%d&itemsPerPage=%d",
            "Get a Single Database User": "/api/atlas/v1.0/groups/%s/databaseUsers/admin/%s",
            "Create a Database User": "/api/atlas/v1.0/groups/%s/databaseUsers",
            "Update a Database User": "/api/atlas/v1.0/groups/%s/databaseUsers/admin/%s",
            "Delete a Database User": "/api/atlas/v1.0/groups/%s/databaseUsers/admin/%s"
        },
        "Alerts": {
            "Get All Alerts": "/api/atlas/v1.0/groups/%s/alerts?pageNum=%d&itemsPerPage=%d",
            "Get All Alerts with status": "/api/atlas/v1.0/groups/%s/alerts?status=%s&pageNum=%d&itemsPerPage=%d",
            "Get an Alert": "/api/atlas/v1.0/groups/%s/alerts/%s",
            "Acknowledge an Alert": "/api/atlas/v1.0/groups/%s/alerts/%s"
        },
        "Whitelist": {
            "Get All Whitelist Entries": "/api/atlas/v1.0/groups/%s/whitelist?pageNum=%d&itemsPerPage=%d",
            "Get Whitelist Entry": "/api/atlas/v1.0/groups/%s/whitelist/%s",
            "Create Whitelist Entry": "/api/atlas/v1.0/groups/%s/whitelist",
            "Delete Whitelist Entry": "/api/atlas/v1.0/groups/%s/whitelist/%s"
        },
        "Maintenance Windows": {
            "Get Maintenance Window": "/api/atlas/v1.0/groups/{GROUP_ID}/maintenanceWindow",
            "Update Maintenance Window": "/api/atlas/v1.0/groups/{GROUP_ID}/maintenanceWindow",
            "Defer Maintenance Window": "/api/atlas/v1.0/groups/{GROUP_ID}/maintenanceWindow/defer",
            "Delete Maintenance Window": "/api/atlas/v1.0/groups/{GROUP_ID}/maintenanceWindow"
        },
        # NOTE(review): several templates below use the placeholder name
        # {GROUP_ID} where an organization id is substituted; callers must
        # format with the placeholder name exactly as written — renaming it
        # here would break them.
        "Organization API Keys": {
            "Get all Organization API Keys associated with org": URI_STUB + "/orgs/{GROUP_ID}/apiKeys",
            "Get one Organization API Key": URI_STUB + "/orgs/{ORG_ID}/apiKeys/{API_KEY_ID}",
            "Get Whitelists for API Key": URI_STUB + "/orgs/{ORG_ID}/apiKeys/{API_KEY_ID}/whitelist",
            "Create one or more whitelist entries for APi Key": URI_STUB + "/orgs/{GROUP_ID}/apiKeys/{"
                                                                           "API_KEY_ID}/whitelist",
            "Get a single whitelist entry": URI_STUB + "/orgs/{GROUP_ID}/apiKeys/{API_KEY_ID}/whitelist/{IP_ADDRESS}"
            # Incomplete
        },
        "Project API Keys": {
            "Get All API Keys Assigned to Project": URI_STUB + "/groups/{GROUP_ID}/apiKeys",

        },
        "Cloud Backup": {
            "Get all Cloud Backups for cluster": URI_STUB + "/groups/{GROUP_ID}/clusters/"
                                                            "{CLUSTER_NAME}/backup/snapshots",
            "Get snapshot by SNAPSHOT-ID": URI_STUB + "/groups/{GROUP_ID}/clusters/"
                                                      "{CLUSTER_NAME}/backup/snapshots/{SNAPSHOT_ID}",
            "Delete snapshot by SNAPSHOT-ID": URI_STUB + "/groups/{GROUP_ID}/clusters/"
                                                         "{CLUSTER_NAME}/backup/snapshots/{SNAPSHOT_ID}",
            "Take an on-demand snapshot": URI_STUB + "/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}/backup/snapshots",

        },
        "Cloud Backup Restore Jobs": {
            "Get all Cloud Backup restore jobs by cluster": URI_STUB + "/groups/{GROUP_ID}/clusters/"
                                                                       "{CLUSTER_NAME}/backup/restoreJobs",
            "Get Cloud Backup restore job by cluster": URI_STUB + "/groups/{GROUP_ID}/clusters/"
                                                                  "{CLUSTER_NAME}/backup/restoreJobs/{JOB_ID}",
            "Restore snapshot by cluster": URI_STUB + "/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}"
                                                      "/backup/restoreJobs",
            "Cancel manual download restore job by job_id": URI_STUB + "/groups/{GROUP_ID}/clusters/{CLUSTER_NAME}"
                                                                       "/backup/restoreJobs/{JOB_ID}"

        }
        ,
        "Projects": {
            "Projects that the authenticated user can access": URI_STUB + "/groups/",
            "Project by group_id": URI_STUB + "/groups/{GROUP_ID}",
            "Project by group name": URI_STUB + "/groups/byName/{GROUP_NAME}",
            "Project teams by group_id": URI_STUB + "/groups/{GROUP_ID}/teams/",
            "Remove the specified Atlas team from the specified project.": URI_STUB + "/groups/{GROUP_ID}/"
                                                                                      "teams/{TEAM_ID}",
            "Atlas Users assigned to project": URI_STUB + "/groups/{GROUP_ID}/users/",
            "Remove Atlas Users assigned to project": URI_STUB + "/groups/{GROUP_ID}/users/{USER_ID}",
            "Pending invitations to the project associated ": URI_STUB + "/groups/{GROUP_ID}/invites",
            # NOTE(review): missing "/" before {INVITATION_ID} below? confirm
            # against callers/the Atlas API reference.
            "One Pending invitation to the project associated": URI_STUB + "/groups/{GROUP_ID}/"
                                                                           "invites{INVITATION_ID}",
            "Settings for project": URI_STUB + "/groups/{GROUP_ID}/settings",
        }

        ,
        "Organizations": {
            "Orgs the authenticated user can access": URI_STUB + "/orgs/",
            "Org by org_id": URI_STUB + "/orgs/{ORG_ID}",
            # NOTE(review): "{ORGS_ID}" below looks like a typo for
            # "{ORG_ID}", but callers must pass this exact placeholder name.
            "Atlas Users associated to Org": URI_STUB + "/orgs/{ORGS_ID}/users/",
            "Projects associated with the Org": URI_STUB + "/orgs/{ORG_ID}/groups"
        }

    }
    #

    # Atlas enforced
    databaseName = "admin"

    # Atlas default pagination
    pageNum = 1
    itemsPerPage: int = int(os.getenv('ITEMS_PER_PAGE', 500))
    itemsPerPageMin: int = int(os.getenv('ITEMS_PER_PAGE_MIN', 1))
    itemsPerPageMax: int = int(os.getenv('ITEMS_PER_PAGE_MAX', 2000))

    # Requests timeouts, in seconds (the unit the `requests` library uses)
    requests_timeout = 10
    file_request_timeout = 360

    # HTTP Return code
    SUCCESS = 200
    CREATED = 201
    ACCEPTED = 202
    NO_CONTENT = 204
    BAD_REQUEST = 400
    UNAUTHORIZED = 401
    FORBIDDEN = 403
    NOTFOUND = 404
    METHOD_NOT_ALLOWED = 405
    CONFLICT = 409
    SERVER_ERRORS = 500
186 |
--------------------------------------------------------------------------------
/atlasapi/teams.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, List
2 |
3 |
class TeamRoles:
    """Pairs a team id with the Atlas access roles granted to it."""

    def __init__(self, team_id: str, roles: List[str]):
        """Store the team identifier and its role list.

        Args:
            team_id (str): Unique identifier of the team.
            roles (List[str]): Atlas access role names held by the team.
        """
        self.roles = roles
        self.team_id = team_id
14 |
15 |
class Team:
    def __init__(self, name: str, id: str = None, usernames: Optional[List[str]] = None, org_id: Optional[str] = None,
                 links: Optional[list] = None):
        """A single Atlas team.

        Args:
            name (str): The name of the team.
            id (str): The unique identifier for the team.
            usernames (Optional[List[str]]): Valid email addresses of users to
                add to the team. Atlas checks whether each email belongs to
                the organization before associating the user with the team.
            org_id (Optional[str]): The unique identifier for the organization
                the team is associated with.
            links (Optional[list]): Links to team-related resources.
        """
        self.name: str = name
        self.id: Optional[str] = id
        self.usernames = usernames
        self.org_id = org_id
        self.links: Optional[list] = links

    @classmethod
    def for_create(cls, org_id: str, name: str, usernames: List[str]):
        """Build a Team in the form needed to create it via the Atlas API.

        Args:
            org_id (str): The unique identifier for the organization to
                associate the team with.
            name (str): The name of the team.
            usernames (List[str]): Valid email addresses of users to add to
                the new team.

        Returns:
            Team: a new instance carrying the creation fields.
        """
        return cls(name=name, org_id=org_id, usernames=usernames)

    @property
    def as_create_dict(self) -> dict:
        """The payload dict the Atlas API expects when creating a team.

        Only ``name`` and ``usernames`` are included in the body.
        """
        return {"name": self.name, "usernames": self.usernames}
57 |
58 |
# NOTE(review): this redefines TeamRoles, shadowing the identical class
# declared earlier in this module (which carries the docstring). One of the
# two definitions should be removed; they are behaviorally the same.
class TeamRoles:
    def __init__(self, team_id: str, roles: List[str]):
        # Unique identifier of the team.
        self.team_id = team_id
        # Atlas access role names granted to the team.
        self.roles = roles
--------------------------------------------------------------------------------
/atlasapi/whitelist.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ipaddress import IPv4Address, IPv4Network
16 | from pprint import pprint
17 | from typing import Optional
18 | import logging
19 | from datetime import datetime
20 | from isodate import parse_datetime
class WhitelistEntry(object):
    def __init__(self, cidrBlock: str = None, comment: str = None, ipAddress: str = None, links: list = None,
                 last_used: str = None, count: int = None, last_used_address: str = None):
        """
        For a single whitelist entry. Contains a bit of helper intelligence for ip addresses.

        :param cidrBlock: CIDR block string (e.g. "10.0.0.0/24"), if any.
        :param comment: Free-text comment attached to the entry.
        :param ipAddress: Single IP address string, if any.
        :param links: Related-resource links from the API.
        :param last_used: Timestamp string of last use (parsed if possible).
        :param count: Usage count reported for the entry.
        :param last_used_address: Last source address that matched the entry.
        """
        # Parsed helpers are best-effort: any parse failure leaves the
        # attribute as None and logs a warning, matching the raw inputs.
        self.last_used_address: Optional[IPv4Address] = None
        try:
            self.last_used_address = IPv4Address(last_used_address)
        except Exception:
            logging.warning('No last used address')

        self.count: Optional[int] = count
        self.last_used: Optional[datetime] = None
        try:
            self.last_used = parse_datetime(last_used)
        except Exception:
            logging.warning('Could not get last used date.')
        self.links = links
        self.ipAddress = ipAddress
        self.comment = comment
        self.cidrBlock = cidrBlock
        try:
            self.cidrBlockObj: IPv4Network = IPv4Network(self.cidrBlock)
        except Exception:
            self.cidrBlockObj = None
        try:
            self.ipAddressObj: IPv4Address = IPv4Address(self.ipAddress)
        except Exception:
            self.ipAddressObj = None

    @classmethod
    def fill_from_dict(cls, data_dict: dict):
        """
        Fills the object from the standard Atlas API dictionary.
        :param data_dict: dict in the Atlas API whitelist-entry format.
        :return: WhitelistEntry
        """
        return cls(cidrBlock=data_dict.get('cidrBlock', None),
                   comment=data_dict.get('comment', None),
                   ipAddress=data_dict.get('ipAddress', None),
                   links=data_dict.get('links', None),
                   last_used=data_dict.get('lastUsed', None),
                   count=data_dict.get('count', 0),
                   last_used_address=data_dict.get('lastUsedAddress', None))

    def as_dict(self) -> dict:
        """
        Dumps obj as a json valid dict.

        Fix: operates on a *copy* of ``__dict__``. The previous version
        deleted the parsed helper attributes directly off the instance, so a
        second call raised KeyError and the object was left mutilated.
        :return: dict without the non-serializable parsed helper objects.
        """
        out = dict(self.__dict__)
        out.pop('ipAddressObj', None)
        out.pop('cidrBlockObj', None)
        if self.last_used_address is not None:
            out['last_used_address'] = str(self.last_used_address)
        return out
88 |
--------------------------------------------------------------------------------
/atlascli/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Joe Drumgoole
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/atlascli/atlaserrors.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Joe Drumgoole
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from requests.exceptions import HTTPError
16 |
17 |
class AtlasError(HTTPError):
    """Base error for Atlas CLI operations, carrying optional response text."""

    def __init__(self, *args, **kwargs):
        # Pull our extension kwarg out before delegating to HTTPError.
        text = kwargs.pop("text", None)
        super().__init__(*args, **kwargs)
        self._text = text

    @property
    def text(self):
        """Response body text associated with the error, if any."""
        return self._text
27 |
28 |
class AtlasAuthenticationError(AtlasError):
    """Error raised for authentication failures against the Atlas API."""
    pass
31 |
32 |
class AtlasGetError(AtlasError):
    """Error raised for failed Atlas GET requests."""
    pass
35 |
36 |
class AtlasPostError(AtlasError):
    """Error raised for failed Atlas POST requests."""
    pass
39 |
40 |
class AtlasPatchError(AtlasError):
    """Error raised for failed Atlas PATCH requests."""
    pass
43 |
44 |
class AtlasDeleteError(AtlasError):
    """Error raised for failed Atlas DELETE requests."""
    pass
47 |
48 |
class AtlasEnvironmentError(ValueError):
    """Raised when a required environment variable (e.g. an Atlas API key) is not set."""
    pass
51 |
52 |
class AtlasInitialisationError(ValueError):
    """Error raised for initialisation failures."""
    pass
55 |
--------------------------------------------------------------------------------
/atlascli/atlaskey.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Joe Drumgoole
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # Modifications copyright (C) Joe Drumgoole
15 |
16 | from enum import Enum
17 | import os
18 | from atlascli.atlaserrors import AtlasEnvironmentError
19 |
20 |
class AtlasEnv(Enum):
    """Names of the environment variables that hold Atlas API credentials."""

    ATLAS_PUBLIC_KEY = "ATLAS_PUBLIC_KEY"
    ATLAS_PRIVATE_KEY = "ATLAS_PRIVATE_KEY"

    def __str__(self) -> str:
        # Render as the bare variable name instead of "AtlasEnv.ATLAS_…".
        return self.value
27 |
28 |
class AtlasKey:
    """Holds an Atlas programmatic API key pair (public + private)."""

    def __init__(self, public_key, private_key):
        self._public_key = public_key
        self._private_key = private_key

    @staticmethod
    def getenv(key_string):
        """Read an environment variable, raising if it is not set.

        Args:
            key_string (str): Name of the environment variable.

        Returns:
            str: The variable's value.

        Raises:
            AtlasEnvironmentError: if the variable is unset.
        """
        key = os.getenv(key_string)
        if key is None:
            # Fix: the old message always said "Private key environment
            # variable" even when the public-key variable was the one missing.
            raise AtlasEnvironmentError(f"Environment variable '{key_string}' is not set")
        return key

    @classmethod
    def get_from_env(cls):
        """Build an AtlasKey from ATLAS_PUBLIC_KEY / ATLAS_PRIVATE_KEY."""
        public_key = AtlasKey.getenv(AtlasEnv.ATLAS_PUBLIC_KEY.value)
        private_key = AtlasKey.getenv(AtlasEnv.ATLAS_PRIVATE_KEY.value)
        return AtlasKey(public_key, private_key)

    @property
    def private_key(self):
        return self._private_key

    @property
    def public_key(self):
        return self._public_key

    @staticmethod
    def obfuscate(s, show=4, hide_char="x"):
        """Mask a secret, keeping only ``show`` characters visible.

        NOTE(review): the visible characters are the FIRST ``show`` chars of
        ``s``, appended AFTER the mask (e.g. "abcdefgh" -> "xxxxabcd"). That
        ordering looks accidental, but it is preserved here because
        ``__repr__`` output may be relied upon — confirm intent before
        changing it.
        """
        length = len(s)
        if show > length:
            return s
        return (hide_char * (length - show)) + s[:show]

    def __repr__(self):
        return (f"AtlasKey(public_key='{AtlasKey.obfuscate(self._public_key)}', " +
                f"private_key='{AtlasKey.obfuscate(self._private_key)}')")
68 |
--------------------------------------------------------------------------------
/atlascli/cli.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Joe Drumgoole
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # Modifications copyright (C) Joe Drumgoole
15 |
16 | import argparse
17 | import sys
18 | # import pprint
19 | import logging
20 | import os
21 | from enum import Enum
22 |
23 | from atlasapi.atlas import Atlas
24 |
25 | from atlascli.listcommand import ListCommand, ListFormat
26 |
27 |
class AtlasResource(Enum):
    """Atlas resource types the CLI can operate on (used for --resource)."""

    ORGANIZATION = "organization"
    PROJECT = "project"
    CLUSTER = "cluster"

    def __str__(self) -> str:
        # argparse displays choices via str(); show the bare value.
        return self.value
35 |
36 |
def main(args=None):
    """Run the atlascli command line tool.

    :param args: command line argument list (e.g. ``sys.argv[1:]``). When
        ``None`` -- as happens when the ``console_scripts`` entry point in
        setup.py invokes ``main()`` with no arguments -- the arguments are
        taken from ``sys.argv`` instead. (Previously the parameter was
        required, so the installed ``atlascli`` script crashed with a
        TypeError.)
    :return: None
    """
    if args is None:
        args = sys.argv[1:]  # strip off the program name

    # NOTE: adjacent string literals need explicit trailing spaces, otherwise
    # the help text runs words together ("Atlasdatabase").
    parser = argparse.ArgumentParser(
        prog="atlascli",
        description="A command line interface to the MongoDB Atlas "
                    "database as a service. "
                    "See https://www.mongodb.com/cloud/atlas for more info. "
                    "See also https://docs.atlas.mongodb.com/configure-api-access"
                    "/#programmatic-api-keys "
                    "for how to obtain a programmatic API key required to access the API")

    parser.add_argument("--publickey", help="MongoDB Atlas public API key")
    parser.add_argument("--privatekey", help="MongoDB Atlas private API key")
    parser.add_argument("--atlasgroup", help="Default group (aka project)")

    parser.add_argument("--format", type=ListFormat,
                        choices=list(ListFormat),
                        default=ListFormat.short,
                        help="Format for output of list command [default: %(default)s]")

    parser.add_argument("--resource",
                        type=AtlasResource, default=AtlasResource.CLUSTER,
                        choices=list(AtlasResource),
                        help="Which resource type are we operating on: "
                             "organization, project or cluster? [default: %(default)s]")

    parser.add_argument('--id', type=str, help='Specify a resource id')

    parser.add_argument("--debug", default=False, action="store_true",
                        help="Turn on logging at debug level [default: %(default)s]")

    parser.add_argument("--list", default=False, action="store_true",
                        help="List a set of resources [default: %(default)s]")

    args = parser.parse_args(args)

    # Single format string; only the level differs between debug/normal runs.
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=log_format,
                        level=logging.DEBUG if args.debug else logging.INFO)

    logging.debug("logging is on at DEBUG level")

    # API keys come from the command line first, then from the environment.
    if args.publickey:
        public_key = args.publickey
    else:
        public_key = os.getenv("ATLAS_PUBLIC_KEY")
        if public_key is None:
            print("you must specify an ATLAS public key via --publickey arg "
                  "or the environment variable ATLAS_PUBLIC_KEY")
            sys.exit(1)

    if args.privatekey:
        private_key = args.privatekey
    else:
        private_key = os.getenv("ATLAS_PRIVATE_KEY")
        if private_key is None:
            print("you must specify an ATLAS private key via --privatekey "
                  "arg or the environment variable ATLAS_PRIVATE_KEY")
            sys.exit(1)

    atlas = Atlas(public_key, private_key, args.atlasgroup)

    if args.list:
        if args.resource == AtlasResource.CLUSTER:
            list_cmd = ListCommand(args.format)
            if args.id:
                print("Cluster:")
                cluster = atlas.Clusters.get_single_cluster(cluster=args.id)
                list_cmd.list_one(cluster)
            else:
                print("Cluster list:")
                clusters = atlas.Clusters.get_all_clusters(iterable=True)
                total = list_cmd.list_all(clusters)
                print(f"{total} cluster(s)")
118 |
119 |
# Script entry point when cli.py is executed directly (the installed
# `atlascli` console script goes through setup.py's entry point instead).
if __name__ == "__main__":
    main(sys.argv[1:])  # strip off the program name
122 |
--------------------------------------------------------------------------------
/atlascli/listcommand.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Joe Drumgoole
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # Modifications copyright (C) Joe Drumgoole
15 |
16 | from enum import Enum
17 | import pprint
18 |
19 |
class ListFormat(Enum):
    """Output verbosity for the list command: id-only or full document."""

    short = "short"
    full = "full"

    def __str__(self) -> str:
        # Keeps argparse help/choices readable ("short" not "ListFormat.short").
        return self.value
26 |
27 |
class Commands(Enum):
    """Names of the CLI sub-commands (currently only List)."""

    List = "List"

    def __str__(self) -> str:
        # Render as the plain command name.
        return self.value
33 |
34 |
class ListCommand:
    """Execute the list command: print resources in the configured format."""

    def __init__(self, format: ListFormat = ListFormat.short):
        self._format = format

    def list_one(self, resource):
        """Print a single resource.

        In ``short`` format only the resource's ``id`` field is printed;
        otherwise the whole document is pretty-printed.
        """
        if self._format is ListFormat.short:
            print(resource["id"])
        else:
            pprint.pprint(resource)

    def list_all(self, iterator):
        """Print every resource from *iterator* and return how many were printed.

        Bug fix: the previous implementation returned the last ``enumerate``
        index, which is one less than the number of items (and 0 for both an
        empty and a single-element iterator), so the CLI reported
        "2 cluster(s)" for three clusters.
        """
        total = 0
        for resource in iterator:
            self.list_one(resource)
            total += 1
        return total
54 |
--------------------------------------------------------------------------------
/bumpversion.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Bump the project version across setup.py, the Sphinx config and the
# packaging metadata, regenerate the docs, and stage a release commit.
# Usage: ./bumpversion.sh <version>

set -e

# Quick and Dirty check about parameters
if [ $# -ne 1 ]; then
    cat << EOF

Usage: $(basename "$0") version

version:
  the version number (eg: 1.0.0)

EOF
    exit 1
fi

# Quick and Dirty check about the PWD.
# Quote all expansions so paths containing spaces do not break the test.
if [ "$(dirname "$(readlink -f "$0")")" != "$PWD" ]; then
    cat << EOF

Please run the script from its root directory:

  cd $(dirname "$(readlink -f "$0")")

EOF
    exit 1
fi

# Main

VERSION=$1

echo "Updating version..."

# setup
sed -i "s/version=.*/version='$VERSION',/" ./setup.py
# docs
sed -i "s/version = .*/version = '$VERSION'/" ./gendocs/conf.py
sed -i "s/release = .*/release = '$VERSION'/" ./gendocs/conf.py
# packaging
sed -i "s/pkgver=.*/pkgver='$VERSION'/" ./packaging/archlinux/PKGBUILD
sed -i "s/pkgrel=.*/pkgrel=1/" ./packaging/archlinux/PKGBUILD

# generate docs
echo "Generating docs..."
cd gendocs
make clean > /dev/null
make html > /dev/null
rsync -crv --delete --exclude=README.rst _build/html/ ../docs/ > /dev/null
cd ..

# git add
echo "Preparing git commit..."
git add .

# python module for pypi
echo "python module creation..."
if [ -d "./dist/" ]; then
    rm -f ./dist/* > /dev/null
fi
python setup.py bdist_wheel > /dev/null

# NOTICE
cat << EOF

Please check that everything is fine by running:

  git diff HEAD

Once checked, please run :

  git commit -m "Bump version: $VERSION"
  git push origin master

  git tag -m "$VERSION" $VERSION
  git push origin $VERSION

  # pypi
  twine upload dist/*

EOF
83 |
--------------------------------------------------------------------------------
/doc_corrections.diff:
--------------------------------------------------------------------------------
1 | diff --git a/README.rst b/README.rst
2 | index 8ffb7ce..02acb8a 100644
3 | --- a/README.rst
4 | +++ b/README.rst
5 | @@ -151,13 +151,13 @@ Clusters
6 | print(cluster["name"])
7 |
8 | # Get a Single Cluster
9 | - details = a.Clusters.get_a_single_cluster("cluster-dev")
10 | + details = a.Clusters.get_single_cluster("cluster-dev")
11 |
12 | # Delete a Cluster (dry run, raise ErrConfirmationRequested)
13 | - details = a.Clusters.delete_a_cluster("cluster-dev")
14 | + details = a.Clusters.delete_cluster("cluster-dev")
15 |
16 | # Delete a Cluster (approved)
17 | - details = a.Clusters.delete_a_cluster("cluster-dev", areYouSure=True)
18 | + details = a.Clusters.delete_cluster("cluster-dev", areYouSure=True)
19 |
20 | # Create a Simple Replica Set Cluster
21 |
22 | @@ -173,14 +173,14 @@ Clusters
23 | providerSettings=provider_settings,
24 | replication_specs=replication_specs)
25 |
26 | - output = a.Clusters.create_a_cluster(cluster_config)
27 | + output = a.Clusters.create_cluster(cluster_config)
28 |
29 |
30 | # Modify a cluster
31 | - existing_config = a.Clusters.get_a_single_cluster_as_obj(cluster=TEST_CLUSTER_NAME)
32 | + existing_config = a.Clusters.get_single_cluster_as_obj(cluster=TEST_CLUSTER_NAME)
33 | out.providerSettings.instance_size_name = InstanceSizeName.M10
34 | out.disk_size_gb = 13
35 | - new_config = a.Clusters.modify_a_cluster('pyAtlasAPIClustersTest', out)
36 | + new_config = a.Clusters.modify_cluster('pyAtlasAPIClustersTest', out)
37 | pprint(new_config)
38 |
39 | # Modify cluster instance size
40 | diff --git a/atlasapi/atlas.py b/atlasapi/atlas.py
41 | index 591cbff..77c7686 100644
42 | --- a/atlasapi/atlas.py
43 | +++ b/atlasapi/atlas.py
44 | @@ -267,7 +267,7 @@ class Atlas:
45 | return self.atlas.network.delete(Settings.BASE_URL + uri)
46 | else:
47 | raise ErrConfirmationRequested(
48 | - "Please set areYouSure=True on delete_a_cluster call if you really want to delete [%s]" % cluster)
49 | + "Please set areYouSure=True on delete_cluster call if you really want to delete [%s]" % cluster)
50 |
51 | def modify_cluster(self, cluster: str, cluster_config: Union[ClusterConfig, dict]) -> dict:
52 | """Modify a Cluster
53 | diff --git a/tests/test_clusters.py b/tests/test_clusters.py
54 | index 93fb341..c0e3755 100644
55 | --- a/tests/test_clusters.py
56 | +++ b/tests/test_clusters.py
57 | @@ -73,13 +73,13 @@ class ClusterTests(BaseTests):
58 | sleep(20)
59 | print('-----------------------------------Done Sleeping -------------------------------------')
60 |
61 | - def test_06_delete_a_cluster(self):
62 | + def test_06_delete_cluster(self):
63 | myoutput = self.a.Clusters.delete_cluster(cluster=self.TEST_CLUSTER2_NAME_UNIQUE, areYouSure=True)
64 | print('Successfully Deleted {}, output was '.format(self.TEST_CLUSTER2_NAME_UNIQUE, myoutput))
65 |
66 | - test_06_delete_a_cluster.advanced = True
67 | + test_06_delete_cluster.advanced = True
68 |
69 | - def test_07_create_a_cluster(self):
70 | + def test_07_create_cluster(self):
71 | provider_settings: ProviderSettings = ProviderSettings()
72 | regions_config = RegionConfig()
73 | replication_specs = ReplicationSpecs(regions_config={provider_settings.region_name: regions_config.__dict__})
74 | @@ -90,7 +90,7 @@ class ClusterTests(BaseTests):
75 | output = self.a.Clusters.create_cluster(cluster_config)
76 | pprint(output)
77 |
78 | - test_07_create_a_cluster.advanced = True
79 | + test_07_create_cluster.advanced = True
80 |
81 | def test_08_resize_a_cluster(self):
82 | self.a.Clusters.modify_cluster_instance_size(cluster=self.TEST_CLUSTER3_NAME_UNIQUE,
83 |
--------------------------------------------------------------------------------
/gendocs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = atlasapi
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/gendocs/README.rst:
--------------------------------------------------------------------------------
1 | Docs with sphinx
2 | ----------------
3 |
4 | Local documentation
5 | ^^^^^^^^^^^^^^^^^^^
6 |
7 | .. code:: bash
8 |
9 | cd gendocs
10 | make clean
11 | make html
12 | python -m RangeHTTPServer
13 |
14 | Update docs
15 | ^^^^^^^^^^^
16 |
17 | .. code:: bash
18 |
19 | cd gendocs
20 | make clean
21 | make html
22 | rsync -crv --delete --exclude=README.rst _build/html/ ../docs/
23 |
24 |
--------------------------------------------------------------------------------
/gendocs/atlasapi-atlas-nested.rst:
--------------------------------------------------------------------------------
1 | Nested class for atlasapi\.atlas::Atlas
2 | =======================================
3 |
4 | Atlas._Clusters
5 | ---------------
6 |
7 | .. autoclass:: atlasapi.atlas::Atlas._Clusters
8 | :members:
9 | :undoc-members:
10 | :show-inheritance:
11 |
12 | Atlas._DatabaseUsers
13 | --------------------
14 |
15 | .. autoclass:: atlasapi.atlas::Atlas._DatabaseUsers
16 | :members:
17 | :undoc-members:
18 | :show-inheritance:
19 |
20 |
21 | Atlas._Alerts
22 | -------------
23 |
24 | .. autoclass:: atlasapi.atlas::Atlas._Alerts
25 | :members:
26 | :undoc-members:
27 | :show-inheritance:
28 |
29 |
30 | Atlas._MaintenanceWindows
31 | -------------------------
32 |
33 | .. autoclass:: atlasapi.atlas::Atlas._MaintenanceWindows
34 | :members:
35 | :undoc-members:
36 | :show-inheritance:
37 |
38 |
39 | Atlas._Hosts
40 | ------------
41 |
42 | .. autoclass:: atlasapi.atlas::Atlas._Hosts
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 |
48 | Atlas._Events
49 | -------------
50 |
51 | .. autoclass:: atlasapi.atlas::Atlas._Events
52 | :members:
53 | :undoc-members:
54 | :show-inheritance:
55 |
56 | Atlas._Whitelist
57 | ----------------
58 |
59 | .. autoclass:: atlasapi.atlas::Atlas._Whitelist
60 | :members:
61 | :undoc-members:
62 | :show-inheritance:
63 |
64 | Atlas._CloudBackups
65 | --------------------
66 |
67 | .. autoclass:: atlasapi.atlas::Atlas._CloudBackups
68 | :members:
69 | :undoc-members:
70 | :show-inheritance:
71 |
72 |
73 | Atlas._Projects
74 | ----------------
75 |
76 | .. autoclass:: atlasapi.atlas::Atlas._Projects
77 | :members:
78 | :undoc-members:
79 | :show-inheritance:
80 |
81 |
Atlas._Organizations
--------------------
84 |
85 | .. autoclass:: atlasapi.atlas::Atlas._Organizations
86 | :members:
87 | :undoc-members:
88 | :show-inheritance:
--------------------------------------------------------------------------------
/gendocs/atlasapi.rst:
--------------------------------------------------------------------------------
1 | atlasapi package
2 | =================
3 |
4 | atlasapi\.atlas module
5 | ----------------------
6 |
7 | .. automodule:: atlasapi.atlas
8 | :members:
9 | :undoc-members:
10 | :show-inheritance:
11 |
12 |
13 | atlasapi\.atlas_types module
14 | -----------------------------
15 |
16 | .. automodule:: atlasapi.atlas_types
17 | :members:
18 | :undoc-members:
19 | :show-inheritance:
20 |
21 | atlasapi\.alerts module
22 | ------------------------
23 |
24 | .. automodule:: atlasapi.alerts
25 | :members:
26 | :undoc-members:
27 | :show-inheritance:
28 |
29 | atlasapi\.clusters module
30 | -------------------------
31 |
32 | .. automodule:: atlasapi.clusters
33 | :members:
34 | :undoc-members:
35 | :show-inheritance:
36 |
37 | atlasapi\.events module
38 | -----------------------
39 |
40 | .. automodule:: atlasapi.events
41 | :members:
42 | :undoc-members:
43 | :show-inheritance:
44 |
45 | atlasapi\.measurements module
46 | ------------------------------
47 |
48 | .. automodule:: atlasapi.measurements
49 | :members:
50 | :undoc-members:
51 | :show-inheritance:
52 |
53 |
54 | atlasapi\.whitelist module
55 | ---------------------------
56 |
57 | .. automodule:: atlasapi.whitelist
58 | :members:
59 | :undoc-members:
60 | :show-inheritance:
61 |
62 | atlasapi\.errors module
63 | -----------------------
64 |
65 | .. automodule:: atlasapi.errors
66 | :members:
67 | :undoc-members:
68 | :show-inheritance:
69 |
70 | atlasapi\.network module
71 | ------------------------
72 |
73 | .. automodule:: atlasapi.network
74 | :members:
75 | :undoc-members:
76 | :show-inheritance:
77 |
78 | atlasapi\.settings module
79 | -------------------------
80 |
81 | .. automodule:: atlasapi.settings
82 | :members:
83 | :undoc-members:
84 | :show-inheritance:
85 |
86 | atlasapi\.specs module
87 | ----------------------
88 |
89 | .. automodule:: atlasapi.specs
90 | :members:
91 | :undoc-members:
92 | :show-inheritance:
93 |
94 | atlasapi\.maintenance_window module
95 | ------------------------------------
96 |
97 | .. automodule:: atlasapi.maintenance_window
98 | :members:
99 | :undoc-members:
100 | :show-inheritance:
101 |
102 | atlasapi\.cloud_backup module
103 | ------------------------------------
104 |
105 | .. automodule:: atlasapi.cloud_backup
106 | :members:
107 | :undoc-members:
108 | :show-inheritance:
109 |
110 | atlasapi\.lib module
111 | ---------------------
112 |
113 | .. automodule:: atlasapi.lib
114 | :members:
115 | :undoc-members:
116 | :show-inheritance:
117 |
118 | atlasapi\.projects module
119 | -------------------------
120 |
121 | .. automodule:: atlasapi.projects
122 | :members:
123 | :undoc-members:
124 | :show-inheritance:
125 |
126 | atlasapi\.organizations module
127 | ------------------------------
128 |
129 | .. automodule:: atlasapi.organizations
130 | :members:
131 | :undoc-members:
132 | :show-inheritance:
133 |
--------------------------------------------------------------------------------
/gendocs/atlascli.rst:
--------------------------------------------------------------------------------
1 | atlascli - A Command line program for MongoDB Atlas
2 | ====================================================
3 |
4 | The command line help for atlascli.py::
5 |
6 | $ python atlascli/cli.py -h
7 | usage: atlascli [-h] [--publickey PUBLICKEY] [--privatekey PRIVATEKEY]
8 | [--atlasgroup ATLASGROUP] [--format {short,full}]
9 | [--resource {organization,project,cluster}] [--id ID]
10 | [--debug] [--list]
11 |
    A command line interface to the MongoDB Atlasdatabase as a
13 | service.https://www.mongodb.com/cloud/atlas for more infoSee also
14 | https://docs.atlas.mongodb.com/configure-api-access/#programmatic-api-keysFor
15 | how to obtain a programmatic API key required to access the API
16 |
17 | optional arguments:
18 | -h, --help show this help message and exit
19 | --publickey PUBLICKEY
20 | MongoDB Atlas public API key
21 | --privatekey PRIVATEKEY
22 | MongoDB Atlas private API key
23 | --atlasgroup ATLASGROUP
24 | Default group (aka project)
25 | --format {short,full}
26 | Format for output of list command [default: short]
27 | --resource {organization,project,cluster}
28 | Which resource type are we operating on:organization,
29 | project or cluster? [default: cluster]
30 | --id ID Specify a resource id
31 | --debug Turn on logging at debug level [default: False]
32 | --list List a set of resources [default: False]
33 |
--------------------------------------------------------------------------------
/gendocs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | #
4 | # atlasapi documentation build configuration file, created by
5 | # sphinx-quickstart on Wed Jan 3 14:44:12 2018.
6 | #
7 | # This file is execfile()d with the current directory set to its
8 | # containing dir.
9 | #
10 | # Note that not all possible configuration values are present in this
11 | # autogenerated file.
12 | #
13 | # All configuration values have a default; values that are commented out
14 | # serve to show the default.
15 |
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | #
20 | import os
21 | import sys
22 | import sphinx_rtd_theme
23 |
24 | sys.path.insert(0, os.path.abspath('../'))
25 |
26 | # -- General configuration ------------------------------------------------
27 |
28 | # If your documentation needs a minimal Sphinx version, state it here.
29 | #
30 | # needs_sphinx = '1.0'
31 |
32 | # Add any Sphinx extension module names here, as strings. They can be
33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 | # ones.
extensions = [
    'sphinx.ext.autodoc',  # listed only once; the original entry was duplicated
    'sphinx.ext.githubpages',
    'sphinx_rtd_theme',
    'sphinx.ext.napoleon',
    'sphinx_autodoc_annotation',
]
42 |
43 | # Add any paths that contain templates here, relative to this directory.
44 | templates_path = ['_templates']
45 |
46 | # The suffix(es) of source filenames.
47 | # You can specify multiple suffix as a list of string:
48 | #
49 | # source_suffix = ['.rst', '.md']
50 | source_suffix = '.rst'
51 |
52 | # The master toctree document.
53 | master_doc = 'index'
54 |
55 | # General information about the project.
56 | project = 'atlasapi'
57 | copyright = '2022, Matthew G. Monteleone'
58 | author = 'Matthew G. Monteleone'
59 |
60 | # The version info for the project you're documenting, acts as replacement for
61 | # |version| and |release|, also used in various other places throughout the
62 | # built documents.
63 | #
64 | # The short X.Y version.
# NOTE: kept in sync with setup.py by bumpversion.sh (sed rewrites these
# lines, so keep the "version = '...'" / "release = '...'" shape intact).
version = '2.0.11'
# The full version, including alpha/beta/rc tags.
release = '2.0.11'
68 |
69 | # The language for content autogenerated by Sphinx. Refer to documentation
70 | # for a list of supported languages.
71 | #
72 | # This is also used if you do content translation via gettext catalogs.
73 | # Usually you set "language" from the command line for these cases.
# Explicit language; `None` is deprecated in modern Sphinx and raises a
# "language = None is not supported" warning at build time.
language = 'en'
75 |
76 | # List of patterns, relative to source directory, that match files and
77 | # directories to ignore when looking for source files.
78 | # This patterns also effect to html_static_path and html_extra_path
79 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.rst']
80 |
81 | # The name of the Pygments (syntax highlighting) style to use.
82 | pygments_style = 'sphinx'
83 |
84 | # If true, `todo` and `todoList` produce output, else they produce nothing.
85 | todo_include_todos = False
86 |
87 | # -- Options for HTML output ----------------------------------------------
88 |
89 | # The theme to use for HTML and HTML Help pages. See the documentation for
90 | # a list of builtin themes.
91 | #
92 | html_theme = 'sphinx_rtd_theme'
93 |
94 | # Theme options are theme-specific and customize the look and feel of a theme
95 | # further. For a list of options available for each theme, see the
96 | # documentation.
97 | #
98 | # html_theme_options = {}
99 |
100 | # Add any paths that contain custom static files (such as style sheets) here,
101 | # relative to this directory. They are copied after the builtin static files,
102 | # so a file named "default.css" will overwrite the builtin "default.css".
103 | html_static_path = ['_static']
104 |
105 | # Custom sidebar templates, must be a dictionary that maps document names
106 | # to template names.
107 | #
108 | # This is required for the alabaster theme
109 | # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
110 | html_sidebars = {
111 | '**': [
112 | 'relations.html', # needs 'show_related': True theme option to display
113 | 'searchbox.html',
114 | ]
115 | }
116 |
117 | # -- Options for HTMLHelp output ------------------------------------------
118 |
119 | # Output file base name for HTML help builder.
120 | htmlhelp_basename = 'atlasapidoc'
121 |
122 | # -- Options for LaTeX output ---------------------------------------------
123 |
124 | latex_elements = {
125 | # The paper size ('letterpaper' or 'a4paper').
126 | #
127 | # 'papersize': 'letterpaper',
128 |
129 | # The font size ('10pt', '11pt' or '12pt').
130 | #
131 | # 'pointsize': '10pt',
132 |
133 | # Additional stuff for the LaTeX preamble.
134 | #
135 | # 'preamble': '',
136 |
137 | # Latex figure (float) alignment
138 | #
139 | # 'figure_align': 'htbp',
140 | }
141 |
142 | # Grouping the document tree into LaTeX files. List of tuples
143 | # (source start file, target name, title,
144 | # author, documentclass [howto, manual, or own class]).
145 | latex_documents = [
146 | (master_doc, 'atlasapi.tex', 'atlasapi Documentation',
147 | 'Matthew G. Monteleone', 'manual'),
148 | ]
149 |
150 | # -- Options for manual page output ---------------------------------------
151 |
152 | # One entry per manual page. List of tuples
153 | # (source start file, name, description, authors, manual section).
154 | man_pages = [
155 | (master_doc, 'atlasapi', 'atlasapi Documentation',
156 | [author], 1)
157 | ]
158 |
159 | # -- Options for Texinfo output -------------------------------------------
160 |
161 | # Grouping the document tree into Texinfo files. List of tuples
162 | # (source start file, target name, title, author,
163 | # dir menu entry, description, category)
164 | texinfo_documents = [
165 | (master_doc, 'atlasapi', 'atlasapi Documentation',
166 | author, 'atlasapi', 'One line description of project.',
167 | 'Miscellaneous'),
168 | ]
169 |
--------------------------------------------------------------------------------
/gendocs/index.rst:
--------------------------------------------------------------------------------
1 | .. atlasapi documentation master file, created by
2 | sphinx-quickstart on Wed Jan 3 14:44:12 2018.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to atlasapi's documentation!
7 | ====================================
8 |
9 | Python Bindings for the Atlas Public API
10 |
11 | .. image:: https://readthedocs.org/projects/python-atlasapi/badge/?version=latest
12 | :target: https://python-atlasapi.readthedocs.io/en/latest/?badge=latest
13 |
14 | .. toctree::
15 | :maxdepth: 2
16 | :caption: Contents:
17 |
18 | atlasapi.rst
19 | atlasapi-atlas-nested.rst
20 | atlascli.rst
21 |
22 | Indices and tables
23 | ==================
24 |
25 | * :ref:`genindex`
26 | * :ref:`modindex`
27 | * :ref:`search`
28 |
--------------------------------------------------------------------------------
/gendocs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx-autodoc-annotation
2 | sphinx_rtd_theme
3 | pygments>=2.7.4 # not directly required, pinned by Snyk to avoid a vulnerability
4 | sphinx>=3.0.4 # not directly required, pinned by Snyk to avoid a vulnerability
5 | setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | python-dateutil
3 | isodate
4 | future
5 | pytz
6 | coolname
7 | nose
8 | awspublicranges
9 | humanfriendly
10 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Packaging configuration for the atlasapi distribution."""
from setuptools import find_packages, setup

# Read the long description up front so the file handle is closed promptly
# and the bytes are decoded explicitly (bare open() uses the platform default
# encoding, which breaks installs on non-UTF-8 locales).
with open('README.rst', encoding='utf-8') as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name='atlasapi',
    version='2.0.11',
    python_requires='>=3.7',
    packages=find_packages(exclude=("tests",)),
    install_requires=['requests', 'python-dateutil', 'isodate', 'future', 'pytz', 'coolname', 'humanfriendly', 'nose'],
    setup_requires=['wheel'],
    # Metadata
    author="Matthew G. Monteleone",
    author_email="mgm@mgm.dev",
    license="Apache License 2.0",
    description="Expose MongoDB Atlas Cloud provider APIs",
    long_description=LONG_DESCRIPTION,
    # Tell PyPI how to render the long description.
    long_description_content_type='text/x-rst',
    url="https://github.com/mgmonteleone/python-atlasapi",
    keywords=["atlas", "mongo", "mongodb", "cloud", "api"],
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: Apache Software License',

        'Operating System :: OS Independent',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    entry_points={
        'console_scripts': [
            'atlascli=atlascli.cli:main',
        ]
    },
    extras_require={}

)
49 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from os import getenv, environ
3 |
4 | try:
5 | from atlasapi.atlas import Atlas
6 | from atlasapi.clusters import ClusterStates
7 | except NameError:
8 | from atlas import Atlas
9 | from clusters import ClusterStates
10 | from datetime import datetime
11 | import coolname
12 | from time import sleep, time
13 | import humanfriendly
14 |
# Base cluster names used by the test-suite; overridable via env vars.
TEST_CLUSTER_NAME = getenv('TEST_CLUSTER_NAME', 'pyAtlasTestCluster')
TEST_CLUSTER2_NAME = getenv('TEST_CLUSTER2_NAME', 'pyAtlas-')

# Random two-word slug so concurrent test runs do not collide on names.
test_run_id = coolname.generate_slug(2)

TEST_CLUSTER_NAME_UNIQUE = TEST_CLUSTER_NAME + test_run_id
TEST_CLUSTER2_NAME_UNIQUE = TEST_CLUSTER2_NAME + test_run_id
TEST_CLUSTER3_NAME_UNIQUE = TEST_CLUSTER2_NAME + coolname.generate_slug(2)
# How long (in seconds) to poll before giving up on a cluster state change.
CLUSTER_CREATE_WAIT_SECONDS = 60 * 10
TEST_SERVERLESS_NAME = f"serverless{test_run_id}"
# Clusters created during a run; torn down in BaseTests.tearDown.
ephemeral_test_clusters = [TEST_CLUSTER_NAME_UNIQUE, TEST_CLUSTER2_NAME_UNIQUE, TEST_CLUSTER3_NAME_UNIQUE]
26 |
27 |
28 | class BaseTests(unittest.TestCase):
29 | def wait_for_cluster_state(self, cluster_name: str, states_desired: list = None, states_to_wait: list = None):
30 | if not states_to_wait:
31 | states_to_wait = [ClusterStates.CREATING, ClusterStates.UPDATING, ClusterStates.REPAIRING]
32 | if not states_desired:
33 | states_desired = [ClusterStates.IDLE]
34 | t_end = time() + self.CLUSTER_CREATE_WAIT_SECONDS
35 | seconds_elapsed = 0
36 | print(f"The states to wait for are: {states_to_wait}")
37 | print(f"The states desired are: {states_desired}")
38 | while time() < t_end:
39 | cluster_state = ClusterStates[self.a.Clusters.get_single_cluster(cluster=cluster_name).get('stateName')]
40 | print(f"⏲⏲ THe Cluster State is: {cluster_state}")
41 | if cluster_state in states_to_wait:
42 | print(
43 | f"⏳The cluster {cluster_name} is still creating (state= {cluster_state.value}), "
44 | f"will wait 15 seconds before polling again. {humanfriendly.format_timespan(seconds_elapsed)} "
45 | f"elapsed of {humanfriendly.format_timespan(self.CLUSTER_CREATE_WAIT_SECONDS)}")
46 | seconds_elapsed += 15
47 | sleep(15)
48 | elif cluster_state in states_desired:
49 | print(f"✅The cluster {self.TEST_CLUSTER3_NAME_UNIQUE} is now in {cluster_state} state!! It took "
50 | f"{humanfriendly.format_timespan(seconds_elapsed)}")
51 | break
52 | print(f"🔎🔎 The cluster {cluster_name} is in {self.a.Clusters.get_single_cluster(cluster=cluster_name)} state.")
53 | if ClusterStates[self.a.Clusters.get_single_cluster(cluster=cluster_name).get('stateName')] not in states_desired:
54 | msg = f"🙅🏽The cluster {cluster_name} did not get to {states_to_wait} state within the timeout of" \
55 | f" {self.CLUSTER_CREATE_WAIT_SECONDS} 🆘 Makes sure you manually clean up the cluster if needed!!."
56 | print(msg)
57 | raise TimeoutError(msg)
58 |
    def setUp(self):
        """Build Atlas API connections and test fixtures from environment variables.

        Reads credentials for three connections (primary, "other" project, and
        org owner), copies the module-level test-cluster name constants onto the
        instance, and then waits for the primary test cluster to be usable.

        Raises:
            EnvironmentError: if ATLAS_USER, ATLAS_KEY or ATLAS_GROUP is unset.
        """
        self.USER = getenv('ATLAS_USER', None)
        self.API_KEY = getenv('ATLAS_KEY', None)
        self.GROUP_ID = getenv('ATLAS_GROUP', None)
        self.OTHER_GROUP_ID = getenv('ATLAS_OTHER_GROUP', None)
        self.OTHER_USER = getenv('ATLAS_OTHER_USER', None)
        self.OTHER_API_KEY = getenv('ATLAS_OTHER_KEY', None)

        # print("env var is".format(getenv('ATLAS_USER', None)))

        self.GROUP_OWNER_USER = getenv('ATLAS_ORG_USER', None)
        self.GROUP_OWNER_KEY = getenv('ATLAS_ORG_KEY', None)

        self.TEST_CLUSTER_NAME = TEST_CLUSTER_NAME
        self.TEST_CLUSTER2_NAME = TEST_CLUSTER2_NAME

        # NOTE(review): TEST_CLUSTER_NAME_UNIQUE is assigned from
        # TEST_CLUSTER2_NAME_UNIQUE — looks like a copy/paste slip; confirm
        # whether a separate TEST_CLUSTER_NAME_UNIQUE module constant exists.
        self.TEST_CLUSTER_NAME_UNIQUE = TEST_CLUSTER2_NAME_UNIQUE
        self.TEST_CLUSTER2_NAME_UNIQUE = TEST_CLUSTER2_NAME_UNIQUE
        self.TEST_CLUSTER3_NAME_UNIQUE = TEST_CLUSTER3_NAME_UNIQUE
        self.TEST_SERVERLESS_NAME = TEST_SERVERLESS_NAME

        if not self.USER or not self.API_KEY or not self.GROUP_ID:
            raise EnvironmentError('In order to run this smoke test you need ATLAS_USER, AND ATLAS_KEY env variables'
                                   'your env variables are {}'.format(environ.__str__()))
        self.a = Atlas(self.USER, self.API_KEY, self.GROUP_ID)
        self.a_other = Atlas(self.OTHER_USER, self.OTHER_API_KEY, self.OTHER_GROUP_ID)
        self.a_owner = Atlas(self.GROUP_OWNER_USER, self.GROUP_OWNER_KEY)

        self.CLUSTER_CREATE_WAIT_SECONDS = CLUSTER_CREATE_WAIT_SECONDS

        # make sure test cluster is unpaused
        print(f"🚀🚀🚀 Pre Test Checks........")
        try:
            print(f"🚀Making sure the {TEST_CLUSTER_NAME} is not paused. . .")
            # NOTE(review): the last entry in states_to_wait is the bare
            # ClusterStates enum class, not a member — probably a leftover from
            # an incomplete edit (e.g. ClusterStates.RESUMING intended); confirm.
            self.wait_for_cluster_state(TEST_CLUSTER_NAME, states_desired=[ClusterStates.IDLE, ClusterStates.UPDATING],
                                        states_to_wait=[ClusterStates.CREATING, ClusterStates.REPAIRING, ClusterStates])
        except Exception as e:
            # Re-raised unchanged so the failure aborts the test run loudly.
            raise e
97 |
    # Helpers below: clean_up_cluster removes a leftover cluster; tearDown runs after each test.
99 |
100 | def clean_up_cluster(self, cluster_name):
101 | if self.a.Clusters.is_existing_cluster(cluster_name):
102 | if self.a.Clusters.get_single_cluster(cluster_name).state_name not in \
103 | [ClusterStates.DELETED, ClusterStates.DELETING]:
104 | print(f"🧹👀{cluster_name} found, and needs to be cleaned up.")
105 | self.a.Clusters.delete_cluster(cluster_name, areYouSure=True)
106 | self.wait_for_cluster_state(cluster_name, [ClusterStates.DELETED, ClusterStates.DELETING])
107 | print(f"🧹␡Successfully deleted {cluster_name}")
108 | else:
109 | print((f"🧹👀👍Found {cluster_name}, but its was in deleting state, so passing."))
110 | else:
111 | print(f"🧹👍No need to clean up {cluster_name}")
112 |
113 | def tearDown(self):
114 | print(f"✅✅✅✅✅✅✅✅✅✅ Tests Completed, entering tear down stage. ✅✅✅✅✅✅✅✅✅✅")
115 | print(f"🧹Cleaning Up, Ensuring {ephemeral_test_clusters} are not present, or is deleted/deleting.")
116 |
117 | for each_test_cluster in ephemeral_test_clusters:
118 | self.clean_up_cluster(each_test_cluster)
119 |
--------------------------------------------------------------------------------
/tests/alerts_test.py:
--------------------------------------------------------------------------------
"""
Stupid and simple smoke tests.

Uses ENV vars to store user, key and group.

TODO: Create real tests


"""

from atlasapi.atlas import Atlas
from pprint import pprint
from os import environ, getenv
from json import dumps
from atlasapi.specs import AlertStatusSpec

from atlasapi.specs import DatabaseUsersPermissionsSpecs, RoleSpecs, DatabaseUsersUpdatePermissionsSpecs

USER = getenv('ATLAS_USER', None)
API_KEY = getenv('ATLAS_KEY', None)
GROUP_ID = getenv('ATLAS_GROUP', None)

if not USER or not API_KEY or not GROUP_ID:
    raise EnvironmentError('In order to run this smoke test you need ATLAS_USER, AND ATLAS_KEY env variables'
                           'your env variables are {}'.format(environ.__str__()))

a = Atlas(USER, API_KEY, GROUP_ID)


print('----------Test Get all closed alerts ------------------')

alert_list = []
for alert in a.Alerts.get_all_alerts(AlertStatusSpec.CLOSED, iterable=True):
    print(alert["id"])
    alert_list.append(alert["id"])

print('----------Test Get an alert ------------------')

# BUG FIX: the tests below index alert_list[1], which previously raised an
# opaque IndexError when the project had fewer than two closed alerts.
if len(alert_list) < 2:
    raise EnvironmentError('This smoke test needs at least two closed alerts in the project; '
                           'found {}'.format(len(alert_list)))

details = a.Alerts.get_an_alert(alert_list[1])
pprint(details.__dict__)


print('----unack an alert-----')

details = a.Alerts.unacknowledge_an_alert(alert_list[1])

pprint(details)
--------------------------------------------------------------------------------
/tests/atlascll_test.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import os
3 | import sys
4 |
5 | from atlascli.cli import main
6 |
7 |
class AtlascliTest(unittest.TestCase):
    """Smoke test for the atlascli entry point against a live project."""

    def test_atlascli(self):
        """Run --list with the default format, then each explicit format."""
        atlas_public_key = os.getenv("ATLAS_PUBLIC_KEY")
        self.assertTrue(atlas_public_key)
        atlas_private_key = os.getenv("ATLAS_PRIVATE_KEY")
        self.assertTrue(atlas_private_key)
        atlas_group = os.getenv("ATLAS_GROUP")
        self.assertTrue(atlas_group)

        base_args = ["--publickey", atlas_public_key,
                     "--privatekey", atlas_private_key,
                     "--atlasgroup", atlas_group,
                     "--list"]

        main(base_args)
        for output_format in ("full", "short"):
            main(base_args + ["--format", output_format])
# Allow running this test module directly, outside of the nose2 runner.
if __name__ == '__main__':
    unittest.main()
39 |
--------------------------------------------------------------------------------
/tests/database_users_test.py:
--------------------------------------------------------------------------------
"""
Stupid and simple smoke tests.

Uses ENV vars to store user, key and group.

TODO: Create real tests


"""

# BUG FIX: `from atlasapi.atlas import Atlas` was imported twice; deduplicated.
from atlasapi.atlas import Atlas
from pprint import pprint
from os import environ, getenv
from json import dumps
from atlasapi.errors import ErrAtlasNotFound

from atlasapi.specs import DatabaseUsersPermissionsSpecs, RoleSpecs, DatabaseUsersUpdatePermissionsSpecs

USER = getenv('ATLAS_USER', None)
API_KEY = getenv('ATLAS_KEY', None)
GROUP_ID = getenv('ATLAS_GROUP', None)

if not USER or not API_KEY or not GROUP_ID:
    raise EnvironmentError('In order to run this smoke test you need ATLAS_USER, AND ATLAS_KEY env variables'
                           'your env variables are {}'.format(environ.__str__()))

a = Atlas(USER, API_KEY, GROUP_ID)


print('----------Test Get all Users ------------------')
details = a.DatabaseUsers.get_all_database_users(pageNum=1, itemsPerPage=100)
pprint(details)

print('----------Test Get all Users (iterable) ------------------')

for user in a.DatabaseUsers.get_all_database_users(iterable=True):
    print(user["username"])

print('----------Test Adding a User------------------')

p = DatabaseUsersPermissionsSpecs("testuser3", "not_a_password")
p.add_roles("test-db",
            [RoleSpecs.dbAdmin,
             RoleSpecs.readWrite])

p.add_role("other-test-db", RoleSpecs.readWrite, "a_collection")

details = a.DatabaseUsers.create_a_database_user(p)

pprint(details)

#print('---------Modify a User---------------')
#
## Update roles and password
#p = DatabaseUsersUpdatePermissionsSpecs("new_password")
#p.add_role("test-db", RoleSpecs.read, "b_collection")
#
#details = a.DatabaseUsers.update_a_database_user("testuser", p)
#
#pprint(details)
#
#
print('----------Delete A Database User -----------------')

# Best-effort cleanup: a missing user is reported, not treated as a failure.
try:
    details = a.DatabaseUsers.delete_a_database_user("testuser3")
    pprint(details)
except ErrAtlasNotFound as e:
    pprint('The user was not found {}'.format(e))
--------------------------------------------------------------------------------
/tests/monitoring_logs.py:
--------------------------------------------------------------------------------
"""
Stupid and simple smoke tests.

Uses ENV vars to store user, key and group.

TODO: Create real tests


"""
import csv
from os import environ, getenv
from pprint import pprint

from atlasapi.atlas import Atlas
from atlasapi.lib import AtlasPeriods, AtlasUnits, AtlasGranularities
from atlasapi.measurements import AtlasMeasurementTypes
from atlasapi.specs import ListOfHosts, Host

USER = getenv('ATLAS_USER', None)
API_KEY = getenv('ATLAS_KEY', None)
GROUP_ID = getenv('ATLAS_GROUP', None)

if not USER or not API_KEY or not GROUP_ID:
    raise EnvironmentError('In order to run this smoke test you need ATLAS_USER, AND ATLAS_KEY env variables'
                           'your env variables are {}'.format(environ.__str__()))

a = Atlas(USER, API_KEY, GROUP_ID)

# Earlier exploratory code (low-level single-host measurement pulls, cluster
# listings, and a CSV export of measurement points) lived here as large
# commented-out sections; see git history if those experiments are needed.

pprint('----------MeasureMents')

pprint('------------Test get hosts by cluster-----------------')

print('-----------Test get metrics for a clusters hosts---------------')
a.Hosts.fill_host_list(for_cluster='monitoringtest')

# Pull one network metric for every host of the 'monitoringtest' cluster.
for hostObj in a.Hosts.host_list:
    hostObj.get_measurement_for_host(measurement=AtlasMeasurementTypes.Network.bytes_out,
                                     period=AtlasPeriods.HOURS_1, granularity=AtlasGranularities.FIVE_MINUTE)

the = list()
--------------------------------------------------------------------------------
/tests/test_api_keys.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from tests import BaseTests
3 | from atlasapi.atlas import Atlas
4 | from pprint import pprint
5 | from atlasapi.api_keys import ApiKey
6 | from typing import Generator
# NOTE(review): assumed to be the id of a programmatic API key that already
# exists in the target organization — the lookup tests below depend on it.
KEY_ID = '5dd4d35fc56c98f31d6c454f'
8 |
class Test_API_keys(BaseTests):
    """Smoke tests for the ApiKeys interface (raw and object-returning calls)."""

    def test_00_raw_key_get(self):
        """The low-level key listing returns a plain list."""
        raw_keys = self.a.ApiKeys._get_api_keys()
        self.assertIsInstance(raw_keys, list)

    def test_01_keys_get(self):
        """all_keys yields key objects lazily via a generator."""
        key_iter = self.a.ApiKeys.all_keys
        pprint(type(key_iter))
        self.assertIsInstance(key_iter, Generator)
        for single_key in key_iter:
            pprint(single_key.__dict__)

    def test_02_raw_key_get_one(self):
        """The low-level single-key fetch returns the raw dict."""
        raw_key = self.a.ApiKeys._get_api_key(KEY_ID)
        self.assertIsInstance(raw_key, dict)

    def test_03_key_get_one(self):
        """get_single_key returns a truthy object for a known key id."""
        key_obj = self.a.ApiKeys.get_single_key(KEY_ID)
        self.assertTrue(key_obj)

    def test_04_raw_get_whitelists(self):
        """The low-level whitelist fetch for a key runs without error."""
        whitelist_entries = self.a.ApiKeys._get_whitelist_entry_for_key(key_id=KEY_ID)
        pprint(whitelist_entries)
32 |
# Allow running this test module directly, outside of the nose2 runner.
if __name__ == '__main__':
    unittest.main()
35 |
--------------------------------------------------------------------------------
/tests/test_cloudbackup.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for Cloud Backup
3 |
4 |
5 | """
6 | from pprint import pprint
7 | from os import environ, getenv
8 | from atlasapi.atlas import Atlas
9 | from atlasapi.cloud_backup import CloudBackupSnapshot, DeliveryType, SnapshotRestoreResponse
10 | from json import dumps
11 | from tests import BaseTests
12 | import logging
13 | from time import sleep
14 | from typing import List
15 |
16 | logger = logging.getLogger('test')
17 |
18 |
class CloudBackupTests(BaseTests):
    """Smoke tests for the CloudBackups interface (snapshots and restore jobs).

    These run against a live project and assume a pre-existing cluster named
    'pyAtlasTestCluster' (and, for the restore tests, 'pyAtlasTestRestore'
    plus hard-coded snapshot ids).  NOTE(review): confirm those fixtures still
    exist before enabling the non-basic tests.

    BUG FIX: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout; a few unused locals
    and one unreachable statement were also removed.
    """

    def test_00_test_get_for_cluster(self):
        """Listing snapshots for a cluster yields CloudBackupSnapshot objects."""
        cluster_name = 'pyAtlasTestCluster'
        snapshots: List[CloudBackupSnapshot] = self.a.CloudBackups.get_backup_snapshots_for_cluster(
            cluster_name=cluster_name)
        count = 0
        for each in snapshots:
            count += 1
            self.assertEqual(type(each), CloudBackupSnapshot)
        print(f'The number of cloudbackup snapshots returned = {count}')
        self.assertGreaterEqual(count, 1)

    test_00_test_get_for_cluster.basic = True

    def test_01_test_get_for_snapshot(self):
        """A single snapshot can be fetched by its id."""
        cluster_name = 'pyAtlasTestCluster'
        snapshots: List[CloudBackupSnapshot] = self.a.CloudBackups.get_backup_snapshots_for_cluster(
            cluster_name=cluster_name)

        snapshot_id = list(snapshots)[0].id
        print(f'The tested snapshot_id is {snapshot_id}')
        snapshot = self.a.CloudBackups.get_backup_snapshot_for_cluster(cluster_name=cluster_name,
                                                                       snapshot_id=snapshot_id)
        for each in snapshot:
            self.assertEqual(type(each), CloudBackupSnapshot)

    test_01_test_get_for_snapshot.basic = True

    def test_02_create_snapshot(self):
        """An on-demand snapshot can be created with one-day retention."""
        cluster_name = 'pyAtlasTestCluster'
        response_obj = self.a.CloudBackups.create_snapshot_for_cluster(cluster_name=cluster_name,
                                                                       retention_days=1, description="Test 01",
                                                                       as_obj=True)
        self.assertEqual(type(response_obj), CloudBackupSnapshot)
        pprint('New Snapshot created!!')

    test_02_create_snapshot.basic = False

    def test_03_restore_snapshot_to_atlas(self):
        """Restoring a snapshot to another cluster returns a SnapshotRestoreResponse."""
        source_cluster_name = 'pyAtlasTestCluster'
        target_cluster_name = 'pyAtlasTestRestore'
        snapshot_id = '6104a8c6c1b4ef7788b5d8f0'
        response_obj = self.a.CloudBackups.request_snapshot_restore(source_cluster_name=source_cluster_name,
                                                                    snapshot_id=snapshot_id,
                                                                    target_cluster_name=target_cluster_name,
                                                                    delivery_type=DeliveryType.automated)
        self.assertEqual(type(response_obj), SnapshotRestoreResponse)

    test_03_restore_snapshot_to_atlas.basic = False

    def test_04_restore_snapshot_to_atlas_bad_snapshot_id(self):
        """A malformed snapshot id is rejected with ValueError."""
        source_cluster_name = 'pyAtlasTestCluster'
        target_cluster_name = 'pyAtlasTestRestore'
        snapshot_id = '6104a8c6c1b4ef7788b5d8f0-322'
        # BUG FIX: an unreachable pprint(response_obj) after the raising call
        # was removed.
        with self.assertRaises(ValueError):
            self.a.CloudBackups.request_snapshot_restore(source_cluster_name=source_cluster_name,
                                                         snapshot_id=snapshot_id,
                                                         target_cluster_name=target_cluster_name,
                                                         delivery_type=DeliveryType.automated)

    test_04_restore_snapshot_to_atlas_bad_snapshot_id.basic = True

    def test_05_restore_snapshot_to_atlas_bad_dest_cluster(self):
        """A non-existent destination cluster is rejected with ValueError."""
        source_cluster_name = 'pyAtlasTestCluster'
        target_cluster_name = 'restoreTest-222'
        snapshot_id = '6104a8c6c1b4ef7788b5d8f0'
        with self.assertRaises(ValueError):
            self.a.CloudBackups.request_snapshot_restore(source_cluster_name=source_cluster_name,
                                                         snapshot_id=snapshot_id,
                                                         target_cluster_name=target_cluster_name,
                                                         delivery_type=DeliveryType.automated)

    test_05_restore_snapshot_to_atlas_bad_dest_cluster.basic = True

    def test_06_restore_snapshot_to_atlas_bad_same_cluster(self):
        """Restoring a snapshot onto its own source cluster is rejected."""
        source_cluster_name = 'pyAtlasTestCluster'
        target_cluster_name = 'pyAtlasTestCluster'
        snapshot_id = '6104a8c6c1b4ef7788b5d8f0'
        with self.assertRaises(ValueError):
            self.a.CloudBackups.request_snapshot_restore(source_cluster_name=source_cluster_name,
                                                         snapshot_id=snapshot_id,
                                                         target_cluster_name=target_cluster_name,
                                                         delivery_type=DeliveryType.automated)

    test_06_restore_snapshot_to_atlas_bad_same_cluster.basic = True

    def test_07_get_restore_job_for_cluster(self):
        """Restore jobs for a cluster list as SnapshotRestoreResponse objects."""
        cluster_name = 'pyAtlasTestCluster'
        restores: List[SnapshotRestoreResponse] = self.a.CloudBackups.get_snapshot_restore_requests(
            cluster_name=cluster_name)
        count = 0
        for each in restores:
            count += 1
            pprint(each)
            self.assertEqual(type(each), SnapshotRestoreResponse)
        print(f'The number of snapshots restore jobs returned = {count}')
        self.assertGreaterEqual(count, 1)

    test_07_get_restore_job_for_cluster.basic = False

    def test_08_get_one_restore_job(self):
        """A single restore job can be fetched by restore_id."""
        cluster_name = 'pyAtlasTestCluster'
        restores: List[SnapshotRestoreResponse] = self.a.CloudBackups.get_snapshot_restore_requests(
            cluster_name=cluster_name)
        restore_id = list(restores)[0].restore_id

        print(f'The restore_id to be tested is {restore_id}')

        restore_jobs = self.a.CloudBackups.get_snapshot_restore_requests(cluster_name=cluster_name,
                                                                         restore_id=restore_id)

        restore_job = list(restore_jobs)[0]

        self.assertEqual(type(restore_job), SnapshotRestoreResponse)

    test_08_get_one_restore_job.basic = False

    def test_09_is_valid_snapshot_false(self):
        """is_existing_snapshot returns False for a bogus snapshot id."""
        cluster_name = 'pyAtlasTestCluster'
        response = self.a.CloudBackups.is_existing_snapshot(cluster_name=cluster_name, snapshot_id='sdasdasd')
        self.assertEqual(response, False)

    test_09_is_valid_snapshot_false.basic = True

    def test_10_is_valid_snapshot_true(self):
        """is_existing_snapshot returns True for a real snapshot id."""
        cluster_name = 'pyAtlasTestCluster'
        snapshots: List[CloudBackupSnapshot] = self.a.CloudBackups.get_backup_snapshots_for_cluster(
            cluster_name=cluster_name)

        snapshot_id = list(snapshots)[0].id
        print(f'The tested snapshot_id is {snapshot_id}')
        response = self.a.CloudBackups.is_existing_snapshot(cluster_name=cluster_name, snapshot_id=snapshot_id)

        self.assertEqual(response, True)

    test_10_is_valid_snapshot_true.basic = True

    def test_11_restore_snapshot_to_atlas_other_proj(self):
        """A snapshot can be restored into a cluster in a different project."""
        source_cluster_name = 'pyAtlasTestCluster'
        target_cluster_name = 'pyAtlasTestRestore'
        snapshot_id = '6237a59f0942760c753d6df9'

        pprint(self.a_other.group)
        response_obj = self.a.CloudBackups.request_snapshot_restore_to_group(source_cluster_name=source_cluster_name,
                                                                             snapshot_id=snapshot_id,
                                                                             target_cluster_name=target_cluster_name,
                                                                             target_group_obj=self.a_other,
                                                                             delivery_type=DeliveryType.automated)
        self.assertEqual(type(response_obj), SnapshotRestoreResponse)

    test_11_restore_snapshot_to_atlas_other_proj.basic = False

    # def test_12_cancel_valid_restore_job(self):
    #     source_cluster_name = 'pyAtlasTestCluster'
    #     target_cluster_name = 'pyAtlasTestRestore'
    #     snapshot_id = '619d5e979977cf1a6a9adfbf'
    #     response_obj = self.a.CloudBackups.request_snapshot_restore(source_cluster_name=source_cluster_name,
    #                                                                 snapshot_id=snapshot_id,
    #                                                                 target_cluster_name=target_cluster_name,
    #                                                                 delivery_type=DeliveryType.automated)
    #
    #     print(f"The restore_id of this test restore is {response_obj.restore_id}")
    #     print(f"The canceled status is ({response_obj.cancelled})")
    #     print(f"The completed date is {response_obj.finished_at}")
    #
    #     print("Now lets cancel this puppy")
    #     out = self.a.CloudBackups.cancel_snapshot_restore_request(cluster_name=source_cluster_name,
    #                                                               restore_id=response_obj.restore_id)
    #     pprint(f"({out})")
    #
    # test_12_cancel_valid_restore_job.basic = True

    # def test_12a_cancel_valid_restore_job(self):
    #     source_cluster_name = 'pyAtlasTestCluster'
    #     target_cluster_name = 'pyAtlasTestRestore'
    #     snapshot_id = '619d5e979977cf1a6a9adfbf'
    #     response_obj = self.a.CloudBackups.request_snapshot_restore(source_cluster_name=source_cluster_name,
    #                                                                 snapshot_id=snapshot_id,
    #                                                                 target_cluster_name=target_cluster_name,
    #                                                                 delivery_type=DeliveryType.automated)
    #
    #     print(f"The restore_id of this test restore is {response_obj.restore_id}")
    #     print(f"The canceled status is ({response_obj.cancelled})")
    #     print(f"The completed date is {response_obj.finished_at}")
    #
    #     print("Now lets cancel this puppy")
    #     out = self.a.CloudBackups.cancel_snapshot_restore_request(cluster_name=source_cluster_name,
    #                                                               restore_id="619d87217547a804f47a07e7")
    #
    #
    # test_12a_cancel_valid_restore_job.basic = True
220 |
--------------------------------------------------------------------------------
/tests/test_clusters.py:
--------------------------------------------------------------------------------
1 | """
2 | Nose2 Unit Tests for the clusters module.
3 |
4 |
5 | """
6 | from pprint import pprint
7 | from os import environ, getenv
8 |
9 | import atlasapi.errors
10 | from atlasapi.atlas import Atlas
11 | from atlasapi.lib import AtlasPeriods, AtlasUnits, AtlasGranularities
12 | from json import dumps
13 | from datetime import datetime
14 | from atlasapi.clusters import AtlasBasicReplicaSet, ClusterConfig
15 | from atlasapi.lib import MongoDBMajorVersion as mdb_version
16 | from atlasapi.clusters import ClusterConfig, ProviderSettings, ReplicationSpecs, InstanceSizeName
17 | from atlasapi.clusters import RegionConfig, AdvancedOptions, TLSProtocols, ClusterStates
18 | from tests import BaseTests
19 | import logging
20 | from time import sleep, time
21 | import humanfriendly
22 |
23 | logger = logging.getLogger('test')
24 |
25 |
class ClusterTests(BaseTests):
    """Smoke/integration tests for the Clusters interface.

    BUG FIXES in this revision:
      * ``printf`` (NameError) replaced with ``print`` in test_07.
      * ``seconds_elapsed`` was undefined in test_07; the wait is now timed
        locally with ``time()``.
      * ``get_single_cluster`` returns a dict (see test_03), so test_07 now
        reads the name with ``['name']`` instead of attribute access.
      * test_05's ``assertEquals(...), new_config`` no-op tuple replaced with
        a real assertion message; deprecated ``assertEquals`` (removed in
        Python 3.12) replaced with ``assertEqual``.
      * test_10's ``assertTrue(type(out), dict)`` (always true) replaced with
        ``assertIsInstance``.
      * test_14 set its ``.basic`` marker on test_02 by mistake.
    """

    def test_00_get_all_clusters(self):
        """get_all_clusters(iterable=True) can be materialized into a list."""
        cluster_list = list(self.a.Clusters.get_all_clusters(iterable=True))

        self.assertTrue(type(cluster_list) is list)

    test_00_get_all_clusters.basic = True

    def test_01_get_all_clusters_type(self):
        """Each entry returned by get_all_clusters is a plain dict."""
        cluster_list = list(self.a.Clusters.get_all_clusters(iterable=True))
        for each_cluster in cluster_list:
            logger.warning(each_cluster)
            self.assertTrue(type(each_cluster) is dict)

    test_01_get_all_clusters_type.basic = True

    def test_02_get_a_cluster_as_obj(self):
        """get_single_cluster_as_obj returns a ClusterConfig for the test cluster."""
        cluster = self.a.Clusters.get_single_cluster_as_obj(self.TEST_CLUSTER_NAME)
        self.assertTrue(type(cluster) is ClusterConfig)
        self.assertEqual(cluster.name, self.TEST_CLUSTER_NAME)

    test_02_get_a_cluster_as_obj.basic = True

    def test_03_get_a_cluster(self):
        """get_single_cluster returns the raw dict for the test cluster."""
        cluster = self.a.Clusters.get_single_cluster(self.TEST_CLUSTER_NAME)
        self.assertTrue(type(cluster) is dict)
        self.assertEqual(cluster['name'], self.TEST_CLUSTER_NAME)

    test_03_get_a_cluster.basic = True

    def test_04_create_basic_cluster(self):
        """create_basic_rs builds an M10 replica set (deleted by test_4a)."""
        myoutput = self.a.Clusters.create_basic_rs(name=self.TEST_CLUSTER2_NAME_UNIQUE, version=mdb_version.v4_2,
                                                   size=InstanceSizeName.M10)
        self.assertEqual(type(myoutput), AtlasBasicReplicaSet)
        pprint(myoutput.config.as_dict)
        print('-------------------Waiting a bit to allow the cluster to be created......-------------')
        sleep(30)
        print('-----------------------------------Done Sleeping -------------------------------------')

    test_04_create_basic_cluster.advanced = True

    def test_05_modify_cluster_disk(self):
        """Growing disk_size_gb by 1 GB round-trips through modify_cluster."""
        existing = self.a.Clusters.get_single_cluster_as_obj(cluster=self.TEST_CLUSTER_NAME)
        old_size = existing.disk_size_gb
        new_size = existing.disk_size_gb + 1
        existing.disk_size_gb = new_size
        new_config = self.a.Clusters.modify_cluster(self.TEST_CLUSTER_NAME, existing)
        pprint('Old Size: {}. New Size {}'.format(old_size, new_size))
        # new_config is passed as the failure message so the full response is
        # visible when the size does not round-trip.
        self.assertEqual(new_config.get('diskSizeGB', 0), new_size, new_config)
        print('-------------------Waiting a bit to allow the cluster to be modified......-------------')
        sleep(20)
        print('-----------------------------------Done Sleeping -------------------------------------')

    def test_4a_delete_cluster(self):
        """Deletes the cluster created by test_04."""
        myoutput = self.a.Clusters.delete_cluster(cluster=self.TEST_CLUSTER2_NAME_UNIQUE, areYouSure=True)
        print('Successfully Deleted {}, output was '.format(self.TEST_CLUSTER2_NAME_UNIQUE, myoutput))

    test_4a_delete_cluster.advanced = True

    def test_07_create_resize_delete(self):
        """Full lifecycle: create a cluster, resize it to M20, then delete it."""
        provider_settings: ProviderSettings = ProviderSettings()
        regions_config = RegionConfig()
        replication_specs = ReplicationSpecs(regions_config={provider_settings.region_name: regions_config.__dict__})
        cluster_config = ClusterConfig(name=self.TEST_CLUSTER3_NAME_UNIQUE,
                                       providerSettings=provider_settings,
                                       replication_specs=replication_specs)

        output = self.a.Clusters.create_cluster(cluster_config)

        # get_single_cluster returns a dict, so read the name by key.
        cluster_3_config = self.a.Clusters.get_single_cluster(cluster=self.TEST_CLUSTER3_NAME_UNIQUE)
        self.assertEqual(cluster_3_config['name'], self.TEST_CLUSTER3_NAME_UNIQUE)

        # Time the wait locally so we can report how long creation took.
        wait_started = time()
        self.wait_for_cluster_state(self.TEST_CLUSTER3_NAME_UNIQUE)
        print(f"✅ Cluster {self.TEST_CLUSTER3_NAME_UNIQUE} created successfully in"
              f" {humanfriendly.format_timespan(time() - wait_started)}.")

        print(f"Will now resize {self.TEST_CLUSTER3_NAME_UNIQUE} to m20....")
        self.a.Clusters.modify_cluster_instance_size(cluster=self.TEST_CLUSTER3_NAME_UNIQUE,
                                                     new_cluster_size=InstanceSizeName.M20)
        self.wait_for_cluster_state(self.TEST_CLUSTER3_NAME_UNIQUE,
                                    states_to_wait=[ClusterStates.UPDATING, ClusterStates.REPAIRING])

        print(f"✅ Cluster Succesfully resized.")
        print(f"Going to clean up by deleting this cluster ({self.TEST_CLUSTER3_NAME_UNIQUE})")
        output = self.a.Clusters.delete_cluster(cluster=self.TEST_CLUSTER3_NAME_UNIQUE, areYouSure=True)
        self.wait_for_cluster_state(cluster_name=self.TEST_CLUSTER3_NAME_UNIQUE)
        print('Successfully Deleted resized cluster :{}, output was '.format(self.TEST_CLUSTER3_NAME_UNIQUE, output))

    test_07_create_resize_delete.advanced = True

    def test_10_pause_cluster(self):
        """Pause (or toggle) the shared test cluster, tolerating rate limits."""
        pprint('Pausing Cluster {}'.format(self.TEST_CLUSTER_NAME))
        try:
            out = self.a.Clusters.pause_cluster(cluster=self.TEST_CLUSTER_NAME, toggle_if_paused=True)
            self.assertIsInstance(out, dict, "Out Type is {}".format(type(out)))
        except Exception as e:
            # NOTE(review): assumes the raised error exposes a .details dict
            # (an Atlas API error); any other exception type would raise
            # AttributeError here — confirm intended.
            if e.details.get('errorCode') == 'CANNOT_PAUSE_RECENTLY_RESUMED_CLUSTER':
                print("We are working to fast. {}".format(e.details.get('detail')))

    test_10_pause_cluster.advanced = True

    def test_11_test_failover(self):
        """test_failover runs; an in-progress restart counts as a pass."""
        try:
            self.a.Clusters.test_failover(self.TEST_CLUSTER_NAME)
        except atlasapi.errors.ErrAtlasBadRequest as e:
            if e.code == 'CLUSTER_RESTART_IN_PROGRESS':
                self.assertTrue(True)
                logger.warning('A cluster retstart was already in effect, so passing this test.')

    test_11_test_failover.basic = False

    def test_12_get_advanced_options(self):
        """Advanced options are retrievable both as an object and as a dict."""
        out_obj = self.a.Clusters.get_single_cluster_advanced_options(self.TEST_CLUSTER_NAME)
        self.assertEqual(type(out_obj), AdvancedOptions, msg='Output should be and AdvancedOptions object')
        out_dict = self.a.Clusters.get_single_cluster_advanced_options(self.TEST_CLUSTER_NAME, as_obj=False)
        self.assertEqual(type(out_dict), dict, msg="output should be a dict")

    test_12_get_advanced_options.basic = True

    def test_13_set_advanced_options(self):
        """Each supported advanced option round-trips through modify."""
        # Removed this test, since it is now failing due to this option no longer supported in 4.2 +.
        # Will need to remove the
        # set_1 = AdvancedOptions(failIndexKeyTooLong=True)
        # out_set_1 = self.a.Clusters.modify_cluster_advanced_options(cluster=self.TEST_CLUSTER_NAME,
        #                                                             advanced_options=set_1)

        set_2 = AdvancedOptions(javascriptEnabled=True)
        out_set_2 = self.a.Clusters.modify_cluster_advanced_options(cluster=self.TEST_CLUSTER_NAME,
                                                                    advanced_options=set_2)
        self.assertEqual(set_2.javascriptEnabled, out_set_2.javascriptEnabled,
                         msg='in = {}: out= {}'.format(set_2.__dict__, out_set_2.__dict__))

        set_3 = AdvancedOptions(minimumEnabledTlsProtocol=TLSProtocols.TLS1_2)
        out_set_3 = self.a.Clusters.modify_cluster_advanced_options(cluster=self.TEST_CLUSTER_NAME,
                                                                    advanced_options=set_3)
        self.assertEqual(set_3.minimumEnabledTlsProtocol, out_set_3.minimumEnabledTlsProtocol,
                         msg='in = {}: out= {}'.format(set_3.__dict__, out_set_3.__dict__))

        set_4 = AdvancedOptions(noTableScan=True)
        out_set_4 = self.a.Clusters.modify_cluster_advanced_options(cluster=self.TEST_CLUSTER_NAME,
                                                                    advanced_options=set_4)
        self.assertEqual(set_4.noTableScan, out_set_4.noTableScan,
                         msg='in = {}: out= {}'.format(set_4.__dict__, out_set_4.__dict__))

        set_5 = AdvancedOptions(oplogSizeMB=1000)
        out_set_5 = self.a.Clusters.modify_cluster_advanced_options(cluster=self.TEST_CLUSTER_NAME,
                                                                    advanced_options=set_5)
        self.assertEqual(set_5.oplogSizeMB, out_set_5.oplogSizeMB,
                         msg='in = {}: out= {}'.format(set_5.__dict__, out_set_5.__dict__))

    test_13_set_advanced_options.basic = True

    def test_14_issue_154_additional_data(self):
        """Issue #154: cluster objects carry the additional metadata fields."""
        cluster = self.a.Clusters.get_single_cluster_as_obj(self.TEST_CLUSTER_NAME)
        self.assertTrue(type(cluster) is ClusterConfig)
        self.assertEqual(cluster.name, self.TEST_CLUSTER_NAME)
        self.assertIsInstance(cluster.create_date, datetime)
        pprint(cluster.as_dict())

    # BUG FIX: was mistakenly set on test_02_get_a_cluster_as_obj.
    test_14_issue_154_additional_data.basic = True

    def test_15_issue_182(self):
        """
        Test for problems with added read_only items being included in the as_modify
        Returns:

        """
        atlas = self.a

        out = atlas.Clusters.modify_cluster_instance_size(cluster=self.TEST_CLUSTER_NAME,
                                                          new_cluster_size=InstanceSizeName.M20)
        pprint(out)

        # Resize back down so the shared test cluster is left as found.
        out2 = atlas.Clusters.modify_cluster_instance_size(cluster=self.TEST_CLUSTER_NAME,
                                                           new_cluster_size=InstanceSizeName.M10)
203 |
--------------------------------------------------------------------------------
/tests/test_events.py:
--------------------------------------------------------------------------------
1 | """
2 | Events NoseTests
3 |
4 |
5 | """
6 |
7 | from atlasapi.atlas import Atlas
8 | from pprint import pprint
9 | from os import environ, getenv
10 | from atlasapi import events
11 | from atlasapi.lib import AtlasPeriods, AtlasUnits, AtlasGranularities
12 | from json import dumps
13 | from tests import BaseTests
14 | import logging
15 | from time import sleep
16 | from datetime import datetime, timedelta, timezone
17 | from atlasapi.events import AtlasEventTypes, AtlasEvent, _AtlasBaseEvent
18 |
# Credentials for the smoke tests come from the environment.
USER = getenv('ATLAS_USER', None)
API_KEY = getenv('ATLAS_KEY', None)
GROUP_ID = getenv('ATLAS_GROUP', None)

if not all((USER, API_KEY, GROUP_ID)):
    raise EnvironmentError('In order to run this smoke test you need ATLAS_USER, AND ATLAS_KEY env variables'
                           'your env variables are {}'.format(environ.__str__()))

verbose_logger = logging.getLogger('verbose_logger')
28 |
29 |
class EventsTests(BaseTests):
    """Live-API tests for the ``atlasapi.events`` interface.

    NOTE(review): these tests run against a real Atlas project (``self.a``
    from BaseTests), so results depend on the project's recent activity.
    The ``<method>.basic`` function attributes are nose2 ``attrib`` markers
    used by CI to select the basic suite (``-A basic``).
    """

    def test_00_All_Events(self):
        # Fetches every event for the project. Assumes the project has at
        # least 11 events; otherwise out[10] raises IndexError.
        out = self.a.Events.all
        self.assertIsInstance(out, list)
        self.assertIsInstance(out[0], events._AtlasBaseEvent)
        self.assertIsInstance(out[10], events._AtlasBaseEvent)
        verbose_logger.warning(f'The count of all events is {len(out)}')

    test_00_All_Events.basic = False  # excluded from the basic CI run (slow)

    def test_01_get_project_events_since(self):
        """Exercise the private since-filtered fetch (last 12 hours)."""
        # Naive UTC timestamp; the API layer presumably formats it — TODO confirm.
        test_datetime = datetime.utcnow() - timedelta(hours=12)
        verbose_logger.info(f'Events Since {test_datetime.isoformat()}')
        out = self.a.Events._get_all_project_events(iterable=True, since_datetime=test_datetime)
        verbose_logger.warning(f'The count of since events is {len(out)}')
        self.assertIsInstance(out, list)
        for each in out:
            self.assertIsInstance(each, events._AtlasBaseEvent)

    test_01_get_project_events_since.basic = True

    def test_02_since(self):
        """Same as test_01 but through the public ``since`` wrapper."""
        test_datetime = datetime.utcnow() - timedelta(hours=12)
        verbose_logger.info(f'Events Since (public) {test_datetime.isoformat()}')
        out = self.a.Events.since(test_datetime)
        verbose_logger.warning(f'The count of since events is {len(out)}')
        self.assertIsInstance(out, list)
        for each in out:
            self.assertIsInstance(each, events._AtlasBaseEvent)

    test_02_since.basic = True

    def test_03_atlas(self):
        # Sanity check that the shared fixture is a real Atlas client.
        self.assertIsInstance(self.a, Atlas)

    def test_04_CPS(self):
        """Check that CPS (cloud provider snapshot) events deserialize."""
        test_datetime = datetime.utcnow() - timedelta(hours=12)
        verbose_logger.info(f'CPS Events Since (public) {test_datetime.isoformat()}')
        out = self.a.Events.since(test_datetime)
        verbose_logger.warning(f'The count of since events is {len(out)}')
        self.assertIsInstance(out, list)
        for each in out:
            # NOTE(review): this is tautological — the assert only runs on
            # items whose type is already AtlasCPSEvent, so it can never
            # fail. It likely should assert that at least one CPS event was
            # returned, or that CPS-typed payloads produce AtlasCPSEvent.
            if type(each) == events.AtlasCPSEvent:
                self.assertIsInstance(each, events.AtlasCPSEvent)

    test_04_CPS.basic = True

    def test_05_All_Events_By_Type(self):
        """Fetch all events filtered by a single event type."""
        out = self.a.Events.all_by_type(event_type=AtlasEventTypes.CLUSTER_UPDATE_COMPLETED)
        count = 0
        for each_event in out:
            count += 1
            self.assertIsInstance(each_event, _AtlasBaseEvent)
        pprint(f"Found {count} events.")

    test_05_All_Events_By_Type.basic = True

    def test_06_Events_Since_By_Type(self):
        """Fetch events filtered by type and a fixed since date."""
        out = self.a.Events.since_by_type(event_type=AtlasEventTypes.CLUSTER_UPDATE_COMPLETED,
                                          since_datetime=datetime(2022, 1, 1))
        count = 0
        for each_event in out:
            count += 1
            self.assertIsInstance(each_event, _AtlasBaseEvent)
        pprint(f"Found {count} events.")

    test_06_Events_Since_By_Type.basic = True
--------------------------------------------------------------------------------
/tests/test_getting_logs.py:
--------------------------------------------------------------------------------
1 | """
Unit tests for retrieving host log files and log lines
3 |
4 |
5 | """
6 | from pprint import pprint
7 | from os import environ, getenv
8 | from atlasapi.atlas import Atlas
9 | from json import dumps
10 | from tests import BaseTests
11 | import logging
12 | from time import sleep
13 | from atlasapi.lib import AtlasLogNames, LogLine
14 | from atlasapi.specs import HostLogFile
15 | from io import BytesIO
16 | from datetime import datetime, timedelta
17 |
18 | logger = logging.getLogger('test')
19 |
20 |
class LogsTests(BaseTests):
    """Live-API tests for retrieving host log files and parsed log lines."""

    def test_00_test_retrieve_log_lines(self):
        """Spot-check that the log-line generator yields LogLine objects.

        Only the first three lines are verified; the generator is then
        abandoned so the whole log is not downloaded.
        """
        atlas: Atlas = self.a
        atlas.Hosts.fill_host_list()
        test_host = atlas.Hosts.host_list[0]
        print(f'Will get a mongod log for {test_host.hostname}')
        out = atlas.Hosts.get_loglines_for_host(host_obj=test_host, log_name=AtlasLogNames.MONGODB)
        counter = 0
        for each in out:
            if counter < 3:
                self.assertIsInstance(each, LogLine)
            else:
                break
            # BUGFIX: counter was never incremented, so the break branch was
            # unreachable and the loop consumed the entire log stream.
            counter += 1

    def test_01_test_retrieve_log(self):
        """Download a full mongod log into memory and sanity-check its size."""
        atlas: Atlas = self.a
        atlas.Hosts.fill_host_list()
        test_host = atlas.Hosts.host_list[0]
        print(f'Will get a mongod log for {test_host.hostname}')
        out = atlas.Hosts.get_log_for_host(host_obj=test_host, log_name=AtlasLogNames.MONGODB)
        self.assertIsInstance(out, BytesIO)
        # tell() after the download gives the buffer position, i.e. bytes written.
        self.assertGreater(out.tell(), 1000)

    def test_02_test_retrieve_log_to_date(self):
        """Download a mongod log bounded by a start date."""
        atlas: Atlas = self.a
        start_date = datetime(year=2020, month=2, day=3)
        atlas.Hosts.fill_host_list()
        test_host = atlas.Hosts.host_list[0]
        print(f'Will get a mongod log for {test_host.hostname}')
        out = atlas.Hosts.get_log_for_host(host_obj=test_host, log_name=AtlasLogNames.MONGODB, date_from=start_date)
        self.assertIsInstance(out, BytesIO)
        self.assertGreater(out.tell(), 1000)

    # Disabled: downloads a log for every host in the project, which is slow
    # and expensive against a live deployment.
    # def test_03_test_retrieve_logs_for_project(self):
    #     atlas: Atlas = self.a
    #     atlas.Hosts.fill_host_list()
    #     host_list = list(atlas.Hosts.host_list)
    #
    #     print(f'Will get a mongod logs for {len(host_list)} hosts.')
    #     out = atlas.Hosts.get_logs_for_project(log_name=AtlasLogNames.MONGODB)
    #     for each in out:
    #         print(f'Received a log for {each.hostname}, of type {each.log_files[0].log_name}, lenght: {each.log_files[0].log_file_binary.tell()}')
    #         self.assertIsInstance(each.log_files[0], HostLogFile)
    #         self.assertGreater(each.log_files[0].log_file_binary.tell(), 1000)
--------------------------------------------------------------------------------
/tests/test_maint_window.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for Maintenance Windows
3 |
4 |
5 | """
6 | from pprint import pprint
7 | from os import environ, getenv
8 | from atlasapi.atlas import Atlas
9 | from atlasapi.errors import ErrMaintenanceError
10 | from atlasapi.maintenance_window import MaintenanceWindow, Weekdays
11 | from json import dumps
12 | from tests import BaseTests
13 | import logging
14 | from time import sleep
15 |
16 | logger = logging.getLogger('test')
17 |
18 |
class MaintTests(BaseTests):
    """Live-API tests for the project maintenance-window interface."""

    def test_00_test_object(self):
        """Round-trip a MaintenanceWindow through as_dict()/from_dict()."""
        data = {'dayOfWeek': 6, 'hourOfDay': 12, 'numberOfDeferrals': 1, 'startASAP': False}

        out = MaintenanceWindow(Weekdays.FRIDAY, 12, 1, False)
        # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
        self.assertEqual(out.dayOfWeek.value, 6)
        self.assertEqual(out.as_dict(), data)

        out2 = MaintenanceWindow.from_dict(data)

        self.assertEqual(out2.dayOfWeek, Weekdays.FRIDAY)

    test_00_test_object.basic = True

    def test_01_get_maint_window(self):
        """Retrieve the current window both as an object and as a raw dict."""
        output = self.a.MaintenanceWindows._get_maint_window()
        self.assertEqual(type(output), MaintenanceWindow)
        self.assertEqual(type(output.dayOfWeek), Weekdays)
        output2 = self.a.MaintenanceWindows._get_maint_window(as_obj=False)
        self.assertEqual(type(output2), dict)

        output = self.a.MaintenanceWindows.current_config()
        self.assertEqual(type(output), MaintenanceWindow)
        self.assertEqual(type(output.dayOfWeek), Weekdays)

    test_01_get_maint_window.basic = True

    def test_02_update_maint_window(self):
        """Set a new window config and verify it is reflected in current_config()."""
        new_config = MaintenanceWindow(day_of_week=Weekdays.SUNDAY,
                                       hour_of_day=4)
        try:
            output = self.a.MaintenanceWindows.set_config(new_config)

            self.assertTrue(output)

            updated_config = self.a.MaintenanceWindows.current_config()

            # BUGFIX: the original compared a Weekdays enum member against an
            # int (.value), which can never be equal; compare like with like.
            self.assertEqual(new_config.dayOfWeek, updated_config.dayOfWeek)
            self.assertEqual(new_config.hourOfDay, updated_config.hourOfDay)

        except ErrMaintenanceError:
            logger.warning("The maint window already exists, so will pass this test.")

    test_02_update_maint_window.basic = True

    def test_03_defer_maint_window(self):
        """Defer the window; tolerate 'deferred too many times' errors."""
        try:
            output = self.a.MaintenanceWindows._defer_maint_window()
            self.assertIn(output, [True, False])
        except ErrMaintenanceError:
            logger.warning('The maint window was defered too many times, so will pass this test.')

    test_03_defer_maint_window.basic = True
--------------------------------------------------------------------------------
/tests/test_network.py:
--------------------------------------------------------------------------------
1 | """
Unit tests for testing the network stack
3 |
4 |
5 | """
6 | from pprint import pprint
7 | from os import environ, getenv
8 |
9 | import atlasapi.specs
10 | import atlasapi.measurements
11 | from atlasapi.atlas import Atlas
12 | from json import dumps
13 | from tests import BaseTests
14 | import logging
15 | from time import sleep
16 | from atlasapi.lib import AtlasUnits, ClusterType
17 | from atlasapi.specs import Host, AtlasPeriods, AtlasGranularities, ReplicaSetTypes
18 | from atlasapi.measurements import AtlasMeasurementTypes, AtlasMeasurementValue, AtlasMeasurement
19 | from atlasapi.clusters import RegionConfig, AdvancedOptions, TLSProtocols, ClusterStates
20 | from atlasapi.errors import ErrAtlasBadRequest
21 |
22 | from io import BytesIO
23 | from datetime import datetime, timedelta
24 |
25 | logger = logging.getLogger('test')
26 |
27 |
class NetworkTests(BaseTests):
    """Tests for HTTP error handling in the network layer."""

    def test_00_handle_400s(self):
        """
        Force a 400 error so we can test details being passed through.

        Sends an invalid cluster modification (a non-boolean backupEnabled)
        and verifies the API error surfaces as ErrAtlasBadRequest carrying
        HTTP status 400.
        """
        try:
            self.a.Clusters.get_single_cluster(cluster=self.TEST_CLUSTER_NAME)
            self.a.Clusters.modify_cluster(cluster=self.TEST_CLUSTER_NAME,
                                           cluster_config=dict(backupEnabled="whatevs"))
            # BUGFIX: the original test passed silently when no exception was
            # raised; make the missing error an explicit failure.
            self.fail("Expected ErrAtlasBadRequest for an invalid cluster config")
        # Narrowed from `except Exception` — unrelated errors now surface
        # as test errors instead of assertion failures.
        except ErrAtlasBadRequest as e:
            error_code = e.getAtlasResponse()[0]
            self.assertEqual(error_code, 400, "The error code needs to be 400")
            print(f"Error Code is {error_code}")

    test_00_handle_400s.basic = True
--------------------------------------------------------------------------------
/tests/test_organizations.py:
--------------------------------------------------------------------------------
1 | """
Nose2 Unit Tests for the organizations module.
3 |
4 |
5 | """
6 | from pprint import pprint
7 | from os import environ, getenv
8 | from atlasapi.atlas import Atlas
9 | from atlasapi.organizations import Organization
10 | from atlasapi.teams import TeamRoles
11 | from atlasapi.atlas_users import AtlasUser
12 | from atlasapi.projects import Project
13 | from json import dumps
14 | from tests import BaseTests
15 | import logging
16 | from time import sleep
17 |
18 | logger = logging.getLogger('test')
19 |
20 |
class ProjectTests(BaseTests):
    """Live-API tests for the Organizations interface."""

    def test_00_get_organizations(self):
        """Every item yielded by ``Organizations.organizations`` is an Organization."""
        for org in self.a.Organizations.organizations:
            self.assertIsInstance(org, Organization, "An Atlas should be returned")

    test_00_get_organizations.basic = True

    def test_01_get_organization_by_name(self):
        """Look up the last visible organization by name and verify the round trip."""
        for org in self.a.Organizations.organizations:
            org_name = org.name

        found = self.a.Organizations.organization_by_name(org_name=org_name)
        self.assertIsInstance(found, Organization, "An Atlas should be returned")
        self.assertEqual(org_name, found.name, "Returned result was not the same.")

    test_01_get_organization_by_name.basic = True

    def test_02_get_organization_by_id(self):
        """Look up the last visible organization by id and verify the round trip."""
        for org in self.a.Organizations.organizations:
            org_id = org.id

        found = self.a.Organizations.organization_by_id(org_id)
        self.assertIsInstance(found, Organization, "An Atlas should be returned")
        self.assertEqual(org_id, found.id, "Returned result was not the same.")

    test_02_get_organization_by_id.basic = True

    def test_03_get_organization_count(self):
        """The organization count property returns an int."""
        self.assertIsInstance(self.a.Organizations.count, int, "The count should be an int")

    test_03_get_organization_count.basic = True

    def test_04_get_all_projects_for_org(self):
        """All projects of the known test org are Projects, and at least one exists."""
        org_id = '5ac52173d383ad0caf52e11c'
        project_count = 0
        for each_project in self.a_owner.Organizations.get_all_projects_for_org(org_id=org_id):
            print(f"Found Project :{each_project.name}, {type(each_project)}")
            self.assertIsInstance(each_project, Project, f"The return type was not , it was {type(each_project)}")
            project_count += 1

        self.assertGreater(project_count, 0, "Did not find any projects, this is a bug, or the test org is not set up "
                                             "correctly.")

    test_04_get_all_projects_for_org.basic = True
--------------------------------------------------------------------------------
/tests/test_projects.py:
--------------------------------------------------------------------------------
1 | """
Nose2 Unit Tests for the projects module.
3 |
4 |
5 | """
6 | import datetime
7 | from pprint import pprint
8 | from os import environ, getenv
9 | from atlasapi.atlas import Atlas
10 | from atlasapi.projects import Project, ProjectSettings
11 | from atlasapi.teams import TeamRoles
12 | from atlasapi.atlas_users import AtlasUser
13 | from json import dumps
14 | from tests import BaseTests
15 | import logging
16 | from time import sleep
17 |
18 | logger = logging.getLogger('test')
19 |
20 |
class ProjectTests(BaseTests):
    """Live-API tests for the Projects interface."""

    def test_00_get_projects_all_for_org_key(self):
        """An org-scoped key should see all four projects of the test org."""
        count = 0
        for each in self.a_owner.Projects.projects:
            self.assertIsInstance(each, Project, "An Atlas ")
            count += 1
        self.assertEqual(count, 4, "There should be exactly 4 projects returned when for this test Organization")

    test_00_get_projects_all_for_org_key.basic = True

    def test_01_get_projects_all_for_proj_key(self):
        """A project-scoped key should see exactly one project."""
        count = 0
        for each in self.a.Projects.projects:
            self.assertIsInstance(each, Project, "An Atlas ")
            count += 1
        self.assertEqual(count, 1, "There should be exactly 1 projects returned when for this test Project")

    test_01_get_projects_all_for_proj_key.basic = True

    def test_02_get_project_by_id(self):
        """Look up a project by its group id (taken from the last listed project)."""
        for each in self.a.Projects.projects:
            self.assertIsInstance(each, Project, "An Atlas ")
            pprint(f"👍The group_id to use is {each.id}")
            group_id = each.id
            self.assertIsInstance(each.org_id, str, "OrgID should be an str")

        out = self.a.Projects.project_by_id(group_id)
        self.assertIsInstance(out, Project, "An Atlas should be returned")

    test_02_get_project_by_id.basic = True

    def test_03_get_project_by_name(self):
        """Look up a project by its name (taken from the last listed project)."""
        for each in self.a.Projects.projects:
            self.assertIsInstance(each, Project, "An Atlas ")
            pprint(f"👍The group_name to use is {each.name}")
            group_name = each.name
            self.assertIsInstance(each.name, str, "OrgID should be an str")

        out = self.a.Projects.project_by_name(group_name)
        self.assertIsInstance(out, Project, "An Atlas should be returned")

    test_03_get_project_by_name.basic = True

    def test_04_get_project_by_both_fail(self):
        """Supplying both a group name and a group id must raise ValueError."""
        with self.assertRaises(ValueError) as ex:
            pprint(f"👍Supplying both yielded exception")
            out = self.a.Projects._get_project(group_name="bad bad", group_id='anid')

    test_04_get_project_by_both_fail.basic = True

    def test_05_get_project_teams_basic(self):
        """Each project team is a TeamRoles carrying a list of role strings."""
        out = self.a.Projects.get_project_teams()
        for each in out:
            self.assertIsInstance(each, TeamRoles)
            self.assertIsInstance(each.roles, list, "Roles should be a list of strings")
            for each_role in each.roles:
                self.assertIsInstance(each_role, str, "Each listed role should be a string.")

    test_05_get_project_teams_basic.basic = True

    def test_06_get_project_teams_pass_id(self):
        """Same as test_05, but with an explicitly passed group_id."""
        out = self.a_owner.Projects.get_project_teams(group_id=self.a.group)
        for each in out:
            self.assertIsInstance(each, TeamRoles)
            self.assertIsInstance(each.roles, list, "Roles should be a list of strings")
            for each_role in each.roles:
                self.assertIsInstance(each_role, str, "Each listed role should be a string.")

    test_06_get_project_teams_pass_id.basic = True

    def test_07_get_project_users_fail_noGroup(self):
        """Requesting project users without a group must raise ValueError."""
        with self.assertRaises(ValueError):
            pprint(f"👍Supplying no group yielded exception")
            out = self.a_owner.Projects.get_project_users()
            for each in out:
                self.assertIsInstance(each, AtlasUser)

    test_07_get_project_users_fail_noGroup.basic = True

    def test_08_get_project_users(self):
        """Each project user deserializes to an AtlasUser."""
        out = self.a.Projects.get_project_users()
        for each in out:
            self.assertIsInstance(each, AtlasUser)

    test_08_get_project_users.basic = True

    def test_09_get_project_user_count(self):
        """The project user count is a positive integer."""
        out = self.a.Projects.user_count()
        pprint(f"👍 The count is {out}")
        # BUGFIX: message typo ("a in integer").
        self.assertIsInstance(out, int, "The count should be an integer!")
        self.assertGreaterEqual(out, 1, "Should have more than one user!")

    test_09_get_project_user_count.basic = True

    def test_10_get_project_settings(self):
        """Project settings come back as a ProjectSettings object."""
        out = self.a.Projects.settings
        self.assertIsInstance(out, ProjectSettings, "The response must be a ProjectSettings obj")

    test_10_get_project_settings.basic = True

    def test_11_get_project_create_date(self):
        """The project's created_date is parsed into a datetime."""
        out = self.a.Projects.project_by_id(self.a.group)
        pprint(out.__dict__)
        self.assertIsInstance(out.created_date, datetime.datetime, "An datetime should be returned")

    # BUGFIX: this marker previously re-assigned test_02's attribute
    # (copy-paste error), so test_11 was never tagged for the basic suite.
    test_11_get_project_create_date.basic = True
--------------------------------------------------------------------------------
/tests/unittest.cfg:
--------------------------------------------------------------------------------
1 | [unittest]
2 | plugins = nose2.plugins.attrib
3 | nose2.plugins.junitxml
4 | [test-result]
5 | always-on = True
6 | descriptions = True
7 | [junit-xml]
8 | always-on = True
9 | keep_restricted = False
10 | test_fullname = False
--------------------------------------------------------------------------------
/tests/whitelist_test.py:
--------------------------------------------------------------------------------
# Copyright (c) 2019 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | Stupid and simple smoke tests.
17 |
18 | Uses ENV vars to store user, key and group.
19 |
20 | TODO: Create real tests
21 |
22 |
23 | """
24 |
25 | from atlasapi.atlas import Atlas
26 | from pprint import pprint
27 | from os import environ, getenv
28 | from atlasapi.atlas import Atlas
29 | from json import dumps
30 |
31 |
32 | from atlasapi.specs import DatabaseUsersPermissionsSpecs, RoleSpecs, DatabaseUsersUpdatePermissionsSpecs
33 |
# Credentials for this smoke script come from the environment.
USER = getenv('ATLAS_USER', None)
API_KEY = getenv('ATLAS_KEY', None)
GROUP_ID = getenv('ATLAS_GROUP', None)

# Fail fast with a readable message when any required variable is missing.
# BUGFIX: the original implicit string concatenation was missing a separator
# ("env variablesyour env variables") and did not mention ATLAS_GROUP even
# though it is required by the check above.
if not USER or not API_KEY or not GROUP_ID:
    raise EnvironmentError('In order to run this smoke test you need ATLAS_USER, ATLAS_KEY and ATLAS_GROUP '
                           'env variables; your env variables are {}'.format(environ.__str__()))

a = Atlas(USER, API_KEY, GROUP_ID)


# Disabled: listing all whitelist entries.
# print('----------Test Get whitelist entries------------------')
#
# out = a.Whitelist.get_all_whitelist_entries()
#
# for each in out:
#     pprint(each.__dict__)


print('----------Create a whitelist entry then get it.------------------')

# Create an entry, then read it back through the API and dump it.
returnit = a.Whitelist.create_whitelist_entry('67.180.12.52', 'Test 12')

out = a.Whitelist.get_whitelist_entry('67.180.12.52')

pprint(out.__dict__)
--------------------------------------------------------------------------------
/unittest.cfg:
--------------------------------------------------------------------------------
1 | [unittest]
2 | plugins = nose2.plugins.attrib
3 | nose2.plugins.junitxml
4 | [test-result]
5 | always-on = True
6 | descriptions = True
7 | [junit-xml]
8 | always-on = True
9 | keep_restricted = False
10 | test_fullname = False
--------------------------------------------------------------------------------
/utils/convert_java_classes_to_enum.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 Matthew G. Monteleone
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | file = open("events.java", 'r')
17 | print("class AtlasEventTypes(Enum):")
18 | for line in file.readlines():
19 | if line[3].isspace() is False and line.split("(",1)[0].lstrip()[0] not in ['/','@']:
20 | base_name = line.split("(",1)[0].lstrip()
21 | name_text = base_name.strip().replace('_'," ").title().replace("Gcp", "GCP").replace("Aws", "AWS").replace('Crl', "CRL").replace('Ofac', "OFAC").replace('Mtm', 'MTM').replace('Csv',"CSV").replace('Mfa', 'MFA').replace('Api', "API").replace('Ip', "IP").replace('Dns', "DNS").replace('Ssl', 'SSL')
22 |
23 | print(f" {base_name} = '{name_text}'")
24 |
--------------------------------------------------------------------------------