├── .gitattributes
├── .github
└── workflows
│ ├── publication.yml
│ └── validation.yml
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── demo.png
├── pyproject.toml
├── setup.py
├── src
└── isilon_hadoop_tools
│ ├── __init__.py
│ ├── _scripts.py
│ ├── cli.py
│ ├── directories.py
│ ├── identities.py
│ └── onefs.py
├── tests
├── conftest.py
├── test___init__.py
├── test__scripts.py
├── test_cli.py
├── test_directories.py
├── test_identities.py
└── test_onefs.py
└── tox.ini
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
4 | # Custom for Visual Studio
5 | *.cs diff=csharp
6 |
7 | # Standard to msysgit
8 | *.doc diff=astextplain
9 | *.DOC diff=astextplain
10 | *.docx diff=astextplain
11 | *.DOCX diff=astextplain
12 | *.dot diff=astextplain
13 | *.DOT diff=astextplain
14 | *.pdf diff=astextplain
15 | *.PDF diff=astextplain
16 | *.rtf diff=astextplain
17 | *.RTF diff=astextplain
18 |
--------------------------------------------------------------------------------
/.github/workflows/publication.yml:
--------------------------------------------------------------------------------
1 | name: Publication
2 | on:
3 | release:
4 | types: [created]
5 | jobs:
6 | deploy:
7 | runs-on: ubuntu-latest
8 | steps:
9 | - uses: actions/checkout@v3
10 | - uses: actions/setup-python@v4
11 | with:
12 | python-version: '3.8'
13 | - run: python -m pip install --upgrade tox-gh-actions
14 | - env:
15 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
16 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
17 | run: tox -e publish -- upload
18 |
--------------------------------------------------------------------------------
/.github/workflows/validation.yml:
--------------------------------------------------------------------------------
1 | name: Validation
2 | on: [push, pull_request]
3 | jobs:
4 | build:
5 | runs-on: ubuntu-latest
6 | strategy:
7 | matrix:
8 | python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
9 | steps:
10 | - uses: actions/checkout@v3
11 | - uses: actions/setup-python@v4
12 | with:
13 | python-version: ${{ matrix.python-version }}
14 | - run: python -m pip install --upgrade tox-gh-actions
15 | - run: sudo apt install --assume-yes libkrb5-dev
16 | - run: tox
17 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | .hypothesis/
51 | .pytest_cache/
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 | db.sqlite3
61 | db.sqlite3-journal
62 |
63 | # Flask stuff:
64 | instance/
65 | .webassets-cache
66 |
67 | # Scrapy stuff:
68 | .scrapy
69 |
70 | # Sphinx documentation
71 | docs/_build/
72 |
73 | # PyBuilder
74 | target/
75 |
76 | # Jupyter Notebook
77 | .ipynb_checkpoints
78 |
79 | # IPython
80 | profile_default/
81 | ipython_config.py
82 |
83 | # pyenv
84 | .python-version
85 |
86 | # pipenv
87 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
88 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
89 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
90 | # install all needed dependencies.
91 | #Pipfile.lock
92 |
93 | # celery beat schedule file
94 | celerybeat-schedule
95 |
96 | # SageMath parsed files
97 | *.sage.py
98 |
99 | # Environments
100 | .env
101 | .venv
102 | env/
103 | venv/
104 | ENV/
105 | env.bak/
106 | venv.bak/
107 |
108 | # Spyder project settings
109 | .spyderproject
110 | .spyproject
111 |
112 | # Rope project settings
113 | .ropeproject
114 |
115 | # mkdocs documentation
116 | /site
117 |
118 | # mypy
119 | .mypy_cache/
120 | .dmypy.json
121 | dmypy.json
122 |
123 | # Pyre type checker
124 | .pyre/
125 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | [GitHub](https://github.com/) hosts the project.
4 |
5 | ## Development
6 |
7 | Please [open an issue](https://help.github.com/articles/creating-an-issue/) to discuss any concerns or ideas for the project.
8 |
9 | ### Version Control
10 |
11 | Use [`git`](https://git-scm.com/doc) to retrieve and manage the project source code:
12 | ``` sh
13 | git clone https://github.com/Isilon/isilon_hadoop_tools.git
14 | ```
15 |
16 | ### Test Environment
17 |
18 | Use [`tox`](https://tox.readthedocs.io/) to deploy and run the project source code:
19 | ``` sh
20 | # Build and test the entire project:
21 | tox
22 |
23 | # Run a specific test with Python 3.7, and drop into Pdb if it fails:
24 | tox -e py37 -- -k test_catches --pdb
25 |
26 | # Create a Python 3.7 development environment:
27 | tox -e py37 --devenv ./venv
28 |
29 | # Run an IHT console_script in that environment:
30 | venv/bin/isilon_create_users --help
31 | ```
32 |
33 | ## Merges
34 |
35 | 1. All change proposals are submitted by [creating a pull request](https://help.github.com/articles/creating-a-pull-request/) (PR).
36 | - [Branch protection](https://help.github.com/articles/about-protected-branches/) is used to enforce acceptance criteria.
37 |
38 | 2. All PRs are [associated with a milestone](https://help.github.com/articles/associating-milestones-with-issues-and-pull-requests/).
39 | - Milestones serve as a change log for the project.
40 |
41 | 3. Any PR that directly changes any release artifact gets 1 of 3 [labels](https://help.github.com/articles/applying-labels-to-issues-and-pull-requests/): `major`, `minor`, `patch`.
42 | - This helps with release versioning.
43 |
44 | ### Continuous Integration
45 |
46 | [Github Actions](https://github.com/features/actions) ensures the build never breaks and the tests always pass.
47 |
48 | [](https://github.com/Isilon/isilon_hadoop_tools/actions/workflows/validation.yml)
49 |
50 | It also deploys releases to the package repository automatically (see below).
51 |
52 | ## Releases
53 |
54 | 1. Releases are versioned according to [Semantic Versioning](http://semver.org/).
55 | - https://semver.org/#why-use-semantic-versioning
56 |
57 | 2. All releases are [tagged](https://git-scm.com/book/en/v2/Git-Basics-Tagging).
58 | - This permanently associates a version with a commit.
59 |
60 | 3. Every release closes a [milestone](https://help.github.com/articles/about-milestones/).
61 | - This permanently associates a version with a milestone.
62 |
63 | ### Package Repository
64 |
65 | [PyPI](http://pypi.org/) serves releases publically.
66 |
67 | [](https://pypi.org/project/isilon_hadoop_tools)
68 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Dell EMC Isilon
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Isilon Hadoop Tools
2 |
3 | Tools for Using Hadoop with OneFS
4 |
5 | - `isilon_create_users` creates identities needed by Hadoop distributions compatible with OneFS.
6 | - `isilon_create_directories` creates a directory structure with appropriate ownership and permissions in HDFS on OneFS.
7 |
8 | 
9 |
10 | ## Installation
11 |
12 | Isilon Hadoop Tools (IHT) currently requires Python 3.7+ and supports OneFS 8+.
13 |
14 | - Python support schedules can be found [in the Python Developer's Guide](https://devguide.python.org/versions/).
15 | - OneFS support schedules can be found in the [PowerScale OneFS Product Availability Guide](https://www.dell.com/support/manuals/en-us/isilon-onefs/ifs_pub_product_availability_9.4.0.0/software?guid=guid-925f6b6a-2882-42b1-8b64-2c5eb2190eb7).
16 |
17 | ### Option 1: Install as a stand-alone command line tool.
18 |
19 |
20 | Use pipx
to install IHT.
21 |
22 |
23 | > _`pipx` requires Python 3.7 or later._
24 |
25 | 1. [Install `pipx`:](https://pipxproject.github.io/pipx/installation/)
26 |
27 | ``` sh
28 | python3 -m pip install --user pipx
29 | ```
30 |
31 | - Tip: Newer versions of some Linux distributions (e.g. [Debian 10](https://packages.debian.org/buster/pipx), [Ubuntu 19.04](https://packages.ubuntu.com/disco/pipx), etc.) offer native packages for `pipx`.
32 |
33 |
34 |
35 | ``` sh
36 | python3 -m pipx ensurepath
37 | ```
38 |
39 | - Note: You may need to restart your terminal for the `$PATH` updates to take effect.
40 |
41 | 2. Use `pipx` to install [`isilon_hadoop_tools`](https://pypi.org/project/isilon_hadoop_tools/):
42 |
43 | ``` sh
44 | pipx install isilon_hadoop_tools
45 | ```
46 |
47 | 3. Test the installation:
48 |
49 | ``` sh
50 | isilon_create_users --help
51 | isilon_create_directories --help
52 | ```
53 |
54 | - Use `pipx` to uninstall at any time:
55 |
56 | ``` sh
57 | pipx uninstall isilon_hadoop_tools
58 | ```
59 |
60 | See Python's [Installing stand alone command line tools](https://packaging.python.org/guides/installing-stand-alone-command-line-tools/) guide for more information.
61 |
62 |
63 | ### Option 2: Create an ephemeral installation.
64 |
65 |
66 | Use pip
to install IHT in a virtual environment.
67 |
68 |
69 | > Python "Virtual Environments" allow Python packages to be installed in an isolated location for a particular application, rather than being installed globally.
70 |
71 | 1. Use the built-in [`venv`](https://docs.python.org/3/library/venv.html) module to create a virtual environment:
72 |
73 | ``` sh
74 | python3 -m venv ./iht
75 | ```
76 |
77 | 2. Install [`isilon_hadoop_tools`](https://pypi.org/project/isilon_hadoop_tools/) into the virtual environment:
78 |
79 | ``` sh
80 | iht/bin/pip install isilon_hadoop_tools
81 | ```
82 |
83 | - Note: This requires access to an up-to-date Python Package Index (PyPI, usually https://pypi.org/).
84 | For offline installations, necessary resources can be downloaded to a USB flash drive which can be used instead:
85 |
86 | ``` sh
87 | pip3 download --dest /media/usb/iht-dists isilon_hadoop_tools
88 | ```
89 | ``` sh
90 | iht/bin/pip install --no-index --find-links /media/usb/iht-dists isilon_hadoop_tools
91 | ```
92 |
93 | 3. Test the installation:
94 |
95 | ``` sh
96 | iht/bin/isilon_create_users --help
97 | ```
98 |
99 | - Tip: Some users find it more convenient to "activate" the virtual environment (which prepends the virtual environment's `bin/` to `$PATH`):
100 |
101 | ``` sh
102 | source iht/bin/activate
103 | isilon_create_users --help
104 | isilon_create_directories --help
105 | deactivate
106 | ```
107 |
108 | - Remove the virtual environment to uninstall at any time:
109 |
110 | ``` sh
111 | rm --recursive iht/
112 | ```
113 |
114 | See Python's [Installing Packages](https://packaging.python.org/tutorials/installing-packages/) tutorial for more information.
115 |
116 |
117 | ## Usage
118 |
119 | - Tip: `--help` can be used with any IHT script to see extended usage information.
120 |
121 | To use IHT, you will need the following:
122 |
123 | - `$onefs`, an IP address, hostname, or SmartConnect name associated with the OneFS System zone
124 | - Unfortunately, Zone-specific Role-Based Access Control (ZRBAC) is not fully supported by OneFS's RESTful Access to Namespace (RAN) service yet, which is required by `isilon_create_directories`.
125 | - `$iht_user`, a OneFS System zone user with the following privileges:
126 | - `ISI_PRIV_LOGIN_PAPI`
127 | - `ISI_PRIV_AUTH`
128 | - `ISI_PRIV_HDFS`
129 | - `ISI_PRIV_IFS_BACKUP` (only needed by `isilon_create_directories`)
130 | - `ISI_PRIV_IFS_RESTORE` (only needed by `isilon_create_directories`)
131 | - `$zone`, the name of the access zone on OneFS that will host HDFS
132 | - The System zone should **NOT** be used for HDFS.
133 | - `$dist`, the distribution of Hadoop that will be deployed with OneFS (e.g. CDH, HDP, etc.)
134 | - `$cluster_name`, the name of the Hadoop cluster
135 |
136 | ### Connecting to OneFS via HTTPS
137 |
138 | OneFS ships with a self-signed SSL/TLS certificate by default, and such a certificate will not be verifiable by any well-known certificate authority. If you encounter `CERTIFICATE_VERIFY_FAILED` errors while using IHT, it may be because OneFS is still using the default certificate. To remedy the issue, consider encouraging your OneFS administrator to install a verifiable certificate instead. Alternatively, you may choose to skip certificate verification by using the `--no-verify` option, but do so at your own risk!
139 |
140 | ### Preparing OneFS for Hadoop Deployment
141 |
142 | _Note: This is not meant to be a complete guide to setting up Hadoop with OneFS. If you stumbled upon this page or have not otherwise consulted the appropriate install guide for your distribution, please do so at https://community.emc.com/docs/DOC-61379._
143 |
144 | There are 2 tools in IHT that are meant to assist with the setup of OneFS as HDFS for a Hadoop cluster:
145 | 1. `isilon_create_users`, which creates users and groups that must exist on all hosts in the Hadoop cluster, including OneFS
146 | 2. `isilon_create_directories`, which sets the correct ownership and permissions on directories in HDFS on OneFS
147 |
148 | These tools must be used _in order_ since a user/group must exist before it can own a directory.
149 |
150 | #### `isilon_create_users`
151 |
152 | Using the information from above, an invocation of `isilon_create_users` could look like this:
153 | ``` sh
154 | isilon_create_users --dry \
155 | --onefs-user "$iht_user" \
156 | --zone "$zone" \
157 | --dist "$dist" \
158 | --append-cluster-name "$cluster_name" \
159 | "$onefs"
160 | ```
161 | - Note: `--dry` causes the script to log without executing. Use it to ensure the script will do what you intend before actually doing it.
162 |
163 | If anything goes wrong (e.g. the script stopped because you forgot to give `$iht_user` the `ISI_PRIV_HDFS` privilege), you can safely rerun with the same options. IHT should figure out that some of its job has been done already and work with what it finds.
164 | - If a particular user/group already exists with a particular UID/GID, the ID it already has will be used.
165 | - If a particular UID/GID is already in use by another user/group, IHT will try again with a different, higher ID.
166 | - IHT may **NOT** detect previous runs that used different options.
167 |
168 | ##### Generated Shell Script
169 |
170 | After running `isilon_create_users`, you will find a new file in `$PWD` named like so:
171 | ``` sh
172 | $unix_timestamp-$zone-$dist-$cluster_name.sh
173 | ```
174 |
175 | This script should be copied to and run on all the other hosts in the Hadoop cluster (excluding OneFS).
176 | It will create the same users/groups with the same UIDs/GIDs and memberships as on OneFS using LSB utilities such as `groupadd`, `useradd`, and `usermod`.
177 |
178 | #### `isilon_create_directories`
179 |
180 | Using the information from above, an invocation of `isilon_create_directories` could look like this:
181 | ``` sh
182 | isilon_create_directories --dry \
183 | --onefs-user "$iht_user" \
184 | --zone "$zone" \
185 | --dist "$dist" \
186 | --append-cluster-name "$cluster_name" \
187 | "$onefs"
188 | ```
189 | - Note: `--dry` causes the script to log without executing. Use it to ensure the script will do what you intend before actually doing it.
190 |
191 | If anything goes wrong (e.g. the script stopped because you forgot to run `isilon_create_users` first), you can safely rerun with the same options. IHT should figure out that some of its job has been done already and work with what it finds.
192 | - If a particular directory already exists but does not have the correct ownership or permissions, IHT will correct it.
193 | - If a user/group has been deleted and re-created with a new UID/GID, IHT will adjust ownership accordingly.
194 | - IHT may **NOT** detect previous runs that used different options.
195 |
196 | ## Development
197 |
198 | See the [Contributing Guidelines](https://github.com/Isilon/isilon_hadoop_tools/blob/master/CONTRIBUTING.md) for information on project development.
199 |
--------------------------------------------------------------------------------
/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Isilon/isilon_hadoop_tools/cc9488d5152f15edcc875aada03c2149c1a14cf3/demo.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools ~= 67.5.0", "setuptools-scm[toml] ~= 7.1.0", "wheel ~= 0.38.4"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | # https://black.readthedocs.io/en/stable/guides/using_black_with_other_tools.html#pylint
6 | [tool.pylint.design]
7 | max-args = "7"
8 | [tool.pylint.format]
9 | max-line-length = "100"
10 |
11 | [tool.setuptools_scm]
12 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | """Packaging for Isilon Hadoop Tools"""
2 |
3 | import setuptools
4 |
5 | with open("README.md", encoding="utf-8") as readme_file:
6 | README = readme_file.read()
7 |
8 | setuptools.setup(
9 | name="isilon_hadoop_tools",
10 | description="Tools for Using Hadoop with OneFS",
11 | long_description=README,
12 | long_description_content_type="text/markdown",
13 | license="MIT",
14 | url="https://github.com/isilon/isilon_hadoop_tools",
15 | maintainer="Isilon",
16 | maintainer_email="support@isilon.com",
17 | package_dir={"": "src"},
18 | packages=setuptools.find_packages("src"),
19 | include_package_data=True,
20 | python_requires=">=3.7",
21 | install_requires=[
22 | "isi-sdk-7-2 ~= 0.2.11",
23 | "isi-sdk-8-0 ~= 0.2.11",
24 | "isi-sdk-8-0-1 ~= 0.2.11",
25 | "isi-sdk-8-1-0 ~= 0.2.11",
26 | "isi-sdk-8-1-1 ~= 0.2.11",
27 | "isi-sdk-8-2-0 ~= 0.2.11",
28 | "isi-sdk-8-2-1 ~= 0.2.11",
29 | "isi-sdk-8-2-2 ~= 0.2.11",
30 | "requests >= 2.20.0",
31 | "setuptools >= 41.0.0",
32 | "urllib3 >= 1.22.0",
33 | ],
34 | entry_points={
35 | "console_scripts": [
36 | "isilon_create_directories = isilon_hadoop_tools._scripts:isilon_create_directories",
37 | "isilon_create_users = isilon_hadoop_tools._scripts:isilon_create_users",
38 | ],
39 | },
40 | classifiers=[
41 | "Development Status :: 5 - Production/Stable",
42 | "License :: OSI Approved :: MIT License",
43 | "Programming Language :: Python :: 3",
44 | "Programming Language :: Python :: 3.7",
45 | "Programming Language :: Python :: 3.8",
46 | "Programming Language :: Python :: 3.9",
47 | "Programming Language :: Python :: 3.10",
48 | "Programming Language :: Python :: 3.11",
49 | ],
50 | )
51 |
--------------------------------------------------------------------------------
/src/isilon_hadoop_tools/__init__.py:
--------------------------------------------------------------------------------
1 | """Isilon Hadoop Tools"""
2 |
3 |
4 | from pkg_resources import get_distribution
5 |
6 |
7 | __all__ = [
8 | # Constants
9 | "__version__",
10 | # Exceptions
11 | "IsilonHadoopToolError",
12 | ]
13 | __version__ = get_distribution(__name__).version
14 |
15 |
16 | class IsilonHadoopToolError(Exception):
17 | """All Exceptions emitted from this package inherit from this Exception."""
18 |
19 | def __str__(self):
20 | return super().__str__() or repr(self)
21 |
22 | def __repr__(self):
23 | cause = (
24 | f" caused by {self.__cause__!r}" # pylint: disable=no-member
25 | if getattr(self, "__cause__", None)
26 | else ""
27 | )
28 | return f"{super()!r}{cause}"
29 |
--------------------------------------------------------------------------------
/src/isilon_hadoop_tools/_scripts.py:
--------------------------------------------------------------------------------
1 | """Command-line interface for entry points"""
2 |
3 | import logging
4 | import os
5 | import sys
6 | import time
7 |
8 | import urllib3
9 |
10 | import isilon_hadoop_tools
11 | import isilon_hadoop_tools.cli
12 | import isilon_hadoop_tools.directories
13 | import isilon_hadoop_tools.identities
14 |
15 |
16 | DRY_RUN = "Had this been for real, this is what would have happened..."
17 | LOGGER = logging.getLogger(__name__)
18 |
19 |
20 | def base_cli(parser=None):
21 | """Define CLI arguments and options for all entry points."""
22 | if parser is None:
23 | parser = isilon_hadoop_tools.cli.base_cli()
24 | parser.add_argument(
25 | "--append-cluster-name",
26 | help="the cluster name to append on identities",
27 | type=str,
28 | )
29 | parser.add_argument(
30 | "--dist",
31 | help="the Hadoop distribution to be deployed",
32 | choices=("cdh", "cdp", "hdp"),
33 | required=True,
34 | )
35 | parser.add_argument(
36 | "--dry",
37 | help="do a dry run (only logs)",
38 | action="store_true",
39 | default=False,
40 | )
41 | parser.add_argument(
42 | "--version",
43 | action="version",
44 | version=f"%(prog)s v{isilon_hadoop_tools.__version__}",
45 | )
46 | return parser
47 |
48 |
49 | def configure_script(args):
50 | """Logic that applies to all scripts goes here."""
51 | if args.no_verify:
52 | urllib3.disable_warnings()
53 |
54 |
55 | def isilon_create_users_cli(parser=None):
56 | """Define CLI arguments and options for isilon_create_users."""
57 | if parser is None:
58 | parser = base_cli()
59 | parser.add_argument(
60 | "--start-gid",
61 | help="the lowest GID to create a group with",
62 | type=int,
63 | default=isilon_hadoop_tools.identities.Creator.default_start_gid,
64 | )
65 | parser.add_argument(
66 | "--start-uid",
67 | help="the lowest UID to create a user with",
68 | type=int,
69 | default=isilon_hadoop_tools.identities.Creator.default_start_uid,
70 | )
71 | parser.add_argument(
72 | "--user-password",
73 | help="the password for users created",
74 | type=str,
75 | default=None,
76 | )
77 | return parser
78 |
79 |
80 | @isilon_hadoop_tools.cli.catches(isilon_hadoop_tools.IsilonHadoopToolError)
81 | def isilon_create_users(argv=None):
82 | """Execute isilon_create_users commands."""
83 |
84 | if argv is None:
85 | argv = sys.argv[1:]
86 | args = isilon_create_users_cli().parse_args(argv)
87 |
88 | isilon_hadoop_tools.cli.configure_logging(args)
89 | configure_script(args)
90 | onefs = isilon_hadoop_tools.cli.hdfs_client(args)
91 |
92 | identities = {
93 | "cdh": isilon_hadoop_tools.identities.cdh_identities,
94 | "cdp": isilon_hadoop_tools.identities.cdp_identities,
95 | "hdp": isilon_hadoop_tools.identities.hdp_identities,
96 | }[args.dist](args.zone)
97 |
98 | name = "-".join(
99 | [
100 | str(int(time.time())),
101 | args.zone,
102 | args.dist,
103 | ]
104 | )
105 |
106 | if args.append_cluster_name is not None:
107 | suffix = args.append_cluster_name
108 | if not suffix.startswith("-"):
109 | suffix = "-" + suffix
110 | identities = isilon_hadoop_tools.identities.with_suffix_applied(
111 | identities, suffix
112 | )
113 | name += suffix
114 |
115 | onefs_and_files = isilon_hadoop_tools.identities.Creator(
116 | onefs=onefs,
117 | onefs_zone=args.zone,
118 | start_uid=args.start_uid,
119 | start_gid=args.start_gid,
120 | script_path=os.path.join(os.getcwd(), name + ".sh"),
121 | user_password=args.user_password,
122 | )
123 | if args.dry:
124 | LOGGER.info(DRY_RUN)
125 | LOGGER.info(
126 | "A script would have been created at %s.", onefs_and_files.script_path
127 | )
128 | LOGGER.info("The following actions would have populated it and OneFS:")
129 | onefs_and_files.log_identities(identities)
130 | else:
131 | onefs_and_files.create_identities(identities)
132 |
133 |
134 | def isilon_create_directories_cli(parser=None):
135 | """Define CLI arguments and options for isilon_create_directories."""
136 | if parser is None:
137 | parser = base_cli()
138 | return parser
139 |
140 |
141 | @isilon_hadoop_tools.cli.catches(isilon_hadoop_tools.IsilonHadoopToolError)
142 | def isilon_create_directories(argv=None):
143 | """Execute isilon_create_directories commands."""
144 |
145 | if argv is None:
146 | argv = sys.argv[1:]
147 | args = isilon_create_directories_cli().parse_args(argv)
148 |
149 | isilon_hadoop_tools.cli.configure_logging(args)
150 | configure_script(args)
151 | onefs = isilon_hadoop_tools.cli.hdfs_client(args)
152 |
153 | suffix = args.append_cluster_name
154 | if suffix is not None and not suffix.startswith("-"):
155 | suffix = "-" + suffix
156 |
157 | directories = {
158 | "cdh": isilon_hadoop_tools.directories.cdh_directories,
159 | "cdp": isilon_hadoop_tools.directories.cdp_directories,
160 | "hdp": isilon_hadoop_tools.directories.hdp_directories,
161 | }[args.dist](identity_suffix=suffix)
162 |
163 | creator = isilon_hadoop_tools.directories.Creator(
164 | onefs=onefs,
165 | onefs_zone=args.zone,
166 | )
167 | try:
168 | if args.dry:
169 | LOGGER.info(DRY_RUN)
170 | creator.log_directories(directories)
171 | else:
172 | creator.create_directories(directories)
173 | except isilon_hadoop_tools.directories.HDFSRootDirectoryError as exc:
174 | raise isilon_hadoop_tools.cli.CLIError(
175 | f"The HDFS root directory must not be {exc}."
176 | ) from exc
177 |
--------------------------------------------------------------------------------
/src/isilon_hadoop_tools/cli.py:
--------------------------------------------------------------------------------
1 | """This module defines a CLI common to all command-line tools."""
2 |
3 | import argparse
4 | import getpass
5 | import logging
6 |
7 | import isilon_hadoop_tools.onefs
8 |
9 |
10 | __all__ = [
11 | # Decorators
12 | "catches",
13 | # Exceptions
14 | "CLIError",
15 | "HintedError",
16 | # Functions
17 | "base_cli",
18 | "configure_logging",
19 | "hdfs_client",
20 | "logging_cli",
21 | "onefs_cli",
22 | "onefs_client",
23 | ]
24 |
25 | LOGGER = logging.getLogger(__name__)
26 |
27 |
28 | class CLIError(isilon_hadoop_tools.IsilonHadoopToolError):
29 | """All Exceptions emitted from this module inherit from this Exception."""
30 |
31 |
32 | def catches(exception):
33 | """Create a decorator for functions that emit the specified exception."""
34 |
35 | def decorator(func):
36 | """Decorate a function that should catch instances of the specified exception."""
37 |
38 | def decorated(*args, **kwargs):
39 | """Catch instances of a specified exception that are raised from the function."""
40 | try:
41 | return func(*args, **kwargs)
42 | except exception as ex:
43 | logging.error(ex)
44 | return 1
45 |
46 | return decorated
47 |
48 | return decorator
49 |
50 |
51 | def base_cli(parser=None):
52 | """Define common CLI arguments and options."""
53 | if parser is None:
54 | parser = argparse.ArgumentParser(
55 | formatter_class=argparse.ArgumentDefaultsHelpFormatter
56 | )
57 | onefs_cli(parser.add_argument_group("OneFS"))
58 | logging_cli(parser.add_argument_group("Logging"))
59 | return parser
60 |
61 |
62 | def onefs_cli(parser=None):
63 | """Define OneFS CLI arguments and options."""
64 | if parser is None:
65 | parser = argparse.ArgumentParser(
66 | formatter_class=argparse.ArgumentDefaultsHelpFormatter
67 | )
68 | parser.add_argument(
69 | "--zone",
70 | "-z",
71 | help="Specify a OneFS access zone.",
72 | type=str,
73 | required=True,
74 | )
75 | parser.add_argument(
76 | "--no-verify",
77 | help="Do not verify SSL/TLS certificates.",
78 | default=False,
79 | action="store_true",
80 | )
81 | parser.add_argument(
82 | "--onefs-password",
83 | help="Specify the password for --onefs-user.",
84 | type=str,
85 | )
86 | parser.add_argument(
87 | "--onefs-user",
88 | help="Specify the user to connect to OneFS as.",
89 | type=str,
90 | default="root",
91 | )
92 | parser.add_argument(
93 | "onefs_address",
94 | help="Specify an IP address or FQDN/SmartConnect that "
95 | "can be used to connect to and configure OneFS.",
96 | type=str,
97 | )
98 | return parser
99 |
100 |
101 | class HintedError(CLIError):
102 |
103 | """
104 | This exception is used to modify the error message passed to the user
105 | when a common error occurs that has a possible solution the user will likely want.
106 | """
107 |
108 | def __str__(self):
109 | base_str = super().__str__()
110 | return str(getattr(self, "__cause__", None)) + "\nHint: " + base_str
111 |
112 |
113 | def _client_from_onefs_cli(init, args):
114 | try:
115 | return init(
116 | address=args.onefs_address,
117 | username=args.onefs_user,
118 | password=getpass.getpass()
119 | if args.onefs_password is None
120 | else args.onefs_password,
121 | default_zone=args.zone,
122 | verify_ssl=not args.no_verify,
123 | )
124 | except isilon_hadoop_tools.onefs.OneFSCertificateError as exc:
125 | raise HintedError(
126 | "--no-verify can be used to skip certificate verification."
127 | ) from exc
128 | except isilon_hadoop_tools.onefs.MissingLicenseError as exc:
129 | raise CLIError(
130 | (
131 | isilon_hadoop_tools.onefs.APIError.license_expired_error_format
132 | if isinstance(exc, isilon_hadoop_tools.onefs.ExpiredLicenseError)
133 | else isilon_hadoop_tools.onefs.APIError.license_missing_error_format
134 | ).format(exc),
135 | ) from exc
136 | except isilon_hadoop_tools.onefs.MissingZoneError as exc:
137 | raise CLIError(
138 | isilon_hadoop_tools.onefs.APIError.zone_not_found_error_format.format(exc)
139 | ) from exc
140 |
141 |
142 | def hdfs_client(args):
143 | """Get a onefs.Client.for_hdfs from args parsed by onefs_cli."""
144 | return _client_from_onefs_cli(isilon_hadoop_tools.onefs.Client.for_hdfs, args)
145 |
146 |
147 | def onefs_client(args):
148 | """Get a onefs.Client from args parsed by onefs_cli."""
149 | return _client_from_onefs_cli(isilon_hadoop_tools.onefs.Client, args)
150 |
151 |
152 | def logging_cli(parser=None):
153 | """Define logging CLI arguments and options."""
154 | if parser is None:
155 | parser = argparse.ArgumentParser(
156 | formatter_class=argparse.ArgumentDefaultsHelpFormatter
157 | )
158 | parser.add_argument(
159 | "-q",
160 | "--quiet",
161 | default=False,
162 | action="store_true",
163 | help="Supress console output.",
164 | )
165 | parser.add_argument(
166 | "--log-file",
167 | type=str,
168 | help="Specify a path to log to.",
169 | )
170 | parser.add_argument(
171 | "--log-level",
172 | help="Specify how verbose logging should be.",
173 | default="info",
174 | choices=("debug", "info", "warning", "error", "critical"),
175 | )
176 | return parser
177 |
178 |
179 | def configure_logging(args):
180 | """Configure logging for command-line tools."""
181 | logging.getLogger().setLevel(logging.getLevelName(args.log_level.upper()))
182 | if not args.quiet:
183 | console_handler = logging.StreamHandler()
184 | console_handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
185 | logging.getLogger().addHandler(console_handler)
186 | if args.log_file:
187 | logfile_handler = logging.FileHandler(args.log_file)
188 | logfile_handler.setFormatter(
189 | logging.Formatter("[%(asctime)s] %(name)s [%(levelname)s] %(message)s"),
190 | )
191 | logging.getLogger().addHandler(logfile_handler)
192 |
--------------------------------------------------------------------------------
/src/isilon_hadoop_tools/directories.py:
--------------------------------------------------------------------------------
1 | """Define and create directories with appropriate permissions on OneFS."""
2 |
3 | import logging
4 | import posixpath
5 |
6 | import isilon_hadoop_tools.onefs
7 | from isilon_hadoop_tools import IsilonHadoopToolError
8 |
9 | __all__ = [
10 | # Exceptions
11 | "DirectoriesError",
12 | "HDFSRootDirectoryError",
13 | # Functions
14 | "cdh_directories",
15 | "cdp_directories",
16 | "hdp_directories",
17 | # Objects
18 | "Creator",
19 | "HDFSDirectory",
20 | ]
21 |
22 | LOGGER = logging.getLogger(__name__)
23 |
24 |
25 | class DirectoriesError(IsilonHadoopToolError):
26 | """All exceptions emitted from this module inherit from this Exception."""
27 |
28 |
29 | class HDFSRootDirectoryError(DirectoriesError):
30 | """This exception occurs when the HDFS root directory is not set to a usable path."""
31 |
32 |
33 | class Creator:
34 |
35 | """Create directories with appropriate ownership and permissions on OneFS."""
36 |
37 | def __init__(self, onefs, onefs_zone=None):
38 | self.onefs = onefs
39 | self.onefs_zone = onefs_zone
40 |
41 | def create_directories(
42 | self, directories, setup=None, mkdir=None, chmod=None, chown=None
43 | ):
44 | """Create directories on HDFS on OneFS."""
45 | if self.onefs_zone.lower() == "system":
46 | LOGGER.warning("Deploying in the System zone is not recommended.")
47 | sep = posixpath.sep
48 | zone_root = self.onefs.zone_settings(zone=self.onefs_zone)["path"].rstrip(sep)
49 | hdfs_root = self.onefs.hdfs_settings(zone=self.onefs_zone)[
50 | "root_directory"
51 | ].rstrip(sep)
52 | if hdfs_root == zone_root:
53 | LOGGER.warning("The HDFS root is the same as the zone root.")
54 | if hdfs_root == "/ifs":
55 | # The HDFS root requires non-default ownership/permissions,
56 | # and modifying /ifs can break NFS/SMB.
57 | raise HDFSRootDirectoryError(hdfs_root)
58 | assert hdfs_root.startswith(zone_root)
59 | zone_hdfs = hdfs_root[len(zone_root) :]
60 | if setup:
61 | setup(zone_root, hdfs_root, zone_hdfs)
62 | for directory in directories:
63 | path = posixpath.join(zone_hdfs, directory.path.lstrip(posixpath.sep))
64 | LOGGER.info("mkdir '%s%s'", zone_root, path)
65 | try:
66 | (mkdir or self.onefs.mkdir)(path, directory.mode, zone=self.onefs_zone)
67 | except isilon_hadoop_tools.onefs.APIError as exc:
68 | if exc.dir_path_already_exists_error():
69 | LOGGER.warning("%s%s already exists. ", zone_root, path)
70 | else:
71 | raise
72 | LOGGER.info("chmod '%o' '%s%s'", directory.mode, zone_root, path)
73 | (chmod or self.onefs.chmod)(path, directory.mode, zone=self.onefs_zone)
74 | LOGGER.info(
75 | "chown '%s:%s' '%s%s'",
76 | directory.owner,
77 | directory.group,
78 | zone_root,
79 | path,
80 | )
81 | (chown or self.onefs.chown)(
82 | path,
83 | owner=directory.owner,
84 | group=directory.group,
85 | zone=self.onefs_zone,
86 | )
87 |
88 | def log_directories(self, directories):
89 | """Log the actions that would be taken by create_directories."""
90 |
91 | def _pass(*_, **__):
92 | pass
93 |
94 | self.create_directories(
95 | directories, setup=_pass, mkdir=_pass, chmod=_pass, chown=_pass
96 | )
97 |
98 |
99 | class HDFSDirectory: # pylint: disable=too-few-public-methods
100 |
101 | """A Directory on HDFS"""
102 |
103 | def __init__(self, path, owner, group, mode):
104 | self.path = path
105 | self.owner = owner
106 | self.group = group
107 | self.mode = mode
108 |
109 | def apply_identity_suffix(self, suffix):
110 | """Append a suffix to all identities associated with the directory."""
111 | self.owner += suffix
112 | self.group += suffix
113 |
114 |
115 | def cdh_directories(identity_suffix=None):
116 | """Directories needed for Cloudera Distribution including Hadoop"""
117 | directories = [
118 | HDFSDirectory("/", "hdfs", "hadoop", 0o755),
119 | HDFSDirectory("/hbase", "hbase", "hbase", 0o755),
120 | HDFSDirectory("/solr", "solr", "solr", 0o775),
121 | HDFSDirectory("/tmp", "hdfs", "supergroup", 0o1777),
122 | HDFSDirectory("/tmp/hive", "hive", "supergroup", 0o777),
123 | HDFSDirectory("/tmp/logs", "mapred", "hadoop", 0o1777),
124 | HDFSDirectory("/user", "hdfs", "supergroup", 0o755),
125 | HDFSDirectory("/user/flume", "flume", "flume", 0o775),
126 | HDFSDirectory("/user/hdfs", "hdfs", "hdfs", 0o755),
127 | HDFSDirectory("/user/history", "mapred", "hadoop", 0o777),
128 | HDFSDirectory("/user/hive", "hive", "hive", 0o775),
129 | HDFSDirectory("/user/hive/warehouse", "hive", "hive", 0o1777),
130 | HDFSDirectory("/user/hue", "hue", "hue", 0o755),
131 | HDFSDirectory(
132 | "/user/hue/.cloudera_manager_hive_metastore_canary", "hue", "hue", 0o777
133 | ),
134 | HDFSDirectory("/user/impala", "impala", "impala", 0o775),
135 | HDFSDirectory("/user/oozie", "oozie", "oozie", 0o775),
136 | HDFSDirectory("/user/spark", "spark", "spark", 0o751),
137 | HDFSDirectory("/user/spark/applicationHistory", "spark", "spark", 0o1777),
138 | HDFSDirectory("/user/sqoop2", "sqoop2", "sqoop", 0o775),
139 | HDFSDirectory("/user/yarn", "yarn", "yarn", 0o755),
140 | ]
141 | if identity_suffix:
142 | for directory in directories:
143 | directory.apply_identity_suffix(identity_suffix)
144 | return directories
145 |
146 |
147 | def cdp_directories(identity_suffix=None):
148 | """Directories needed for Cloudera Data Platform"""
149 | directories = [
150 | HDFSDirectory("/", "hdfs", "hadoop", 0o755),
151 | HDFSDirectory("/hbase", "hbase", "hbase", 0o755),
152 | HDFSDirectory("/ranger", "hdfs", "supergroup", 0o755),
153 | HDFSDirectory("/ranger/audit", "hdfs", "supergroup", 0o755),
154 | HDFSDirectory("/solr", "solr", "solr", 0o775),
155 | HDFSDirectory("/tmp", "hdfs", "supergroup", 0o1777),
156 | HDFSDirectory("/tmp/hive", "hive", "supergroup", 0o777),
157 | HDFSDirectory("/tmp/logs", "yarn", "hadoop", 0o1777),
158 | HDFSDirectory("/user", "hdfs", "supergroup", 0o755),
159 | HDFSDirectory("/user/flume", "flume", "flume", 0o775),
160 | HDFSDirectory("/user/hdfs", "hdfs", "hdfs", 0o755),
161 | HDFSDirectory("/user/history", "mapred", "hadoop", 0o777),
162 | HDFSDirectory("/user/history/done_intermediate", "mapred", "hadoop", 0o1777),
163 | HDFSDirectory("/user/hive", "hive", "hive", 0o775),
164 | HDFSDirectory("/user/hive/warehouse", "hive", "hive", 0o1777),
165 | HDFSDirectory("/user/hue", "hue", "hue", 0o755),
166 | HDFSDirectory(
167 | "/user/hue/.cloudera_manager_hive_metastore_canary", "hue", "hue", 0o777
168 | ),
169 | HDFSDirectory("/user/impala", "impala", "impala", 0o775),
170 | HDFSDirectory("/user/livy", "livy", "livy", 0o775),
171 | HDFSDirectory("/user/oozie", "oozie", "oozie", 0o775),
172 | HDFSDirectory("/user/spark", "spark", "spark", 0o751),
173 | HDFSDirectory("/user/spark/applicationHistory", "spark", "spark", 0o1777),
174 | HDFSDirectory("/user/spark/spark3ApplicationHistory", "spark", "spark", 0o1777),
175 | HDFSDirectory("/user/spark/driverLogs", "spark", "spark", 0o1777),
176 | HDFSDirectory("/user/spark/driver3Logs", "spark", "spark", 0o1777),
177 | HDFSDirectory("/user/sqoop", "sqoop", "sqoop", 0o775),
178 | HDFSDirectory("/user/sqoop2", "sqoop2", "sqoop", 0o775),
179 | HDFSDirectory("/user/tez", "hdfs", "supergroup", 0o775),
180 | HDFSDirectory("/user/yarn", "hdfs", "supergroup", 0o775),
181 | HDFSDirectory("/user/yarn/mapreduce", "hdfs", "supergroup", 0o775),
182 | HDFSDirectory("/user/yarn/mapreduce/mr-framework", "yarn", "hadoop", 0o775),
183 | HDFSDirectory("/user/yarn/services", "hdfs", "supergroup", 0o775),
184 | HDFSDirectory("/user/yarn/services/service-framework", "hdfs", "supergroup", 0o775),
185 | HDFSDirectory("/user/zeppelin", "zeppelin", "zeppelin", 0o775),
186 | HDFSDirectory("/warehouse", "hdfs", "supergroup", 0o775),
187 | HDFSDirectory("/warehouse/tablespace", "hdfs", "supergroup", 0o775),
188 | HDFSDirectory("/warehouse/tablespace/external", "hdfs", "supergroup", 0o775),
189 | HDFSDirectory("/warehouse/tablespace/managed", "hdfs", "supergroup", 0o775),
190 | HDFSDirectory("/warehouse/tablespace/external/hive", "hive", "hive", 0o1775),
191 | HDFSDirectory("/warehouse/tablespace/managed/hive", "hive", "hive", 0o1775),
192 | HDFSDirectory("/yarn", "yarn", "yarn", 0o700),
193 | HDFSDirectory("/yarn/node-labels", "yarn", "yarn", 0o700),
194 | ]
195 | if identity_suffix:
196 | for directory in directories:
197 | directory.apply_identity_suffix(identity_suffix)
198 | return directories
199 |
200 |
201 | def hdp_directories(identity_suffix=None):
202 | """Directories needed for Hortonworks Data Platform"""
203 | directories = [
204 | HDFSDirectory("/", "hdfs", "hadoop", 0o755),
205 | HDFSDirectory("/app-logs", "yarn", "hadoop", 0o1777),
206 | HDFSDirectory("/app-logs/ambari-qa", "ambari-qa", "hadoop", 0o770),
207 | HDFSDirectory("/app-logs/ambari-qa/logs", "ambari-qa", "hadoop", 0o770),
208 | HDFSDirectory("/apps", "hdfs", "hadoop", 0o755),
209 | HDFSDirectory("/apps/accumulo", "accumulo", "hadoop", 0o750),
210 | HDFSDirectory("/apps/falcon", "falcon", "hdfs", 0o777),
211 | HDFSDirectory("/apps/hbase", "hdfs", "hadoop", 0o755),
212 | HDFSDirectory("/apps/hbase/data", "hbase", "hadoop", 0o775),
213 | HDFSDirectory("/apps/hbase/staging", "hbase", "hadoop", 0o711),
214 | HDFSDirectory("/apps/hive", "hdfs", "hdfs", 0o755),
215 | HDFSDirectory("/apps/hive/warehouse", "hive", "hdfs", 0o777),
216 | HDFSDirectory("/apps/tez", "tez", "hdfs", 0o755),
217 | HDFSDirectory("/apps/webhcat", "hcat", "hdfs", 0o755),
218 | HDFSDirectory("/ats", "yarn", "hdfs", 0o755),
219 | HDFSDirectory("/ats/done", "yarn", "hdfs", 0o775),
220 | HDFSDirectory("/atsv2", "yarn-ats", "hadoop", 0o755),
221 | HDFSDirectory("/mapred", "mapred", "hadoop", 0o755),
222 | HDFSDirectory("/mapred/system", "mapred", "hadoop", 0o755),
223 | HDFSDirectory("/system", "yarn", "hadoop", 0o755),
224 | HDFSDirectory("/system/yarn", "yarn", "hadoop", 0o755),
225 | HDFSDirectory("/system/yarn/node-labels", "yarn", "hadoop", 0o700),
226 | HDFSDirectory("/tmp", "hdfs", "hdfs", 0o1777),
227 | HDFSDirectory("/tmp/hive", "ambari-qa", "hdfs", 0o777),
228 | HDFSDirectory("/user", "hdfs", "hdfs", 0o755),
229 | HDFSDirectory("/user/ambari-qa", "ambari-qa", "hdfs", 0o770),
230 | HDFSDirectory("/user/hcat", "hcat", "hdfs", 0o755),
231 | HDFSDirectory("/user/hdfs", "hdfs", "hdfs", 0o755),
232 | HDFSDirectory("/user/hive", "hive", "hdfs", 0o700),
233 | HDFSDirectory("/user/hue", "hue", "hue", 0o755),
234 | HDFSDirectory("/user/oozie", "oozie", "hdfs", 0o775),
235 | HDFSDirectory("/user/yarn", "yarn", "hdfs", 0o755),
236 | ]
237 | if identity_suffix:
238 | for directory in directories:
239 | directory.apply_identity_suffix(identity_suffix)
240 | return directories
241 |
--------------------------------------------------------------------------------
/src/isilon_hadoop_tools/identities.py:
--------------------------------------------------------------------------------
1 | """Define and create necessary Hadoop users and groups on OneFS."""
2 |
3 | import logging
4 | import os
5 |
6 | import isilon_hadoop_tools.onefs
7 |
8 |
9 | __all__ = [
10 | # Functions
11 | "cdh_identities",
12 | "cdp_identities",
13 | "hdp_identities",
14 | "iterate_identities",
15 | "log_identities",
16 | "with_suffix_applied",
17 | # Objects
18 | "Creator",
19 | ]
20 |
21 | ENCODING = "utf-8"
22 | LOGGER = logging.getLogger(__name__)
23 |
24 |
25 | def _log_create_group(group_name):
26 | LOGGER.info("Create %s group.", group_name)
27 |
28 |
29 | def _log_create_user(user_name, pgroup_name):
30 | LOGGER.info("Create %s:%s user.", user_name, pgroup_name)
31 |
32 |
33 | def _log_add_user_to_group(user_name, group_name):
34 | LOGGER.info("Add %s user to %s group.", user_name, group_name)
35 |
36 |
37 | def _log_create_proxy_user(proxy_user_name, members):
38 | LOGGER.info(
39 | "Create %s proxy user with the following members: %s.",
40 | proxy_user_name,
41 | ", ".join(
42 | f"{member_name} ({member_type})" for member_name, member_type in members
43 | ),
44 | )
45 |
46 |
47 | class Creator:
48 |
49 | """
50 | Create users and groups with contiguous IDs on OneFS
51 | and in a local user/group creation script for Linux.
52 | """
53 |
54 | # pylint: disable=logging-format-interpolation
55 |
56 | default_start_uid = 1025
57 | default_start_gid = 1025
58 |
59 | def __init__( # pylint: disable=too-many-arguments
60 | self,
61 | onefs,
62 | onefs_zone=None,
63 | start_uid=default_start_uid,
64 | start_gid=default_start_gid,
65 | script_path=None,
66 | user_password=None,
67 | ):
68 | self.onefs = onefs
69 | self.onefs_zone = onefs_zone
70 | self._next_uid = start_uid
71 | self._next_gid = start_gid
72 | self.script_path = script_path
73 | self.user_password = user_password
74 |
75 | @property
76 | def next_gid(self):
77 | """Get the next monotonically-increasing GID (begins at start_gid)."""
78 | try:
79 | return self._next_gid
80 | finally:
81 | self._next_gid += 1
82 |
83 | @property
84 | def next_uid(self):
85 | """Get the next monotonically-increasing UID (begins at start_uid)."""
86 | try:
87 | return self._next_uid
88 | finally:
89 | self._next_uid += 1
90 |
91 | def add_user_to_group(self, user_name, group_name):
92 | """Add a user to a group on OneFS and in the local group-creation script."""
93 | try:
94 | LOGGER.info(
95 | "Adding the %s user to the %s group on %s...",
96 | user_name,
97 | group_name,
98 | self.onefs.address,
99 | )
100 | self.onefs.add_user_to_group(
101 | user_name=user_name,
102 | group_name=group_name,
103 | zone=self.onefs_zone,
104 | )
105 | except isilon_hadoop_tools.onefs.APIError as exc:
106 | uid = self.onefs.uid_of_user(user_name=user_name, zone=self.onefs_zone)
107 | if exc.user_already_in_group_error(uid, group_name):
108 | LOGGER.warning(
109 | exc.user_already_in_group_error_format.format(uid, group_name)
110 | )
111 | else:
112 | raise
113 | if self.script_path:
114 | self._create_script()
115 | LOGGER.info(
116 | "Adding the %s user to the %s group in %s...",
117 | user_name,
118 | group_name,
119 | self.script_path,
120 | )
121 | with open(self.script_path, "a", encoding=ENCODING) as script_file:
122 | script_file.write(f"usermod -a -G {group_name} {user_name}\n")
123 |
124 | def create_group(self, group_name):
125 | """Create a group on OneFS and in the local script."""
126 | while True:
127 | try:
128 | gid = self.next_gid
129 | LOGGER.info(
130 | "Creating the %s group with GID %s on %s...",
131 | group_name,
132 | gid,
133 | self.onefs.address,
134 | )
135 | self.onefs.create_group(name=group_name, gid=gid, zone=self.onefs_zone)
136 | break
137 | except isilon_hadoop_tools.onefs.APIError as exc:
138 | if exc.gid_already_exists_error(gid):
139 | LOGGER.warning(exc.gid_already_exists_error_format.format(gid))
140 | continue
141 | if exc.group_already_exists_error(group_name):
142 | LOGGER.warning(
143 | exc.group_already_exists_error_format.format(group_name)
144 | )
145 | gid = self.onefs.gid_of_group(
146 | group_name=group_name, zone=self.onefs_zone
147 | )
148 | break
149 | raise
150 | if self.script_path:
151 | self._create_script()
152 | LOGGER.info(
153 | "Creating the %s group with GID %s in %s...",
154 | group_name,
155 | gid,
156 | self.script_path,
157 | )
158 | with open(self.script_path, "a", encoding=ENCODING) as script_file:
159 | script_file.write(f"groupadd --gid {gid} {group_name}\n")
160 | return gid
161 |
162 | def create_identities(
163 | self,
164 | identities,
165 | create_group=None,
166 | create_user=None,
167 | add_user_to_group=None,
168 | create_proxy_user=None,
169 | _flush_auth_cache=None,
170 | _create_script=None,
171 | ):
172 | """Create identities on OneFS and in the local script."""
173 | if self.onefs_zone.lower() == "system":
174 | LOGGER.warning("Deploying in the System zone is not recommended.")
175 | if self.script_path:
176 | LOGGER.info("Creating %s...", self.script_path)
177 | (_create_script or self._create_script)()
178 | iterate_identities(
179 | identities,
180 | create_group=create_group or self.create_group,
181 | create_user=create_user or self.create_user,
182 | add_user_to_group=add_user_to_group or self.add_user_to_group,
183 | create_proxy_user=create_proxy_user or self.create_proxy_user,
184 | )
185 | LOGGER.info("Flushing the auth cache...")
186 | (_flush_auth_cache or self.onefs.flush_auth_cache)()
187 |
188 | def log_identities(self, identities):
189 | """Log the actions that would be taken by create_identities."""
190 | self.create_identities(
191 | identities,
192 | create_group=_log_create_group,
193 | create_user=_log_create_user,
194 | add_user_to_group=_log_add_user_to_group,
195 | create_proxy_user=_log_create_proxy_user,
196 | _flush_auth_cache=lambda: None,
197 | _create_script=lambda: None,
198 | )
199 |
200 | def create_proxy_user(self, proxy_user_name, members):
201 | """Create a proxy user on OneFS."""
202 | try:
203 | LOGGER.info(
204 | "Creating the %s proxy user with the following members: %s...",
205 | proxy_user_name,
206 | ", ".join(
207 | f"{member_name} ({member_type})"
208 | for member_name, member_type in members
209 | ),
210 | )
211 | self.onefs.create_hdfs_proxy_user(
212 | name=proxy_user_name,
213 | members=members,
214 | zone=self.onefs_zone,
215 | )
216 | except isilon_hadoop_tools.onefs.APIError as exc:
217 | if exc.proxy_user_already_exists_error(proxy_user_name=proxy_user_name):
218 | LOGGER.warning(
219 | exc.proxy_user_already_exists_error_format.format(proxy_user_name)
220 | )
221 | return
222 | raise
223 |
224 | def _create_script(self):
225 | if not os.path.exists(self.script_path):
226 | with open(self.script_path, "w", encoding=ENCODING) as script_file:
227 | script_file.write("#!/usr/bin/env sh\n")
228 | script_file.write("set -o errexit\n")
229 | script_file.write("set -o xtrace\n")
230 |
231 | def create_user(self, user_name, primary_group_name):
232 | """Create a user on OneFS and in the local script."""
233 | while True:
234 | try:
235 | uid = self.next_uid
236 | LOGGER.info(
237 | "Creating the %s user with UID %s on %s...",
238 | user_name,
239 | uid,
240 | self.onefs.address,
241 | )
242 | self.onefs.create_user(
243 | name=user_name,
244 | uid=uid,
245 | primary_group_name=primary_group_name,
246 | zone=self.onefs_zone,
247 | enabled=True,
248 | password=self.user_password,
249 | )
250 | break
251 | except isilon_hadoop_tools.onefs.APIError as exc:
252 | if exc.uid_already_exists_error(uid):
253 | LOGGER.warning(exc.uid_already_exists_error_format.format(uid))
254 | continue
255 | if exc.user_already_exists_error(user_name):
256 | LOGGER.warning(
257 | exc.user_already_exists_error_format.format(user_name)
258 | )
259 | uid = self.onefs.uid_of_user(
260 | user_name=user_name, zone=self.onefs_zone
261 | )
262 | break
263 | raise
264 | if self.script_path:
265 | self._create_script()
266 | LOGGER.info(
267 | "Creating the %s user with UID %s in %s...",
268 | user_name,
269 | uid,
270 | self.script_path,
271 | )
272 | gid = self.onefs.gid_of_group(
273 | group_name=self.onefs.primary_group_of_user(
274 | user_name=user_name,
275 | zone=self.onefs_zone,
276 | ),
277 | zone=self.onefs_zone,
278 | )
279 | with open(self.script_path, "a", encoding=ENCODING) as script_file:
280 | script_file.write(f"useradd --uid {uid} --gid {gid} {user_name}\n")
281 | return uid
282 |
283 |
284 | def iterate_identities(
285 | identities,
286 | create_group,
287 | create_user,
288 | add_user_to_group,
289 | create_proxy_user,
290 | ):
291 | """Iterate over all groups, users, and proxy users in creation-order."""
292 |
293 | created_group_names = set()
294 | for group_name in identities["groups"]:
295 | if group_name not in created_group_names:
296 | create_group(group_name)
297 | created_group_names.add(group_name)
298 |
299 | for user_name, (pgroup_name, sgroup_names) in identities["users"].items():
300 | for group_name in sgroup_names.union({pgroup_name}):
301 | if group_name not in created_group_names:
302 | create_group(group_name)
303 | created_group_names.add(group_name)
304 | create_user(user_name, pgroup_name)
305 | for group_name in sgroup_names:
306 | add_user_to_group(user_name, group_name)
307 |
308 | for proxy_user_name, members in identities["proxy_users"].items():
309 | create_proxy_user(proxy_user_name, members)
310 |
311 |
312 | def log_identities(identities):
313 | """Iterate identities in creation-order and log the actions that would be taken."""
314 | iterate_identities(
315 | identities,
316 | create_group=_log_create_group,
317 | create_user=_log_create_user,
318 | add_user_to_group=_log_add_user_to_group,
319 | create_proxy_user=_log_create_proxy_user,
320 | )
321 |
322 |
323 | def with_suffix_applied(
324 | identities,
325 | suffix,
326 | applicator=lambda identity, suffix: identity + suffix,
327 | ):
328 | """Append a suffix to all identities."""
329 | return {
330 | "groups": {
331 | applicator(group_name, suffix) for group_name in identities["groups"]
332 | },
333 | "users": {
334 | applicator(user_name, suffix): (
335 | applicator(pgroup_name, suffix),
336 | {applicator(sgroup_name, suffix) for sgroup_name in sgroup_names},
337 | )
338 | for user_name, (pgroup_name, sgroup_names) in identities["users"].items()
339 | },
340 | "proxy_users": {
341 | applicator(proxy_user_name, suffix): {
342 | (applicator(member_name, suffix), member_type)
343 | for member_name, member_type in members
344 | }
345 | for proxy_user_name, members in identities["proxy_users"].items()
346 | },
347 | }
348 |
349 |
350 | def cdh_identities(zone):
351 | """Identities needed for Cloudera Distribution including Hadoop"""
352 | smoke_user = ("cloudera-scm", "user")
353 | identities = {
354 | "groups": set(), # Groups with no users in them.
355 | "users": {
356 | "accumulo": ("accumulo", set()),
357 | "anonymous": ("anonymous", set()),
358 | "apache": ("apache", set()),
359 | "cloudera-scm": ("cloudera-scm", set()),
360 | "cmjobuser": ("cmjobuser", set()),
361 | "flume": ("flume", set()),
362 | "hbase": ("hbase", {"hadoop", "supergroup"}),
363 | "hdfs": ("hdfs", {"hadoop", "supergroup"}),
364 | "hive": ("hive", set()),
365 | "HTTP": ("HTTP", {"hadoop", "supergroup"}),
366 | "httpfs": ("httpfs", set()),
367 | "hue": ("hue", set()),
368 | "impala": ("impala", {"hive"}),
369 | "kafka": ("kafka", set()),
370 | "keytrustee": ("keytrustee", set()),
371 | "kms": ("kms", set()),
372 | "kudu": ("kudu", set()),
373 | "llama": ("llama", set()),
374 | "mapred": ("mapred", {"hadoop", "supergroup"}),
375 | "oozie": ("oozie", set()),
376 | "sentry": ("sentry", set()),
377 | "solr": ("solr", set()),
378 | "spark": ("spark", set()),
379 | "sqoop": ("sqoop", {"sqoop2"}),
380 | "sqoop2": ("sqoop2", {"sqoop"}),
381 | "yarn": ("yarn", {"hadoop", "supergroup"}),
382 | "zookeeper": ("zookeeper", set()),
383 | },
384 | "proxy_users": {
385 | "flume": {smoke_user, ("hadoop", "group")},
386 | "hive": {smoke_user, ("hadoop", "group")},
387 | "HTTP": {smoke_user},
388 | "hue": {smoke_user, ("hadoop", "group")},
389 | "impala": {smoke_user, ("hadoop", "group")},
390 | "mapred": {smoke_user, ("hadoop", "group")},
391 | "oozie": {smoke_user, ("hadoop", "group")},
392 | },
393 | }
394 | if zone.lower() != "system":
395 | identities["users"]["admin"] = ("admin", set())
396 | return identities
397 |
398 |
399 | def cdp_identities(zone):
400 | """Identities needed for Cloudera Data Platform"""
401 | smoke_user = ("cloudera-scm", "user")
402 | identities = {
403 | "groups": set(), # Groups with no users in them.
404 | "users": {
405 | "accumulo": ("accumulo", set()),
406 | "anonymous": ("anonymous", set()),
407 | "apache": ("apache", set()),
408 | "atlas": ("atlas", {"hadoop", "supergroup"}),
409 | "cloudera-scm": ("cloudera-scm", set()),
410 | "cmjobuser": ("cmjobuser", set()),
411 | "cruisecontrol": ("cruisecontrol", set()),
412 | "druid": ("druid", {"hadoop", "supergroup"}),
413 | "flume": ("flume", set()),
414 | "hbase": ("hbase", {"hadoop", "supergroup"}),
415 | "hdfs": ("hdfs", {"hadoop", "supergroup"}),
416 | "hive": ("hive", set()),
417 | "HTTP": ("HTTP", {"hadoop", "supergroup"}),
418 | "httpfs": ("httpfs", set()),
419 | "hue": ("hue", set()),
420 | "impala": ("impala", {"hive"}),
421 | "kafka": ("kafka", set()),
422 | "keyadmin": ("keyadmin", set()),
423 | "keytrustee": ("keytrustee", set()),
424 | "kms": ("kms", set()),
425 | "knox": ("knox", set()),
426 | "knoxui": ("knoxui", set()),
427 | "kudu": ("kudu", set()),
428 | "llama": ("llama", set()),
429 | "livy": ("livy", set()),
430 | "mapred": ("mapred", {"hadoop", "supergroup"}),
431 | "nifi": ("nifi", set()),
432 | "nifiregistry": ("nifiregistry", set()),
433 | "oozie": ("oozie", set()),
434 | "phoenix": ("phoenix", set()),
435 | "ranger": ("ranger", {"hadoop", "supergroup"}),
436 | "rangeradmin": ("rangeradmin", set()),
437 | "rangerlookup": ("rangerlookup", set()),
438 | "rangerraz": ("rangerraz", set()),
439 | "rangerrms": ("rangerrms", set()),
440 | "rangertagsync": ("rangertagsync", set()),
441 | "rangerusersync": ("rangerusersync", set()),
442 | "schemaregistry": ("schemaregistry", set()),
443 | "sentry": ("sentry", set()),
444 | "solr": ("solr", set()),
445 | "spark": ("spark", set()),
446 | "sqoop": ("sqoop", {"sqoop2"}),
447 | "sqoop2": ("sqoop2", {"sqoop"}),
448 | "streamsmsgmgr": ("streamsmsgmgr", set()),
449 | "tez": ("tez", set()),
450 | "superset": ("superset", set()),
451 | "yarn": ("yarn", {"hadoop", "supergroup"}),
452 | "zeppelin": ("zeppelin", set()),
453 | "zookeeper": ("zookeeper", set()),
454 | },
455 | "proxy_users": {
456 | "flume": {smoke_user, ("hadoop", "group")},
457 | "hive": {smoke_user, ("hadoop", "group")},
458 | "hue": {smoke_user, ("hadoop", "group")},
459 | "impala": {smoke_user, ("hadoop", "group")},
460 | "mapred": {smoke_user, ("hadoop", "group")},
461 | "oozie": {smoke_user, ("hadoop", "group")},
462 | "phoenix": {smoke_user, ("hadoop", "group")},
463 | "yarn": {smoke_user, ("hadoop", "group")},
464 | "knox": {smoke_user, ("hadoop", "group")},
465 | "hdfs": {smoke_user, ("hadoop", "group")},
466 | "livy": {smoke_user, ("hadoop", "group")},
467 | "HTTP": {smoke_user},
468 | },
469 | }
470 | if zone.lower() != "system":
471 | identities["users"]["admin"] = ("admin", set())
472 | return identities
473 |
474 |
475 | def hdp_identities(zone):
476 | """Identities needed for Hortonworks Data Platform"""
477 | smoke_user = ("ambari-qa", "user")
478 | identities = {
479 | "groups": set(), # Groups with no users in them.
480 | "users": {
481 | "accumulo": ("accumulo", {"hadoop"}),
482 | "activity_analyzer": ("activity_analyzer", {"hadoop"}),
483 | "activity_explorer": ("activity_explorer", {"hadoop"}),
484 | "ambari-qa": ("ambari-qa", {"hadoop"}),
485 | "ambari-server": ("ambari-server", {"hadoop"}),
486 | "ams": ("ams", {"hadoop"}),
487 | "anonymous": ("anonymous", set()),
488 | "atlas": ("atlas", {"hadoop"}),
489 | "druid": ("druid", {"hadoop"}),
490 | "falcon": ("falcon", {"hadoop"}),
491 | "flume": ("flume", {"hadoop"}),
492 | "gpadmin": ("gpadmin", {"hadoop"}),
493 | "hadoopqa": ("hadoopqa", {"hadoop"}),
494 | "hbase": ("hbase", {"hadoop"}),
495 | "hcat": ("hcat", {"hadoop"}),
496 | "hdfs": ("hdfs", {"hadoop"}),
497 | "hive": ("hive", {"hadoop"}),
498 | "HTTP": ("HTTP", {"hadoop"}),
499 | "hue": ("hue", {"hadoop"}),
500 | "infra-solr": ("infra-solr", {"hadoop"}),
501 | "kafka": ("kafka", {"hadoop"}),
502 | "keyadmin": ("keyadmin", {"hadoop"}),
503 | "kms": ("kms", {"hadoop"}),
504 | "knox": ("knox", {"hadoop"}),
505 | "livy": ("livy", {"hadoop"}),
506 | "logsearch": ("logsearch", {"hadoop"}),
507 | "mahout": ("mahout", {"hadoop"}),
508 | "mapred": ("mapred", {"hadoop"}),
509 | "oozie": ("oozie", {"hadoop"}),
510 | "ranger": ("ranger", {"hadoop"}),
511 | "rangerlookup": ("rangerlookup", {"hadoop"}),
512 | "spark": ("spark", {"hadoop"}),
513 | "sqoop": ("sqoop", {"hadoop"}),
514 | "storm": ("storm", {"hadoop"}),
515 | "tez": ("tez", {"hadoop"}),
516 | "tracer": ("tracer", {"hadoop"}),
517 | "yarn": ("yarn", {"hadoop"}),
518 | "yarn-ats": ("yarn-ats", {"hadoop"}),
519 | "yarn-ats-hbase": ("yarn-ats-hbase", {"hadoop"}),
520 | "zeppelin": ("zeppelin", {"hadoop"}),
521 | "zookeeper": ("zookeeper", {"hadoop"}),
522 | },
523 | "proxy_users": {
524 | "ambari-server": {smoke_user},
525 | "flume": {smoke_user, ("hadoop", "group")},
526 | "hbase": {smoke_user, ("hadoop", "group")},
527 | "hcat": {smoke_user, ("hadoop", "group")},
528 | "hive": {smoke_user, ("hadoop", "group")},
529 | "HTTP": {smoke_user},
530 | "knox": {smoke_user},
531 | "livy": {smoke_user, ("hadoop", "group")},
532 | "oozie": {smoke_user, ("hadoop", "group")},
533 | "yarn": {smoke_user, ("hadoop", "group")},
534 | },
535 | }
536 | if zone.lower() != "system":
537 | identities["users"]["admin"] = ("admin", set())
538 | return identities
539 |
--------------------------------------------------------------------------------
/src/isilon_hadoop_tools/onefs.py:
--------------------------------------------------------------------------------
1 | """Classes for Interacting with OneFS"""
2 |
3 | # pylint: disable=too-many-lines
4 |
5 | from datetime import date, datetime
6 | from enum import Enum
7 | import json
8 | import logging
9 | import posixpath
10 | import socket
11 | import struct
12 | import time
13 | from urllib.parse import urlparse, urlunparse # Python 3
14 |
15 | import requests
16 | import urllib3
17 |
18 | from isilon_hadoop_tools import IsilonHadoopToolError
19 |
20 |
21 | __all__ = [
22 | # Constants / Enums
23 | "ONEFS_RELEASES",
24 | "OneFSFeature",
25 | # Decorators
26 | "accesses_onefs",
27 | # Exceptions
28 | "APIError",
29 | "ExpiredLicenseError",
30 | "MalformedAPIError",
31 | "MissingLicenseError",
32 | "MissingZoneError",
33 | "MixedModeError",
34 | "NonSDKAPIError",
35 | "OneFSError",
36 | "OneFSConnectionError",
37 | "OneFSCertificateError",
38 | "OneFSValueError",
39 | "UndecodableAPIError",
40 | "UndeterminableVersion",
41 | "UnsupportedOperation",
42 | "UnsupportedVersion",
43 | # Functions
44 | "sdk_for_revision",
45 | # Objects
46 | "Client",
47 | ]
48 |
49 | LOGGER = logging.getLogger(__name__)
50 | ONEFS_RELEASES = {
51 | "7.2.0.0": 0x70200500000000A,
52 | "8.0.0.0": 0x800005000000025,
53 | "8.0.0.4": 0x800005000400035,
54 | "8.0.1.0": 0x800015000000007,
55 | "8.0.1.1": 0x800015000100070,
56 | "8.1.0.0": 0x80100500000000B,
57 | "8.1.1.0": 0x8010150000000D4,
58 | "8.1.2.0": 0x801025000000010,
59 | "8.1.3.0": 0x80103500000000D,
60 | "8.2.0.0": 0x80200500000000B,
61 | "8.2.1.0": 0x802015000000004,
62 | "8.2.2.0": 0x802025000000007,
63 | "8.2.3.0": 0x802035000000000,
64 | }
65 | REQUEST_TIMEOUT = 60 * 3 # seconds
66 |
67 |
68 | class OneFSFeature(Enum):
69 |
70 | """OneFS Feature Flags for Use with Client.feature_is_supported"""
71 |
72 | # These values come from sys/sys/isi_upgrade_api_featuremap.h.
73 | # For example,
74 | # ISI_UPGRADE_API_FEATURE_VERSION(PIPE_UAPI_OVERRIDES, 8, 1, 3, 0)
75 | # translates to
76 | # PIPE_UAPI_OVERRIDES = (0x8010300, 0)
77 |
78 | _GEN = {
79 | "INIT": 0x0000000,
80 | "JAWS": 0x7010100,
81 | "MOBY": 0x7020000,
82 | "ORCA": 0x7020100,
83 | "RIP0": 0x7030000,
84 | "RIP1": 0x7030100,
85 | "RIPT": 0x8000000,
86 | "HAPI": 0x8000100,
87 | "FRTR": 0x8010000,
88 | "NJMA": 0x8010100,
89 | "KANA": 0x8010200,
90 | "NDUU": 0x8010300,
91 | "PIPE": 0x8020000,
92 | "ERA1": 0x9000100,
93 | }
94 |
95 | # pylint: disable=invalid-name
96 | FOREVER = (_GEN["INIT"], 0)
97 |
98 | JAWS_RU = (_GEN["JAWS"], 0)
99 |
100 | MOBY_PROTECTION = (_GEN["MOBY"], 0)
101 | MOBY_SNAPDELETE = (_GEN["MOBY"], 1)
102 | MOBY_RU = (_GEN["MOBY"], 2)
103 | MOBY_UNFS = (_GEN["MOBY"], 3)
104 | MOBY_AUTH_UPGRADE = (_GEN["MOBY"], 4)
105 |
106 | ORCA_RU = (_GEN["ORCA"], 0)
107 | RIPT_CONSISTENT_HASH = (_GEN["ORCA"], 1)
108 |
109 | RIPT_RBM_VERSIONING = (_GEN["RIP0"], 0)
110 |
111 | BATCH_ERROR_DSR = (_GEN["RIP1"], 1)
112 | RIPTIDE_MEDIASCAN = (_GEN["RIP1"], 2)
113 | RIPT_8K_INODES = (_GEN["RIP1"], 3)
114 |
115 | RIPT_DEDUPE = (_GEN["RIPT"], 0)
116 | RIPTIDE_TRUNCATE = (_GEN["RIPT"], 1)
117 | RIPTIDE_CHANGELISTCREATE = (_GEN["RIPT"], 2)
118 | RIPT_GMP_SERVICES = (_GEN["RIPT"], 3)
119 | RIPT_NLM = (_GEN["RIPT"], 4)
120 | RIPTIDE_FSA = (_GEN["RIPT"], 5)
121 | RIPT_SMARTPOOLS = (_GEN["RIPT"], 6)
122 | RIPT_AUTH_UPGRADE = (_GEN["RIPT"], 7)
123 | RIPT_CELOG_UPGRADE = (_GEN["RIPT"], 8)
124 |
125 | HALFPIPE_PARTITIONED_PERFORMANCE = (_GEN["HAPI"], 0)
126 | HP_JE = (_GEN["HAPI"], 1)
127 | HP_WORM = (_GEN["HAPI"], 2)
128 | HP_PROXY = (_GEN["HAPI"], 3)
129 | HALFPIPE_CONTAINERS = (_GEN["HAPI"], 4)
130 | HP_NEEDS_NDU_FLAG = (_GEN["HAPI"], 5)
131 | HP_RANGER = (_GEN["HAPI"], 6)
132 | HP_AMBARI_METRICS = (_GEN["HAPI"], 7)
133 | HP_DATANODE_WIRE_ENCRYPTION = (_GEN["HAPI"], 8)
134 |
135 | FT_SMARTPOOLS = (_GEN["FRTR"], 0)
136 | FRT_MIRRORED_JOURNAL = (_GEN["FRTR"], 1)
137 | FRT_LIN_SUPER_DRIVE_QUORUM = (_GEN["FRTR"], 2)
138 | FREIGHT_TRAINS_LAYOUT = (_GEN["FRTR"], 3)
139 | FT_ESRS = (_GEN["FRTR"], 4)
140 | FRT_COMPRESSED_INODES = (_GEN["FRTR"], 5)
141 | FTR_LICENSE_MIGRATION = (_GEN["FRTR"], 6)
142 |
143 | PIPE_ITER_MARK = (_GEN["NJMA"], 0)
144 | NIIJIMA_CPOOL_GOOGLE_XML = (_GEN["NJMA"], 1)
145 | NJMA_HDFS_INOTIFY = (_GEN["NJMA"], 2)
146 | NJMA_HDFS_FSIMAGE = (_GEN["NJMA"], 3)
147 | NIIJIMA_CLUSTER_TIME = (_GEN["NJMA"], 4)
148 | NIIJIMA_SMB = (_GEN["NJMA"], 5)
149 | NIIJIMA_ESRS = (_GEN["NJMA"], 6)
150 |
151 | KANA_HDFS_REF_BY_INODE = (_GEN["KANA"], 0)
152 | KANA_WEBHDFS_DELEGATION_TOKENS = (_GEN["KANA"], 1)
153 |
154 | PIPE_UAPI_OVERRIDES = (_GEN["NDUU"], 0)
155 |
156 | PIPE_AUTH_AWS_V4 = (_GEN["PIPE"], 0)
157 | PIPE_HANGDUMP = (_GEN["PIPE"], 1)
158 | PIPE_GMP_CFG_GEN = (_GEN["PIPE"], 2)
159 | PIPE_EXT_GROUP = (_GEN["PIPE"], 3)
160 | PIPE_EXT_GROUP_MSG = (_GEN["PIPE"], 4)
161 | PIPE_JE = (_GEN["PIPE"], 5)
162 | PIPE_IFS_DOMAINS = (_GEN["PIPE"], 6)
163 | PIPE_CPOOL_SECURE_KEY = (_GEN["PIPE"], 7)
164 | PIPE_CPOOL_C2S = (_GEN["PIPE"], 8)
165 | PIPE_ISI_CERTS = (_GEN["PIPE"], 9)
166 | PIPE_NDMP = (_GEN["PIPE"], 10)
167 | PIPE_ZONED_ROLES = (_GEN["PIPE"], 11)
168 | FT_JE_ZOMBIE = (_GEN["PIPE"], 12)
169 | PIPE_CPOOL_GOOGLE_XML = (_GEN["PIPE"], 13)
170 | PIPE_SIQ = (_GEN["PIPE"], 14)
171 | PIPE_QUOTAS_MS = (_GEN["PIPE"], 15)
172 | PIPE_QUOTA_USER_CONTAINERS = (_GEN["PIPE"], 16)
173 | PIPE_TREEDELETE = (_GEN["PIPE"], 17)
174 | PIPE_DOMAIN_SNAPSHOTS = (_GEN["PIPE"], 18)
175 | PIPE_QUOTA_DDQ = (_GEN["PIPE"], 19)
176 | PIPE_ARRAYD = (_GEN["PIPE"], 20)
177 | PIPE_FLEXNET_V4 = (_GEN["PIPE"], 21)
178 | PIPE_ISI_DAEMON_IPV6 = (_GEN["PIPE"], 22)
179 | PIPE_JE_PREP = (_GEN["PIPE"], 23)
180 | PIPE_IFS_LFN = (_GEN["PIPE"], 24)
181 | PIPE_SNAP_SCHED_TARDIS = (_GEN["PIPE"], 25)
182 | PIPE_DRIVE_INTEROP = (_GEN["PIPE"], 26)
183 | PIPE_READ_BLOCKS = (_GEN["PIPE"], 27)
184 | PIPE_IFS_BCM = (_GEN["PIPE"], 28)
185 | PIPE_EXT_GRP_SRO = (_GEN["PIPE"], 29)
186 | PIPE_HDFS_EXTATTR = (_GEN["PIPE"], 30)
187 | PIPE_CP_2_0 = (_GEN["PIPE"], 31)
188 | PIPE_FILEPOLICY = (_GEN["PIPE"], 32)
189 | PIPE_COAL_SUSP_AGGR = (_GEN["PIPE"], 34)
190 | PIPE_SMARTCONNECT_DNS = (_GEN["PIPE"], 35)
191 | PIPE_PDM_ENC_INATTR = (_GEN["PIPE"], 36)
192 | PIPE_NDMP_REDIRECTOR = (_GEN["PIPE"], 38)
193 | PIPE_ISI_CBIND_D = (_GEN["PIPE"], 39)
194 | PIPE_SPARSE_PUNCH = (_GEN["PIPE"], 41)
195 | PIPE_SSH_CONFIG = (_GEN["PIPE"], 42)
196 | PIPE_AUDIT_EVENTS = (_GEN["PIPE"], 43)
197 | PIPE_PURPOSEDB = (_GEN["PIPE"], 45)
198 | PIPE_JE_TREEWALK = (_GEN["PIPE"], 47)
199 |
200 | ERA1_HDFS_TDE = (_GEN["ERA1"], 1)
201 | ERA1_QUOTA_APPLOGICAL = (_GEN["ERA1"], 4)
202 | ERA1_IDI_VERIFY_SNAPID = (_GEN["ERA1"], 6)
203 | ERA1_CPOOL_ALIYUN = (_GEN["ERA1"], 7)
204 | ERA1_STF_DUMMY_LINS = (_GEN["ERA1"], 8)
205 | ERA1_PDM_COLLECT = (_GEN["ERA1"], 13)
206 | ERA1_MCP_MLIST = (_GEN["ERA1"], 14)
207 | ERA1_NFS_SCHED_CONFIG = (_GEN["ERA1"], 16)
208 | ERA1_ADS_VOPS = (_GEN["ERA1"], 17)
209 | ERA1_GMP_SERVICE_LSASS = (_GEN["ERA1"], 18)
210 | ERA1_SINLIN_LOCK_ORDER = (_GEN["ERA1"], 20)
211 | ERA1_LIN_MASTER_FLAGS = (_GEN["ERA1"], 23)
212 | ERA1_REMOTE_SYSCTL_OBJECT = (_GEN["ERA1"], 25)
213 | ERA1_LIN_BUCKET_LOCK = (_GEN["ERA1"], 27)
214 | ERA1_PDM_SNAPGOV_RENAME = (_GEN["ERA1"], 34)
215 | # pylint: enable=invalid-name
216 |
217 |
218 | class OneFSError(IsilonHadoopToolError):
219 | """All Exceptions emitted from this module inherit from this Exception."""
220 |
221 |
222 | class OneFSConnectionError(OneFSError):
223 | """
224 | This Exception is raised when a client cannot connect to OneFS
225 | (e.g. due to socket.gaierror or urllib3.exceptions.MaxRetryError).
226 | """
227 |
228 |
229 | class OneFSCertificateError(OneFSConnectionError):
230 | """This exception occurs when a client cannot connect due to an invalid HTTPS certificate."""
231 |
232 |
233 | class NonSDKAPIError(OneFSError):
234 | """This exception is raised when interacting with OneFS APIs without the SDK fails."""
235 |
236 |
237 | class _BaseAPIError(OneFSError):
238 | def __init__(self, exc):
239 | super().__init__(str(exc))
240 | self.exc = exc
241 |
242 |
243 | class MalformedAPIError(_BaseAPIError):
244 | """This exception wraps an Isilon SDK ApiException that does not have a valid JSON body."""
245 |
246 |
247 | class UndecodableAPIError(MalformedAPIError):
248 | """This exception wraps an Isilon SDK ApiException that does not have a JSON-decodable body."""
249 |
250 |
251 | class APIError(_BaseAPIError):
252 |
253 | """This exception wraps an Isilon SDK ApiException."""
254 |
255 | # pylint: disable=invalid-name
256 | gid_already_exists_error_format = "Group already exists with gid '{0}'"
257 | group_already_exists_error_format = "Group '{0}' already exists"
258 | group_not_found_error_format = "Failed to find group for 'GROUP:{0}': No such group"
259 | group_unresolvable_error_format = "Could not resolve group {0}"
260 | license_expired_error_format = (
261 | # Note: Subscriptions did not exist prior to Freight Trains,
262 | # so old code assumes that only evaluations have expiration dates.
263 | "The evaluation license key for {0} has expired."
264 | " Please contact your Isilon representative."
265 | )
266 | license_missing_error_format = (
267 | "The {0} application is not currently installed."
268 | " Please contact your Isilon account team for"
269 | " more information on evaluating and purchasing {0}."
270 | )
271 | proxy_user_already_exists_error_format = "Proxyuser '{0}' already exists"
272 | try_again_error_format = (
273 | "OneFS API is temporarily unavailable. Try your request again."
274 | )
275 | uid_already_exists_error_format = "User already exists with uid '{0}'"
276 | user_already_exists_error_format = "User '{0}' already exists"
277 | user_already_in_group_error_format = (
278 | "Failed to add member UID:{0} to group GROUP:{1}:"
279 | " User is already in local group"
280 | )
281 | user_not_found_error_format = "Failed to find user for 'USER:{0}': No such user"
282 | user_unresolvable_error_format = "Could not resolve user {0}"
283 | zone_not_found_error_format = 'Access Zone "{0}" not found.'
284 | dir_path_already_exists_error_format = (
285 | "Unable to create directory as requested -- container already exists"
286 | )
287 | # pylint: enable=invalid-name
288 |
289 | def __str__(self):
290 | try:
291 | return "\n".join(error["message"] for error in self.errors()) or str(
292 | self.exc
293 | )
294 | except MalformedAPIError as exc:
295 | return str(exc)
296 |
297 | def errors(self):
298 | """Get errors listed in the exception."""
299 | try:
300 | json_body = json.loads(self.exc.body)
301 | except (
302 | TypeError, # self.exc.body is not a str.
303 | ValueError, # self.exc.body is not JSON.
304 | ) as exc:
305 | raise UndecodableAPIError(self.exc) from exc
306 | try:
307 | for error in json_body["errors"]:
308 | # Raise a KeyError if 'message' is not in error:
309 | error["message"] # pylint: disable=pointless-statement
310 | except (
311 | KeyError, # 'errors' or 'message' is not in json_body or error, respectively.
312 | TypeError, # json_body['errors'] is not iterable.
313 | ) as exc:
314 | raise MalformedAPIError(self.exc) from exc
315 | return json_body["errors"]
316 |
317 | def filtered_errors(self, filter_func):
318 | """Arbitrarily filter errors in the exception."""
319 | for error in self.errors():
320 | if filter_func(error):
321 | yield error
322 |
323 | def gid_already_exists_error(self, gid):
324 | """Returns True if the exception contains a GID already exists error."""
325 | return any(
326 | self.filtered_errors(
327 | lambda error: error["message"]
328 | == self.gid_already_exists_error_format.format(gid),
329 | )
330 | )
331 |
332 | def group_already_exists_error(self, group_name):
333 | """Returns True if the exception contains a group already exists error."""
334 | return any(
335 | self.filtered_errors(
336 | lambda error: error["message"]
337 | == self.group_already_exists_error_format.format(
338 | group_name,
339 | ),
340 | )
341 | )
342 |
343 | def group_not_found_error(self, group_name):
344 | """Returns True if the exception contains a group not found error."""
345 | return any(
346 | self.filtered_errors(
347 | lambda error: error["message"]
348 | == self.group_not_found_error_format.format(
349 | group_name,
350 | ),
351 | )
352 | )
353 |
354 | def group_unresolvable_error(self, group_name):
355 | """Returns True if the exception contains an unresolvable group error."""
356 | return any(
357 | self.filtered_errors(
358 | lambda error: error["message"]
359 | == self.group_unresolvable_error_format.format(
360 | group_name,
361 | ),
362 | )
363 | )
364 |
365 | def license_expired_error(self, license_name):
366 | """Returns True if the exception contains an expired license error."""
367 | return any(
368 | self.filtered_errors(
369 | lambda error: error["message"]
370 | == self.license_expired_error_format.format(
371 | license_name,
372 | ),
373 | )
374 | )
375 |
376 | def license_missing_error(self, license_name):
377 | """Returns True if the exception contains a missing license error."""
378 | return any(
379 | self.filtered_errors(
380 | lambda error: error["message"]
381 | == self.license_missing_error_format.format(
382 | license_name,
383 | ),
384 | )
385 | )
386 |
387 | def proxy_user_already_exists_error(self, proxy_user_name):
388 | """Returns True if the exception contains a proxy user already exists error."""
389 | return any(
390 | self.filtered_errors(
391 | lambda error: error["message"]
392 | == self.proxy_user_already_exists_error_format.format(
393 | proxy_user_name,
394 | ),
395 | )
396 | )
397 |
398 | def try_again_error(self):
399 | """Returns True if the exception indicated PAPI is temporarily unavailable."""
400 | return any(
401 | self.filtered_errors(
402 | lambda error: error["message"] == self.try_again_error_format,
403 | )
404 | )
405 |
406 | def uid_already_exists_error(self, uid):
407 | """Returns True if the exception contains a UID already exists error."""
408 | return any(
409 | self.filtered_errors(
410 | lambda error: error["message"]
411 | == self.uid_already_exists_error_format.format(uid),
412 | )
413 | )
414 |
415 | def user_already_exists_error(self, user_name):
416 | """Returns True if the exception contains a user already exists error."""
417 | return any(
418 | self.filtered_errors(
419 | lambda error: error["message"]
420 | == self.user_already_exists_error_format.format(
421 | user_name,
422 | ),
423 | )
424 | )
425 |
426 | def user_already_in_group_error(self, uid, group_name):
427 | """Returns True if the exception contains a user already in group error."""
428 | return any(
429 | self.filtered_errors(
430 | lambda error: error["message"]
431 | == self.user_already_in_group_error_format.format(
432 | uid,
433 | group_name,
434 | ),
435 | )
436 | )
437 |
438 | def user_not_found_error(self, user_name):
439 | """Returns True if the exception contains a user not found error."""
440 | return any(
441 | self.filtered_errors(
442 | lambda error: error["message"]
443 | == self.user_not_found_error_format.format(
444 | user_name,
445 | ),
446 | )
447 | )
448 |
449 | def user_unresolvable_error(self, user_name):
450 | """Returns True if the exception contains an unresolvable user error."""
451 | return any(
452 | self.filtered_errors(
453 | lambda error: error["message"]
454 | == self.user_unresolvable_error_format.format(
455 | user_name,
456 | ),
457 | )
458 | )
459 |
460 | def zone_not_found_error(self, zone_name):
461 | """Returns True if the exception contains a zone not found error."""
462 | return any(
463 | self.filtered_errors(
464 | lambda error: error["message"]
465 | == self.zone_not_found_error_format.format(
466 | zone_name,
467 | ),
468 | )
469 | )
470 |
471 | def dir_path_already_exists_error(self):
472 | """Returns True if the exception contains a directory path already exist error."""
473 | return any(
474 | self.filtered_errors(
475 | lambda error: error["message"]
476 | == self.dir_path_already_exists_error_format,
477 | )
478 | )
479 |
480 |
481 | class MissingLicenseError(OneFSError):
482 | """This Exception is raised when a license that is expected to exist cannot be found."""
483 |
484 |
485 | class ExpiredLicenseError(MissingLicenseError):
486 | """This Exception is raised when a license has expired."""
487 |
488 |
489 | class MissingZoneError(OneFSError):
490 | """This Exception is raised when a zone that is expected to exist cannot be found."""
491 |
492 |
493 | class MixedModeError(OneFSError):
494 | """
495 | This Exception is raised when an operation cannot succeed due to
496 | the cluster containing nodes running different versions of OneFS.
497 | """
498 |
499 |
500 | class UndeterminableVersion(OneFSError):
501 | """
502 | This Exception is raised when attempting to use this
503 | module with a version of OneFS that cannot be determined (usually < 8.0.0.0).
504 | """
505 |
506 |
507 | class UnsupportedVersion(OneFSError):
508 | """
509 | This Exception is raised when attempting to use this
510 | module with an unsupported version of OneFS.
511 | """
512 |
513 |
514 | class UnsupportedOperation(OneFSError):
515 | """
516 | This Exception is raised when attempting to conduct an unsupported
517 | operation with an specific version of OneFS.
518 | """
519 |
520 |
521 | def sdk_for_revision(revision, strict=False):
522 | """Get the SDK that is intended to work with a given OneFS revision."""
523 | # pylint: disable=too-many-return-statements,import-outside-toplevel
524 | if ONEFS_RELEASES["7.2.0.0"] <= revision < ONEFS_RELEASES["8.0.0.0"]:
525 | import isi_sdk_7_2
526 |
527 | return isi_sdk_7_2
528 | if ONEFS_RELEASES["8.0.0.0"] <= revision < ONEFS_RELEASES["8.0.1.0"]:
529 | import isi_sdk_8_0
530 |
531 | return isi_sdk_8_0
532 | if ONEFS_RELEASES["8.0.1.0"] <= revision < ONEFS_RELEASES["8.1.0.0"]:
533 | import isi_sdk_8_0_1
534 |
535 | return isi_sdk_8_0_1
536 | if ONEFS_RELEASES["8.1.0.0"] <= revision < ONEFS_RELEASES["8.1.1.0"]:
537 | import isi_sdk_8_1_0
538 |
539 | return isi_sdk_8_1_0
540 | if ONEFS_RELEASES["8.1.1.0"] <= revision < ONEFS_RELEASES["8.2.0.0"]:
541 | import isi_sdk_8_1_1
542 |
543 | return isi_sdk_8_1_1
544 | if ONEFS_RELEASES["8.2.0.0"] <= revision < ONEFS_RELEASES["8.2.1.0"]:
545 | import isi_sdk_8_2_0
546 |
547 | return isi_sdk_8_2_0
548 | if ONEFS_RELEASES["8.2.1.0"] <= revision < ONEFS_RELEASES["8.2.2.0"]:
549 | import isi_sdk_8_2_1
550 |
551 | return isi_sdk_8_2_1
552 | if ONEFS_RELEASES["8.2.2.0"] <= revision < ONEFS_RELEASES["8.2.3.0"]:
553 | import isi_sdk_8_2_2
554 |
555 | return isi_sdk_8_2_2
556 | # At this point, either the cluster is too old or too new;
557 | # however, new clusters still support old SDKs,
558 | # so, unless the caller asks to fail here, we'll fall back to the newest supported SDK.
559 | if strict:
560 | raise UnsupportedVersion(f"There is no SDK for OneFS revision 0x{revision:x}!")
561 | import isi_sdk_8_2_2
562 |
563 | return isi_sdk_8_2_2 # The latest SDK available.
564 |
565 |
566 | def accesses_onefs(func):
567 | """Decorate a Client method that makes an SDK call directly."""
568 |
569 | def _decorated(self, *args, **kwargs):
570 | while True:
571 | try:
572 | return func(self, *args, **kwargs)
573 | except urllib3.exceptions.MaxRetryError as exc:
574 | if isinstance(exc.reason, urllib3.exceptions.SSLError):
575 | # https://github.com/Isilon/isilon_sdk_python/issues/14
576 | raise OneFSCertificateError from exc
577 | raise OneFSConnectionError from exc
578 | except (
579 | self._sdk.rest.ApiException # pylint: disable=protected-access
580 | ) as exc:
581 | if all(
582 | [
583 | # https://github.com/PyCQA/pylint/issues/2841
584 | not exc.body, # pylint: disable=no-member
585 | "CERTIFICATE_VERIFY_FAILED"
586 | in (exc.reason or ""), # pylint: disable=no-member
587 | ]
588 | ):
589 | raise OneFSCertificateError from exc
590 | wrapped_exc = APIError(exc)
591 | if not wrapped_exc.try_again_error():
592 | raise wrapped_exc from exc
593 | time.sleep(2)
594 | LOGGER.info(wrapped_exc.try_again_error_format)
595 |
596 | return _decorated
597 |
598 |
599 | class OneFSValueError(OneFSError, ValueError):
600 | """
601 | This exception is raised by this module instead
602 | of a ValueError (but has the same meaning).
603 | """
604 |
605 |
606 | def _license_is_active(license_):
607 | return license_.status.lower() in ["activated", "evaluation", "licensed"]
608 |
609 |
610 | class BaseClient: # pylint: disable=too-many-public-methods,too-many-instance-attributes
611 |
612 | """Interact with OneFS."""
613 |
614 | def __init__(
615 | self, address, username, password, default_zone="System", verify_ssl=True
616 | ):
617 | # Set attributes without setters first.
618 | self.default_zone = default_zone
619 | # We don't know what version we are pointed at yet, but we have to start somewhere.
620 | # Riptide was the first SDK to support ClusterApi().get_cluster_version.
621 | self._sdk = sdk_for_revision(ONEFS_RELEASES["8.0.0.0"])
622 | # Attributes with setters (see below) depend on having a Configuration object to manipulate.
623 | self._configuration = self._sdk.Configuration()
624 | self._address = None # This will truly be set last.
625 |
626 | # Set attributes with setters last.
627 | self.verify_ssl = verify_ssl
628 | self.username = username
629 | self.password = password
630 | # Set the address last so the rest of the configuration is in place for making requests
631 | # (which will be needed for checking the cluster version).
632 | self.address = address
633 |
634 | @property
635 | def _api_client(self):
636 | return self._sdk.ApiClient(self._configuration)
637 |
638 | @accesses_onefs
639 | def _groups(self, zone=None):
640 | return (
641 | self._sdk.AuthApi(self._api_client)
642 | .list_auth_groups(
643 | zone=zone or self.default_zone,
644 | )
645 | .groups
646 | )
647 |
648 | @accesses_onefs
649 | def _keytab_entries(self, provider):
650 | providers_krb5 = self._sdk.AuthApi(self._api_client).get_providers_krb5_by_id(
651 | provider
652 | )
653 | return providers_krb5.krb5[0].keytab_entries
654 |
655 | @accesses_onefs
656 | def _license(self, name):
657 | return self._sdk.LicenseApi(self._api_client).get_license_license(name)
658 |
659 | @accesses_onefs
660 | def _pools(self, *args, **kwargs):
661 | return (
662 | self._sdk.NetworkApi(self._api_client)
663 | .get_network_pools(*args, **kwargs)
664 | .pools
665 | )
666 |
667 | @accesses_onefs
668 | def _realms(self):
669 | return self._sdk.AuthApi(self._api_client).list_settings_krb5_realms().realm
670 |
671 | def _refresh_sdk(self):
672 | try:
673 | self._revision = ( # pylint: disable=attribute-defined-outside-init
674 | self.revision()
675 | )
676 | except AttributeError as exc:
677 | raise UndeterminableVersion from exc
678 | self._sdk = sdk_for_revision(self._revision)
679 |
680 | @accesses_onefs
681 | def _upgrade_cluster(self):
682 | return self._sdk.UpgradeApi(self._api_client).get_upgrade_cluster()
683 |
684 | @accesses_onefs
685 | def _version(self):
686 | return self._sdk.ClusterApi(self._api_client).get_cluster_version()
687 |
688 | def _zone(self, name):
689 | for zone in self._zones():
690 | # Zone names are NOT case-sensitive.
691 | if zone.name.lower() == name.lower():
692 | return zone
693 | raise MissingZoneError(name)
694 |
695 | def _zone_real_path(self, path, zone=None):
696 | return posixpath.join(
697 | self.zone_settings(zone=zone or self.default_zone)["path"],
698 | path.lstrip(posixpath.sep),
699 | )
700 |
701 | @accesses_onefs
702 | def _zones(self):
703 | return self._sdk.ZonesApi(self._api_client).list_zones().zones
704 |
705 | @accesses_onefs
706 | def acl_settings(self):
707 | """Get global ACL settings."""
708 | acl_settings = (
709 | self._sdk.AuthApi(self._api_client).get_settings_acls().acl_policy_settings
710 | )
711 | return {
712 | "access": acl_settings.access,
713 | "calcmode": acl_settings.calcmode,
714 | "calcmode_group": acl_settings.calcmode_group,
715 | "calcmode_owner": acl_settings.calcmode_owner,
716 | "chmod": acl_settings.chmod,
717 | "chmod_007": acl_settings.chmod_007,
718 | "chmod_inheritable": acl_settings.chmod_inheritable,
719 | "chown": acl_settings.chown,
720 | "create_over_smb": acl_settings.create_over_smb,
721 | "dos_attr": acl_settings.dos_attr,
722 | "group_owner_inheritance": acl_settings.group_owner_inheritance,
723 | "rwx": acl_settings.rwx,
724 | "synthetic_denies": acl_settings.synthetic_denies,
725 | "utimes": acl_settings.utimes,
726 | }
727 |
728 | @accesses_onefs
729 | def add_user_to_group(self, user_name, group_name, zone=None):
730 | """Add a user to a group."""
731 | group_member_cls = (
732 | self._sdk.GroupMember
733 | if self._revision < ONEFS_RELEASES["8.0.1.0"]
734 | else self._sdk.AuthAccessAccessItemFileGroup
735 | )
736 | try:
737 | self._sdk.AuthGroupsApi(self._api_client).create_group_member(
738 | group_member_cls(
739 | type="user",
740 | name=user_name,
741 | ),
742 | group_name,
743 | zone=zone or self.default_zone,
744 | )
745 | except ValueError as exc:
746 | # https://bugs.west.isilon.com/show_bug.cgi?id=231922
747 | assert all(
748 | [
749 | str(exc) == "Invalid value for `id`, must not be `None`",
750 | user_name
751 | in [
752 | member.name
753 | for member in self._sdk.AuthGroupsApi(self._api_client)
754 | .list_group_members(
755 | group_name,
756 | zone=zone or self.default_zone,
757 | )
758 | .members
759 | ],
760 | ]
761 | )
762 |
763 | @property
764 | def address(self):
765 | """Get the address to connect to OneFS at."""
766 | # self._address may be None if self.host was set directly.
767 | return self._address or urlparse(self.host).hostname
768 |
769 | @address.setter
770 | def address(self, address):
771 | """
772 | Set the address to connect to OneFS at.
773 | If the address is a name, it will be resolved first to avoid config propagation problems.
774 | To avoid that, set host instead.
775 | """
776 | try:
777 | # If address is a SmartConnect name, making calls too fast can result in errors
778 | # due to changes not propagating fast enough across a cluster.
779 | # This problem gets worse on larger clusters.
780 | # So, we will choose 1 node to connect to and use that.
781 | netloc = socket.gethostbyname(address)
782 | except socket.gaierror as exc:
783 | raise OneFSConnectionError from exc
784 | if ":" in netloc: # IPv6
785 | netloc = f"[{netloc}]"
786 |
787 | # Keep every part of self.host, except the hostname/address.
788 | parsed = urlparse(self.host)
789 | if parsed.port is not None:
790 | netloc += ":" + str(parsed.port)
791 | self.host = urlunparse(parsed._replace(netloc=netloc))
792 |
793 | # Setting self.host unsets self._address:
794 | self._address = address
795 |
796 | def check_license(self, name):
797 | """Check for a license on OneFS and raise a MissingLicenseError if it doesn't exist."""
798 | [license_] = self._license(name).licenses
799 | if not _license_is_active(license_):
800 | if (
801 | license_.expiration
802 | and datetime.strptime(license_.expiration, "%Y-%m-%d").date()
803 | < date.today()
804 | ):
805 | raise ExpiredLicenseError(name)
806 | raise MissingLicenseError(name)
807 |
808 | def check_zone(self, name):
809 | """Check for a zone on OneFS and raise a MissingZoneError if it doesn't exist."""
810 | if not self.has_zone(name):
811 | raise MissingZoneError(name)
812 |
813 | @accesses_onefs
814 | def chmod(self, path, mode, zone=None):
815 | """Change the (integer) mode of a (zone-root-relative) path."""
816 | real_path = self._zone_real_path(path, zone=zone or self.default_zone)
817 | self._sdk.NamespaceApi(self._api_client).set_acl(
818 | namespace_path=real_path.lstrip(posixpath.sep),
819 | acl=True,
820 | namespace_acl=self._sdk.NamespaceAcl(
821 | authoritative="mode",
822 | mode=f"{mode:o}",
823 | ),
824 | )
825 |
826 | @accesses_onefs
827 | def chown(self, path, owner=None, group=None, zone=None):
828 | """Change the owning user and/or group of a (zone-root-relative) path."""
829 | real_path = self._zone_real_path(path, zone=zone)
830 | ns_acl_kwargs = {"authoritative": "mode"}
831 | if owner is not None:
832 | # Get the UID of the owner to avoid name resolution problems across zones
833 | # (e.g. using the System zone to configure a different zone).
834 | uid = (
835 | owner if isinstance(owner, int) else self.uid_of_user(owner, zone=zone)
836 | )
837 | ns_acl_kwargs["owner"] = self._sdk.MemberObject(type="UID", id=f"UID:{uid}")
838 | if group is not None:
839 | # Get the GID of the group to avoid name resolution problems across zones
840 | # (e.g. using the System zone to configure a different zone).
841 | gid = (
842 | group if isinstance(group, int) else self.gid_of_group(group, zone=zone)
843 | )
844 | ns_acl_kwargs["group"] = self._sdk.MemberObject(type="GID", id=f"GID:{gid}")
845 | self._sdk.NamespaceApi(self._api_client).set_acl(
846 | namespace_path=real_path.lstrip(posixpath.sep),
847 | acl=True,
848 | namespace_acl=self._sdk.NamespaceAcl(**ns_acl_kwargs),
849 | )
850 |
851 | @accesses_onefs
852 | def create_auth_provider(self, realm, user, password):
853 | """Create a Kerberos auth provider."""
854 | self._sdk.AuthApi(self._api_client).create_providers_krb5_item(
855 | self._sdk.ProvidersKrb5Item(
856 | realm=realm,
857 | user=user,
858 | password=password,
859 | ),
860 | )
861 |
862 | @accesses_onefs
863 | def create_group(self, name, gid=None, zone=None):
864 | """Create a group."""
865 | self._sdk.AuthApi(self._api_client).create_auth_group(
866 | self._sdk.AuthGroupCreateParams(
867 | name=name,
868 | gid=gid,
869 | ),
870 | zone=zone or self.default_zone,
871 | )
872 |
873 | @accesses_onefs
874 | def create_hdfs_proxy_user(self, name, members=None, zone=None):
875 | """Create an HDFS proxy user."""
876 | if members is not None:
877 | group_member_cls = (
878 | self._sdk.GroupMember
879 | if self._revision < ONEFS_RELEASES["8.0.1.0"]
880 | else self._sdk.AuthAccessAccessItemFileGroup
881 | )
882 | members = [
883 | group_member_cls(
884 | name=member_name,
885 | type=member_type,
886 | )
887 | for member_name, member_type in members
888 | ]
889 | self._sdk.ProtocolsApi(self._api_client).create_hdfs_proxyuser(
890 | self._sdk.HdfsProxyuserCreateParams(name=name, members=members),
891 | zone=zone or self.default_zone,
892 | )
893 |
894 | @accesses_onefs
895 | def create_realm(self, name, admin_server, kdcs):
896 | """Create a realm configuration on OneFS."""
897 | try:
898 | self._sdk.AuthApi(self._api_client).create_settings_krb5_realm(
899 | self._sdk.SettingsKrb5RealmCreateParams(
900 | realm=name,
901 | admin_server=admin_server,
902 | kdc=kdcs,
903 | ),
904 | )
905 | except ValueError as exc:
906 | # https://bugs.west.isilon.com/show_bug.cgi?id=231054
907 | auth_api = self._sdk.AuthApi(self._api_client)
908 | assert all(
909 | [
910 | str(exc) == "Invalid value for `id`, must not be `None`",
911 | name
912 | in [
913 | krb5.realm
914 | for krb5 in auth_api.list_settings_krb5_realms().realm
915 | ],
916 | ]
917 | )
918 |
919 | @accesses_onefs
920 | def create_spn(self, spn, realm, user, password):
921 | """Create an SPN in a Kerberos realm."""
922 | providers_krb5_item = self._sdk.ProvidersKrb5Item(
923 | realm=realm,
924 | user=user,
925 | password=password,
926 | )
927 | keytab_entry = self._sdk.ProvidersKrb5IdParamsKeytabEntry()
928 | keytab_entry.spn = spn
929 | providers_krb5_item.keytab_entries = [keytab_entry]
930 | self._sdk.AuthApi(self._api_client).create_providers_krb5_item(
931 | providers_krb5_item
932 | )
933 |
934 | @accesses_onefs
935 | def create_user(
936 | self, name, primary_group_name, uid=None, zone=None, enabled=None, password=None
937 | ):
938 | """Create a user."""
939 | group_member_cls = (
940 | self._sdk.GroupMember
941 | if self._revision < ONEFS_RELEASES["8.0.1.0"]
942 | else self._sdk.AuthAccessAccessItemFileGroup
943 | )
944 | self._sdk.AuthApi(self._api_client).create_auth_user(
945 | self._sdk.AuthUserCreateParams(
946 | name=name,
947 | enabled=enabled,
948 | primary_group=group_member_cls(
949 | type="group",
950 | name=primary_group_name,
951 | ),
952 | uid=uid,
953 | password=password,
954 | ),
955 | zone=zone or self.default_zone,
956 | )
957 |
958 | @accesses_onefs
959 | def delete_auth_provider(self, name):
960 | """Delete a Kerberos auth provider."""
961 | self._sdk.AuthApi(self._api_client).delete_providers_krb5_by_id(name)
962 |
963 | @accesses_onefs
964 | def delete_group(self, name, zone=None):
965 | """Delete a group."""
966 | self._sdk.AuthApi(self._api_client).delete_auth_group(
967 | name,
968 | zone=zone or self.default_zone,
969 | )
970 |
971 | @accesses_onefs
972 | def delete_hdfs_proxy_user(
973 | self,
974 | name,
975 | zone=None,
976 | ):
977 | """Delete an HDFS proxy user."""
978 | self._sdk.ProtocolsApi(self._api_client).delete_hdfs_proxyuser(
979 | name,
980 | zone=zone or self.default_zone,
981 | )
982 |
983 | @accesses_onefs
984 | def delete_realm(self, name):
985 | """Delete a Kerberos realm configuration."""
986 | self._sdk.AuthApi(self._api_client).delete_settings_krb5_realm(name)
987 |
988 | @accesses_onefs
989 | def delete_spn(self, spn, provider):
990 | """Delete a Kerberos SPN."""
991 | self._sdk.AuthApi(self._api_client).update_providers_krb5_by_id(
992 | self._sdk.ProvidersKrb5IdParams(
993 | keytab_entries=[
994 | keytab_entry
995 | for keytab_entry in self._keytab_entries(provider=provider)
996 | if keytab_entry.spn not in [spn, spn + "@" + provider]
997 | ]
998 | ),
999 | provider,
1000 | )
1001 |
1002 | @accesses_onefs
1003 | def delete_user(self, name, zone=None):
1004 | """Delete a user."""
1005 | self._sdk.AuthApi(self._api_client).delete_auth_user(
1006 | name,
1007 | zone=zone or self.default_zone,
1008 | )
1009 |
1010 | def feature_is_supported(self, feature):
1011 | """Determine if a given OneFSFeature is supported."""
1012 |
1013 | feature_gen, feature_bit = feature.value
1014 |
1015 | upgrade_cluster = self._upgrade_cluster()
1016 | try:
1017 | committed_features = upgrade_cluster.committed_features
1018 | except AttributeError as exc:
1019 | raise UnsupportedOperation(
1020 | "OneFS 8.2.0 or later is required for feature flag support.",
1021 | ) from exc
1022 |
1023 | entries_for_gen = [
1024 | entry.bits
1025 | for entry in committed_features.gen_bits
1026 | if entry.gen == feature_gen
1027 | ]
1028 | if not entries_for_gen:
1029 | return bool(feature_gen <= committed_features.default_gen)
1030 |
1031 | return any(
1032 | feature_bit == (i * 64) + offset # Each entry can have up to 64 offsets.
1033 | for i, offsets in enumerate(entries_for_gen)
1034 | for offset in offsets
1035 | )
1036 |
1037 | @accesses_onefs
1038 | def flush_auth_cache(self, zone=None):
1039 | """Flush the Security Objects Cache."""
1040 | if self._revision < ONEFS_RELEASES["8.0.1.0"]:
1041 | _zone = zone or self.default_zone
1042 | if _zone and _zone.lower() != "system":
1043 | raise UnsupportedOperation(
1044 | "The auth cache can only be flushed on the System zone before OneFS 8.0.1.",
1045 | )
1046 | response = requests.delete(
1047 | url=self.host + "/platform/3/auth/users",
1048 | verify=self.verify_ssl,
1049 | auth=(self.username, self.password),
1050 | params={"cached": True},
1051 | timeout=REQUEST_TIMEOUT,
1052 | )
1053 | try:
1054 | response.raise_for_status()
1055 | except requests.exceptions.HTTPError as exc:
1056 | raise NonSDKAPIError("The auth cache could not be flushed.") from exc
1057 | else:
1058 | assert bool(
1059 | response.status_code
1060 | == requests.codes.no_content, # pylint: disable=no-member
1061 | )
1062 | else:
1063 | try:
1064 | self._sdk.AuthApi(self._api_client).create_auth_cache_item(
1065 | auth_cache_item=self._sdk.AuthCacheItem(all="all"),
1066 | zone=zone or self.default_zone,
1067 | )
1068 | except ValueError as exc:
1069 | # https://bugs.west.isilon.com/show_bug.cgi?id=232142
1070 | assert str(exc) == "Invalid value for `id`, must not be `None`"
1071 |
1072 | @accesses_onefs
1073 | def gid_of_group(self, group_name, zone=None):
1074 | """Get the GID of a group."""
1075 | auth_groups = self._sdk.AuthApi(self._api_client).get_auth_group(
1076 | group_name,
1077 | zone=zone or self.default_zone,
1078 | )
1079 | assert (
1080 | len(auth_groups.groups) == 1
1081 | ), "Do you have duplicate groups (e.g. local and LDAP)?"
1082 | return int(auth_groups.groups[0].gid.id.split(":")[1])
1083 |
1084 | def groups(self, zone=None):
1085 | """Get the auth groups OneFS knows about."""
1086 | for group in self._groups(zone=zone or self.default_zone):
1087 | yield group.name
1088 |
1089 | def has_license(self, name):
1090 | """Check for a OneFS license on OneFS."""
1091 | return any(
1092 | _license_is_active(license) for license in self._license(name).licenses
1093 | )
1094 |
1095 | def has_zone(self, name):
1096 | """Check for a zone on OneFS."""
1097 | return self._zone(name) is not None
1098 |
1099 | @accesses_onefs
1100 | def hdfs_inotify_settings(self, zone=None):
1101 | """Get HDFS inotify settings for an access zone."""
1102 | try:
1103 | hdfs_inotify_settings = (
1104 | self._sdk.ProtocolsApi(
1105 | self._api_client,
1106 | )
1107 | .get_hdfs_inotify_settings(
1108 | zone=zone or self.default_zone,
1109 | )
1110 | .settings
1111 | )
1112 | except AttributeError as exc:
1113 | raise UnsupportedOperation(
1114 | "OneFS 8.1.1 or later is required for INotify support."
1115 | ) from exc
1116 | return {
1117 | "enabled": hdfs_inotify_settings.enabled,
1118 | "maximum_delay": hdfs_inotify_settings.maximum_delay,
1119 | "retention": hdfs_inotify_settings.retention,
1120 | }
1121 |
1122 | @accesses_onefs
1123 | def hdfs_settings(self, zone=None):
1124 | """Get HDFS settings for an access zone."""
1125 | hdfs_settings = (
1126 | self._sdk.ProtocolsApi(self._api_client)
1127 | .get_hdfs_settings(
1128 | zone=zone or self.default_zone,
1129 | )
1130 | .settings
1131 | )
1132 | return {
1133 | "ambari_namenode": hdfs_settings.ambari_namenode,
1134 | "ambari_server": hdfs_settings.ambari_server,
1135 | "authentication_mode": hdfs_settings.authentication_mode,
1136 | "default_block_size": hdfs_settings.default_block_size,
1137 | "default_checksum_type": hdfs_settings.default_checksum_type,
1138 | "odp_version": hdfs_settings.odp_version,
1139 | "root_directory": hdfs_settings.root_directory,
1140 | "service": hdfs_settings.service,
1141 | "webhdfs_enabled": hdfs_settings.webhdfs_enabled,
1142 | }
1143 |
1144 | @property
1145 | def host(self):
1146 | """Get the URL to connect to OneFS at."""
1147 | return self._configuration.host
1148 |
1149 | @host.setter
1150 | def host(self, host):
1151 | """Set the URL to connect to OneFS at."""
1152 | self._configuration.host = host
1153 | # self.host may now point to an unrelated address:
1154 | self._address = None
1155 | # self.host may now point to a different version of OneFS:
1156 | self._refresh_sdk()
1157 |
1158 | def list_spns(self, provider):
1159 | """Get a list of keytab entries for a Kerberos auth provider."""
1160 | return [
1161 | keytab_entry.spn for keytab_entry in self._keytab_entries(provider=provider)
1162 | ]
1163 |
1164 | @accesses_onefs
1165 | def mkdir(self, path, mode, recursive=False, overwrite=False, zone=None):
1166 | """Create a directory at a (zone-root-relative) path with the given (integer) mode."""
1167 | real_path = self._zone_real_path(path, zone=zone or self.default_zone)
1168 | if posixpath.sep not in real_path.strip(posixpath.sep):
1169 | # The first component of real_path is actually a RAN namespace.
1170 | # In this case, there is only one component: ifs.
1171 | # The ifs namespace cannot be modified,
1172 | # but calling create_directory on any namespace will fail.
1173 | raise OneFSValueError("Calling mkdir on the ifs namespace will fail.")
1174 | self._sdk.NamespaceApi(self._api_client).create_directory(
1175 | directory_path=real_path.lstrip(posixpath.sep),
1176 | x_isi_ifs_target_type="container",
1177 | x_isi_ifs_access_control=f"{mode:o}",
1178 | recursive=recursive,
1179 | overwrite=overwrite,
1180 | )
1181 |
1182 | @accesses_onefs
1183 | def node_addresses(self, zone=None):
1184 | """Get IP addresses in pools associated with a zone."""
1185 | return {
1186 | socket.inet_ntoa(struct.pack("!I", ip))
1187 | for pool in self._sdk.NetworkApi(self._api_client)
1188 | .get_network_pools(
1189 | access_zone=zone or self.default_zone,
1190 | )
1191 | .pools
1192 | for range_ in pool.ranges
1193 | for ip in range(
1194 | struct.unpack("!I", socket.inet_aton(range_.low))[0],
1195 | struct.unpack("!I", socket.inet_aton(range_.high))[0] + 1,
1196 | )
1197 | }
1198 |
1199 | @property
1200 | def password(self):
1201 | """Get the password to connect to OneFS with."""
1202 | return self._configuration.password
1203 |
1204 | @password.setter
1205 | def password(self, password):
1206 | """Set the password to connect to OneFS with."""
1207 | self._configuration.password = password
1208 |
1209 | @accesses_onefs
1210 | def permissions(self, path, zone=None):
1211 | """Get the owner, group, and (integer) mode of a (zone-root-relative) path."""
1212 | real_path = self._zone_real_path(path, zone=zone or self.default_zone)
1213 | acl = self._sdk.NamespaceApi(self._api_client).get_acl(
1214 | namespace_path=real_path.lstrip(posixpath.sep),
1215 | acl=True,
1216 | )
1217 | return {
1218 | "group": acl.group.name,
1219 | "mode": int(acl.mode, base=8),
1220 | "owner": acl.owner.name,
1221 | }
1222 |
1223 | @accesses_onefs
1224 | def primary_group_of_user(self, user_name, zone=None):
1225 | """Get the name of the primary group of a user."""
1226 | auth_users = self._sdk.AuthApi(self._api_client).get_auth_user(
1227 | user_name,
1228 | zone=zone or self.default_zone,
1229 | )
1230 | assert (
1231 | len(auth_users.users) == 1
1232 | ), "Do you have duplicate users (e.g. local and LDAP)?"
1233 | return auth_users.users[0].gid.name
1234 |
1235 | def realms(self):
1236 | """Get the Kerberos realms OneFS knows about."""
1237 | for realm in self._realms():
1238 | yield realm.realm
1239 |
1240 | def revision(self):
1241 | """Get the revision number of the cluster."""
1242 | revisions = set(self.revisions().values())
1243 | if len(revisions) != 1:
1244 | raise MixedModeError(", ".join(revisions))
1245 | return revisions.pop()
1246 |
1247 | def revisions(self):
1248 | """Get the revision numbers of each node in the cluster."""
1249 | return {node.id: int(node.revision) for node in self._version().nodes}
1250 |
1251 | @accesses_onefs
1252 | def rmdir(self, path, recursive=False, zone=None):
1253 | """Delete the directory at a (zone-root-relative) path."""
1254 | real_path = self._zone_real_path(path, zone=zone or self.default_zone)
1255 | self._sdk.NamespaceApi(self._api_client).delete_directory(
1256 | directory_path=real_path.lstrip(posixpath.sep),
1257 | recursive=recursive,
1258 | )
1259 |
1260 | def smartconnect_zone(self, smartconnect):
1261 | """Get the access zone name associated with a SmartConnect name."""
1262 | for pool in self._pools():
1263 | if pool.sc_dns_zone.lower() == smartconnect.lower():
1264 | return pool.access_zone
1265 | return None
1266 |
1267 | @accesses_onefs
1268 | def uid_of_user(self, user_name, zone=None):
1269 | """Get the UID of a user."""
1270 | auth_users = self._sdk.AuthApi(self._api_client).get_auth_user(
1271 | user_name,
1272 | zone=zone or self.default_zone,
1273 | )
1274 | assert (
1275 | len(auth_users.users) == 1
1276 | ), "Do you have duplicate users (e.g. local and LDAP)?"
1277 | return int(auth_users.users[0].uid.id.split(":")[1])
1278 |
1279 | @accesses_onefs
1280 | def update_acl_settings(self, settings):
1281 | """Set ACL settings."""
1282 | acl_settings = self._sdk.SettingsAclsAclPolicySettings()
1283 | for key, value in settings.items():
1284 | try:
1285 | getattr(acl_settings, key)
1286 | except AttributeError as exc:
1287 | raise OneFSValueError(f'"{key}" is not a valid ACL setting.') from exc
1288 | setattr(acl_settings, key, value)
1289 | self._sdk.AuthApi(self._api_client).update_settings_acls(acl_settings)
1290 |
1291 | @accesses_onefs
1292 | def update_hdfs_settings(self, settings, zone=None):
1293 | """Set HDFS settings for an access zone."""
1294 | hdfs_settings = self._sdk.HdfsSettingsSettings()
1295 | for key, value in settings.items():
1296 | try:
1297 | getattr(hdfs_settings, key)
1298 | except AttributeError as exc:
1299 | raise OneFSValueError(f'"{key}" is not a valid HDFS setting.') from exc
1300 | setattr(hdfs_settings, key, value)
1301 | self._sdk.ProtocolsApi(self._api_client).update_hdfs_settings(
1302 | hdfs_settings,
1303 | zone=zone or self.default_zone,
1304 | )
1305 |
1306 | @accesses_onefs
1307 | def update_zone_settings(self, settings, zone=None):
1308 | """Set the settings for an access zone."""
1309 | zone_settings = self._sdk.Zone()
1310 | for key, value in settings.items():
1311 | try:
1312 | getattr(zone_settings, key)
1313 | except AttributeError as exc:
1314 | raise OneFSValueError(f'"{key}" is not a valid zone setting.') from exc
1315 | setattr(zone_settings, key, value)
1316 | self._sdk.ZonesApi(self._api_client).update_zone(
1317 | zone_settings, zone or self.default_zone
1318 | )
1319 |
1320 | @property
1321 | def username(self):
1322 | """Get the user to connect to OneFS as."""
1323 | return self._configuration.username
1324 |
1325 | @username.setter
1326 | def username(self, username):
1327 | """Set the user to connect to OneFS as."""
1328 | self._configuration.username = username
1329 |
1330 | @accesses_onefs
1331 | def user_groups(self, user_name, zone=None):
1332 | """Get the groups a user is in."""
1333 | auth_users = self._sdk.AuthApi(self._api_client).get_auth_user(
1334 | auth_user_id=f"USER:{user_name}",
1335 | query_member_of=True,
1336 | zone=zone or self.default_zone,
1337 | )
1338 | assert (
1339 | len(auth_users.users) == 1
1340 | ), "Do you have duplicate users (e.g. local and LDAP)?"
1341 | return [group.name for group in auth_users.users[0].member_of]
1342 |
1343 | @accesses_onefs
1344 | def users(
1345 | self,
1346 | zone=None,
1347 | key=lambda sdk_auth_user: sdk_auth_user.name,
1348 | filter_=lambda _: True,
1349 | ):
1350 | """Get a list of users that exist in an access zone on OneFS."""
1351 | for user in (
1352 | self._sdk.AuthApi(self._api_client)
1353 | .list_auth_users(
1354 | zone=zone or self.default_zone,
1355 | )
1356 | .users
1357 | ):
1358 | if filter_(user):
1359 | yield key(user)
1360 |
1361 | @property
1362 | def verify_ssl(self):
1363 | """Determine whether the OneFS SSL certificate will be verified or not."""
1364 | return self._configuration.verify_ssl
1365 |
1366 | @verify_ssl.setter
1367 | def verify_ssl(self, verify_ssl):
1368 | """Specify whether to verify the OneFS SSL certificate or not."""
1369 | self._configuration.verify_ssl = bool(verify_ssl)
1370 |
1371 | @property
1372 | def zone(self):
1373 | """Get the default zone (used when a zone is not provided for a zone-specific operation)."""
1374 | return self.default_zone
1375 |
1376 | def zone_settings(self, zone=None):
1377 | """Get settings for an access zone."""
1378 | zone_settings = self._zone(zone or self.default_zone)
1379 | return {
1380 | "alternate_system_provider": zone_settings.alternate_system_provider,
1381 | "auth_providers": zone_settings.auth_providers,
1382 | "cache_entry_expiry": zone_settings.cache_entry_expiry,
1383 | "create_path": zone_settings.create_path,
1384 | "groupnet": zone_settings.groupnet,
1385 | "home_directory_umask": zone_settings.home_directory_umask,
1386 | "id": zone_settings.id,
1387 | "map_untrusted": zone_settings.map_untrusted,
1388 | "name": zone_settings.name,
1389 | "netbios_name": zone_settings.netbios_name,
1390 | "path": zone_settings.path,
1391 | "skeleton_directory": zone_settings.skeleton_directory,
1392 | "system": zone_settings.system,
1393 | "system_provider": zone_settings.system_provider,
1394 | "user_mapping_rules": zone_settings.user_mapping_rules,
1395 | "zone_id": zone_settings.zone_id,
1396 | }
1397 |
1398 | def zones(self):
1399 | """Get the list of access zone names available on the cluster."""
1400 | for zone in self._zones():
1401 | yield zone.name
1402 |
1403 |
1404 | class Client(BaseClient):
1405 |
1406 | """Do some basic checks after connecting to OneFS."""
1407 |
1408 | @classmethod
1409 | def for_hdfs(cls, *args, **kwargs):
1410 | """Connect to OneFS and do some basic HDFS-related checks."""
1411 | onefs = cls(*args, **kwargs)
1412 | onefs.check_license("HDFS")
1413 | LOGGER.debug("HDFS is licensed.")
1414 | return onefs
1415 |
1416 | def __init__(self, *args, **kwargs):
1417 | LOGGER.debug("Connecting to the OneFS cluster...")
1418 | super().__init__(*args, **kwargs)
1419 | LOGGER.debug("OneFS interactions will go to %s.", self.host)
1420 | self.check_zone(self.zone)
1421 | LOGGER.debug("The %s zone exists.", self.zone)
1422 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Define config and fixtures for testing the functionality of isilon_hadoop_tools."""
2 |
3 |
4 | import configparser
5 | from contextlib2 import ExitStack as does_not_raise
6 | from enum import Enum
7 | import json
8 | import os
9 | import random
10 | import tempfile
11 | from unittest.mock import Mock, patch # Python 3
12 | import uuid
13 |
14 | import kadmin
15 | import pytest
16 | import requests
17 | import urllib3
18 |
19 | from isilon_hadoop_tools import directories, identities, onefs, IsilonHadoopToolError
20 |
21 |
22 | urllib3.disable_warnings() # Without this, the SDK will emit InsecureRequestWarning on every call.
23 |
24 |
25 | def pytest_addoption(parser):
26 | parser.addoption(
27 | "--address",
28 | help="OneFS Address",
29 | )
30 | parser.addoption(
31 | "--password",
32 | help="OneFS Admin Password",
33 | )
34 | parser.addoption(
35 | "--username",
36 | default="root",
37 | help="OneFS Admin Username",
38 | )
39 | parser.addoption(
40 | "--realm",
41 | help="Kerberos Realm",
42 | )
43 | parser.addoption(
44 | "--kadmin-address",
45 | help="Kerberos Administration Server Address",
46 | )
47 | parser.addoption(
48 | "--kdc-addresses",
49 | help="Kerberos Key Distribution Center Addresses",
50 | nargs="+",
51 | )
52 | parser.addoption(
53 | "--kadmin-username",
54 | help="Kerberos Administration Server Admin Username",
55 | )
56 | parser.addoption(
57 | "--kadmin-password",
58 | help="Kerberos Administration Server Admin Password",
59 | )
60 |
61 |
62 | @pytest.fixture
63 | def max_retry_exception_mock():
64 | """Get an object that raises MaxRetryError (from urllib3) when called."""
65 | return Mock(side_effect=urllib3.exceptions.MaxRetryError(pool=None, url=None))
66 |
67 |
68 | @pytest.fixture(
69 | params=[
70 | "unresolvable.invalid", # unresolvable
71 | "localhost", # resolvable, not OneFS
72 | "127.0.0.1", # IPv4, not OneFS
73 | "::1", # IPv6, not OneFS -- If IPv6 is not enabled, this is the same as "unresolvable".
74 | ],
75 | )
76 | def invalid_address(request, max_retry_exception_mock):
77 | """Get an address that will cause connection errors for onefs.Client."""
78 | try:
79 | # This is how the SDK checks whether localhost is OneFS:
80 | # https://github.com/Isilon/isilon_sdk_python/blob/19958108ec550865ebeb1f2a4d250322cf4681c2/isi_sdk/rest.py#L33
81 | __import__("isi.rest")
82 | except ImportError:
83 | # Different hostnames/addresses hit errors in different code paths.
84 | # The first error that can be hit is a socket.gaierror if a hostname is unresolvable.
85 | # That won't get hit for addresses (e.g. 127.0.0.1 or ::1) or resolvable names, though.
86 | # Instead, those connections will succeed but will not respond correctly to API requests.
87 | # The first API request that's made is to get the cluster version (using isi_sdk_8_0).
88 | # To avoid having to wait for such a connection to time out, here we patch that request.
89 | with patch(
90 | "isi_sdk_8_0.ClusterApi.get_cluster_version", max_retry_exception_mock
91 | ):
92 | yield request.param # yield to keep the patch until the teardown of the test.
93 | else:
94 | pytest.skip("Localhost is OneFS.")
95 |
96 |
97 | @pytest.fixture(scope="session")
98 | def onefs_client(pytestconfig):
99 | """Get an instance of onefs.Client."""
100 | return onefs.Client(
101 | address=pytestconfig.getoption("--address", skip=True),
102 | username=pytestconfig.getoption("--username", skip=True),
103 | password=pytestconfig.getoption("--password", skip=True),
104 | verify_ssl=False, # OneFS uses a self-signed certificate by default.
105 | )
106 |
107 |
108 | @pytest.fixture(scope="session")
109 | def riptide_client(onefs_client):
110 | """Get an instance of onefs.Client that points to Riptide."""
111 | if (
112 | onefs.ONEFS_RELEASES["8.0.0.0"]
113 | <= onefs_client.revision()
114 | < onefs.ONEFS_RELEASES["8.0.1.0"]
115 | ):
116 | return onefs_client
117 | pytest.skip("The OneFS cluster is not running Riptide.")
118 |
119 |
120 | def new_name(request):
121 | """Get a name that may be used to create a new user or group."""
122 | return "-".join(
123 | [
124 | request.function.__name__,
125 | str(uuid.uuid4()),
126 | ]
127 | )
128 |
129 |
130 | def _new_group_name(request):
131 | return new_name(request)
132 |
133 |
134 | @pytest.fixture
135 | def new_group_name(request):
136 | """Get a name that may be used to create a new group."""
137 | return _new_group_name(request)
138 |
139 |
140 | def new_id():
141 | """Get an ID that may be used to create a new user or group."""
142 | return random.randint(1024, 65536)
143 |
144 |
145 | def _new_gid():
146 | return new_id()
147 |
148 |
149 | @pytest.fixture
150 | def new_gid():
151 | """Get a GID that may be used to create a new group."""
152 | return _new_gid()
153 |
154 |
155 | def _deletable_group(request, onefs_client):
156 | name, gid = _new_group_name(request), _new_gid()
157 | onefs_client.create_group(name=name, gid=gid)
158 | return name, gid
159 |
160 |
161 | @pytest.fixture
162 | def deletable_group(request, onefs_client):
163 | """Get the name of an existing group that it is ok to delete."""
164 | return _deletable_group(request, onefs_client)
165 |
166 |
167 | def _created_group(request, onefs_client):
168 | name, gid = _deletable_group(request, onefs_client)
169 | request.addfinalizer(lambda: onefs_client.delete_group(name="GID:" + str(gid)))
170 | return name, gid
171 |
172 |
173 | @pytest.fixture
174 | def created_group(request, onefs_client):
175 | """Get an existing group with a known GID."""
176 | return _created_group(request, onefs_client)
177 |
178 |
179 | def _new_user_name(request):
180 | return new_name(request)
181 |
182 |
183 | @pytest.fixture
184 | def new_user_name(request):
185 | """Get a name that may be used to create a new user."""
186 | return _new_user_name(request)
187 |
188 |
189 | def _new_uid():
190 | return new_id()
191 |
192 |
193 | @pytest.fixture
194 | def new_uid():
195 | """Get a UID that may be used to create a new user."""
196 | return _new_uid()
197 |
198 |
199 | def _deletable_user(request, onefs_client):
200 | name = _new_user_name(request)
201 | primary_group_name, _ = _created_group(request, onefs_client)
202 | uid = _new_uid()
203 | onefs_client.create_user(name=name, primary_group_name=primary_group_name, uid=uid)
204 | return name, primary_group_name, uid
205 |
206 |
207 | @pytest.fixture
208 | def deletable_user(request, onefs_client):
209 | """Get the name of an existing user that it is ok to delete."""
210 | return _deletable_user(request, onefs_client)
211 |
212 |
213 | def _created_user(request, onefs_client):
214 | name, primary_group_name, uid = _deletable_user(request, onefs_client)
215 | request.addfinalizer(lambda: onefs_client.delete_user(name="UID:" + str(uid)))
216 | return name, primary_group_name, uid
217 |
218 |
219 | @pytest.fixture
220 | def created_user(request, onefs_client):
221 | """Get an existing user with a known UID."""
222 | return _created_user(request, onefs_client)
223 |
224 |
225 | def _deletable_proxy_user(request, onefs_client):
226 | user_name = _created_user(request, onefs_client)[0]
227 | members = []
228 | onefs_client.create_hdfs_proxy_user(name=user_name, members=members)
229 | return user_name, members
230 |
231 |
232 | @pytest.fixture
233 | def deletable_proxy_user(request, onefs_client):
234 | """Get the name of an existing proxy user that it is ok to delete."""
235 | return _deletable_proxy_user(request, onefs_client)
236 |
237 |
238 | def _created_proxy_user(request, onefs_client):
239 | user_name, members = _deletable_proxy_user(request, onefs_client)
240 | request.addfinalizer(lambda: onefs_client.delete_hdfs_proxy_user(name=user_name))
241 | return user_name, members
242 |
243 |
244 | @pytest.fixture
245 | def created_proxy_user(request, onefs_client):
246 | """Get an existing proxy user with known members."""
247 | return _created_proxy_user(request, onefs_client)
248 |
249 |
250 | def _deletable_realm(pytestconfig, onefs_client):
251 | realm = pytestconfig.getoption("--realm", skip=True)
252 | onefs_client.create_realm(
253 | name=realm,
254 | admin_server=pytestconfig.getoption("--kadmin-address", skip=True),
255 | kdcs=pytestconfig.getoption("--kdc-addresses", skip=True),
256 | )
257 | return realm
258 |
259 |
260 | @pytest.fixture
261 | def deletable_realm(pytestconfig, onefs_client):
262 | """Get the name of an existing realm that it is ok to delete."""
263 | return _deletable_realm(pytestconfig, onefs_client)
264 |
265 |
266 | def _created_realm(request, onefs_client):
267 | realm = _deletable_realm(request.config, onefs_client)
268 | request.addfinalizer(lambda: onefs_client.delete_realm(name=realm))
269 | return realm
270 |
271 |
272 | @pytest.fixture
273 | def created_realm(request, onefs_client):
274 | """Get the name of an existing realm."""
275 | return _created_realm(request, onefs_client)
276 |
277 |
278 | def _deletable_auth_provider(request, onefs_client):
279 | realm = _created_realm(request, onefs_client)
280 | onefs_client.create_auth_provider(
281 | realm=realm,
282 | user=request.config.getoption("--kadmin-username", skip=True),
283 | password=request.config.getoption("--kadmin-password", skip=True),
284 | )
285 | return realm
286 |
287 |
288 | @pytest.fixture
289 | def deletable_auth_provider(request, onefs_client):
290 | """Get the name of an existing Kerberos auth provider that it is ok to delete."""
291 | return _deletable_auth_provider(request, onefs_client)
292 |
293 |
294 | def _created_auth_provider(request, onefs_client):
295 | auth_provider = _deletable_auth_provider(request, onefs_client)
296 | request.addfinalizer(lambda: onefs_client.delete_auth_provider(name=auth_provider))
297 | return auth_provider
298 |
299 |
300 | @pytest.fixture
301 | def created_auth_provider(request, onefs_client):
302 | """Get the name of an existing Kerberos auth provider."""
303 | return _created_auth_provider(request, onefs_client)
304 |
305 |
306 | def _new_spn(request, onefs_client):
307 | return (
308 | _new_user_name(request) + "/" + onefs_client.address,
309 | _created_auth_provider(request, onefs_client),
310 | )
311 |
312 |
313 | @pytest.fixture
314 | def new_spn(request, onefs_client):
315 | """Get a principal that may be used to create a new SPN."""
316 | return _new_spn(request, onefs_client)
317 |
318 |
319 | def _remove_principal_from_kdc(
320 | principal,
321 | realm,
322 | kdc,
323 | admin_server,
324 | admin_principal,
325 | admin_password,
326 | ):
327 | """Delete a Kerberos principal."""
328 | # Note: kadmin.init_with_password requires a Kerberos config file.
329 |
330 | # Create a temporary Kerberos config file.
331 | krb5_config = configparser.ConfigParser()
332 | krb5_config.optionxform = str
333 | krb5_config.add_section("libdefaults")
334 | krb5_config.set("libdefaults", "default_realm", realm)
335 | krb5_config.add_section("realms")
336 | krb5_config.set(
337 | "realms",
338 | realm,
339 | "\n".join(
340 | [
341 | "{",
342 | " kdc = " + kdc,
343 | " admin_server = " + admin_server,
344 | "}",
345 | ]
346 | ),
347 | )
348 | with tempfile.NamedTemporaryFile(mode="w", delete=False) as krb5_conf:
349 | krb5_config.write(krb5_conf)
350 |
351 | # Activate the config file via an env var.
352 | previous_krb5_conf = os.environ.get("KRB5_CONFIG")
353 | os.environ["KRB5_CONFIG"] = krb5_conf.name
354 |
355 | # Delete the principal.
356 | kadmin.init_with_password(admin_principal, admin_password).delete_principal(
357 | principal
358 | )
359 |
360 | # Reset the env var.
361 | if previous_krb5_conf is None:
362 | del os.environ["KRB5_CONFIG"]
363 | else:
364 | os.environ["KRB5_CONFIG"] = previous_krb5_conf
365 |
366 | # Delete the config file.
367 | os.remove(krb5_conf.name)
368 |
369 |
370 | def _deletable_spn(request, onefs_client):
371 | spn, auth_provider = _new_spn(request, onefs_client)
372 | kadmin_username = request.config.getoption("--kadmin-username", skip=True)
373 | kadmin_password = request.config.getoption("--kadmin-password", skip=True)
374 | onefs_client.create_spn(
375 | spn=spn,
376 | realm=auth_provider,
377 | user=kadmin_username,
378 | password=kadmin_password,
379 | )
380 | request.addfinalizer(
381 | lambda: _remove_principal_from_kdc(
382 | principal=spn,
383 | realm=auth_provider,
384 | kdc=request.config.getoption("--kdc-addresses", skip=True)[0],
385 | admin_server=request.config.getoption("--kadmin-address", skip=True),
386 | admin_principal=kadmin_username,
387 | admin_password=kadmin_password,
388 | ),
389 | )
390 | return spn, auth_provider
391 |
392 |
393 | @pytest.fixture
394 | def deletable_spn(request, onefs_client):
395 | """Get the name of an existing SPN that it is ok to delete."""
396 | spn, auth_provider = _deletable_spn(request, onefs_client)
397 | yield spn, auth_provider
398 | assert (spn + "@" + auth_provider) not in onefs_client.list_spns(
399 | provider=auth_provider
400 | )
401 |
402 |
403 | @pytest.fixture
404 | def created_spn(request, onefs_client):
405 | """Get the name of an existing Kerberos SPN."""
406 | spn, auth_provider = _deletable_spn(request, onefs_client)
407 | request.addfinalizer(
408 | lambda: onefs_client.delete_spn(spn=spn, provider=auth_provider)
409 | )
410 | return spn, auth_provider
411 |
412 |
413 | @pytest.fixture
414 | def exception():
415 | """Get an exception."""
416 | return random.choice(
417 | [
418 | Exception,
419 | IsilonHadoopToolError,
420 | ]
421 | )
422 |
423 |
424 | def _api_exception_from_http_resp(onefs_client, body):
425 | return onefs_client._sdk.rest.ApiException(
426 | http_resp=urllib3.response.HTTPResponse(body=body),
427 | )
428 |
429 |
430 | def _api_exception(onefs_client, messages=()):
431 | return _api_exception_from_http_resp(
432 | onefs_client,
433 | body=json.dumps(
434 | {
435 | "errors": [{"message": message} for message in messages],
436 | }
437 | ),
438 | )
439 |
440 |
441 | @pytest.fixture
442 | def empty_api_exception_mock(onefs_client):
443 | """Get an object that raises an ApiException (from the Isilon SDK) when called."""
444 | return Mock(side_effect=_api_exception(onefs_client, messages=[""]))
445 |
446 |
447 | @pytest.fixture
448 | def retriable_api_exception_mock(onefs_client):
449 | """Get an object that raises a retriable ApiException (from the Isilon SDK) when called."""
450 | return_value = None
451 | return (
452 | Mock(
453 | side_effect=[
454 | # First raise an exception, then return a value.
455 | _api_exception(
456 | onefs_client, messages=[onefs.APIError.try_again_error_format]
457 | ),
458 | return_value,
459 | ],
460 | ),
461 | return_value,
462 | )
463 |
464 |
465 | @pytest.fixture(
466 | params=[
467 | lambda onefs_client: (
468 | # body, decodable, valid, iterable, not empty, valid
469 | # This is known to occur in the wild.
470 | _api_exception(
471 | onefs_client, messages=[onefs.APIError.try_again_error_format]
472 | ),
473 | does_not_raise(),
474 | ),
475 | lambda onefs_client: (
476 | # body, decodable, valid, iterable, not empty, invalid (KeyError)
477 | _api_exception_from_http_resp(onefs_client, body='{"errors": [{}]}'),
478 | pytest.raises(onefs.MalformedAPIError),
479 | ),
480 | lambda onefs_client: (
481 | # body, decodable, valid, iterable, not empty, invalid (TypeError)
482 | _api_exception_from_http_resp(onefs_client, body='{"errors": [[]]}'),
483 | pytest.raises(onefs.MalformedAPIError),
484 | ),
485 | lambda onefs_client: (
486 | # body, decodable, valid, iterable, empty
487 | _api_exception_from_http_resp(onefs_client, body='{"errors": []}'),
488 | does_not_raise(),
489 | ),
490 | lambda onefs_client: (
491 | # body, decodable, valid, not iterable
492 | _api_exception_from_http_resp(onefs_client, body='{"errors": null}'),
493 | pytest.raises(onefs.MalformedAPIError),
494 | ),
495 | lambda onefs_client: (
496 | # body, decodable, invalid (KeyError)
497 | # This is known to occur in the wild (e.g. bug 248011)
498 | _api_exception_from_http_resp(onefs_client, body="{}"),
499 | pytest.raises(onefs.MalformedAPIError),
500 | ),
501 | lambda onefs_client: (
502 | # body, decodable, invalid (TypeError)
503 | _api_exception_from_http_resp(onefs_client, body="[]"),
504 | pytest.raises(onefs.MalformedAPIError),
505 | ),
506 | lambda onefs_client: (
507 | # body, undecodable
508 | # This is known to occur in the wild (e.g. if Apache errors before PAPI).
509 | _api_exception_from_http_resp(onefs_client, body="not JSON"),
510 | pytest.raises(onefs.UndecodableAPIError),
511 | ),
512 | lambda onefs_client: (
513 | # no body
514 | # This is known to occur in the wild.
515 | onefs_client._sdk.rest.ApiException(
516 | status=0, reason="built without http_resp"
517 | ),
518 | pytest.raises(onefs.UndecodableAPIError),
519 | ),
520 | lambda onefs_client: (
521 | # uninitialized
522 | onefs_client._sdk.rest.ApiException(),
523 | pytest.raises(onefs.UndecodableAPIError),
524 | ),
525 | ],
526 | )
527 | def api_error_errors_expectation(request, onefs_client):
528 | """
529 | Get an APIError and the expectation (context manager)
530 | of what happens when the errors method is called.
531 | """
532 | api_exception, expectation = request.param(onefs_client)
533 | return onefs.APIError(api_exception), expectation
534 |
535 |
536 | @pytest.fixture
537 | def api_error(api_error_errors_expectation):
538 | """Get a onefs.APIError exception."""
539 | return api_error_errors_expectation[0]
540 |
541 |
542 | MAX_MODE = 0o1777
543 |
544 |
545 | @pytest.fixture
546 | def max_mode():
547 | """Get the highest integer mode this test suite will use."""
548 | return MAX_MODE
549 |
550 |
551 | def _deletable_directory(request, onefs_client):
552 | path = "/" + new_name(request)
553 | mode = random.randint(0, MAX_MODE)
554 | mode &= 0o777 # https://bugs.west.isilon.com/show_bug.cgi?id=250615
555 | onefs_client.mkdir(path=path, mode=mode)
556 | return path, {
557 | "group": onefs_client.primary_group_of_user(onefs_client.username),
558 | "mode": mode,
559 | "owner": onefs_client.username,
560 | }
561 |
562 |
563 | @pytest.fixture
564 | def deletable_directory(request, onefs_client):
565 | """Get the path and mode of an existing directory that it is ok to delete."""
566 | return _deletable_directory(request, onefs_client)
567 |
568 |
569 | def _created_directory(request, onefs_client):
570 | path, permissions = _deletable_directory(request, onefs_client)
571 | request.addfinalizer(lambda: onefs_client.rmdir(path=path, recursive=True))
572 | return path, permissions
573 |
574 |
575 | @pytest.fixture
576 | def created_directory(request, onefs_client):
577 | """Get an existing directory with a known mode."""
578 | return _created_directory(request, onefs_client)
579 |
580 |
581 | @pytest.fixture
582 | def supported_feature():
583 | """Get a OneFSFeature that is guaranteed to be supported."""
584 | return onefs.OneFSFeature.FOREVER
585 |
586 |
587 | @pytest.fixture
588 | def unsupported_feature():
589 | """Get a OneFSFeature that is guaranteed to be unsupported."""
590 |
591 | class OneFSFakeFeature(Enum):
592 | FAKE_FEATURE = (float("inf"), 0)
593 |
594 | return OneFSFakeFeature.FAKE_FEATURE
595 |
596 |
597 | @pytest.fixture
598 | def requests_delete_raises():
599 | class _DummyResponse(object):
600 | def raise_for_status(self):
601 | raise requests.exceptions.HTTPError
602 |
603 | with patch("requests.delete", lambda *args, **kwargs: _DummyResponse()):
604 | yield
605 |
606 |
607 | @pytest.fixture(params=["cdh", "cdp", "hdp"])
608 | def users_groups_for_directories(request, onefs_client):
609 | """
610 | Get users and groups from the identities module that
611 | correspond to directories from the directories module
612 | (i.e. get the identities guaranteed to exist for a set of directories).
613 | """
614 |
615 | users, groups = set(), set()
616 |
617 | def _pass(*args, **kwargs):
618 | pass
619 |
620 | identities.iterate_identities(
621 | {
622 | "cdh": identities.cdh_identities,
623 | "cdp": identities.cdh_identities,
624 | "hdp": identities.hdp_identities,
625 | }[request.param](onefs_client.zone),
626 | create_group=lambda group_name: groups.add(group_name),
627 | create_user=lambda user_name, _: users.add(user_name),
628 | add_user_to_group=_pass,
629 | create_proxy_user=_pass,
630 | )
631 | return (
632 | (users, groups),
633 | {
634 | "cdh": directories.cdh_directories,
635 | "cdp": directories.cdh_directories,
636 | "hdp": directories.hdp_directories,
637 | }[request.param](),
638 | )
639 |
--------------------------------------------------------------------------------
/tests/test___init__.py:
--------------------------------------------------------------------------------
1 | """Verify the functionality of isilon_hadoop_tools.__init__."""
2 |
3 |
4 | import pytest
5 |
6 | import isilon_hadoop_tools
7 |
8 |
9 | @pytest.mark.parametrize(
10 | "error, classinfo",
11 | [
12 | (isilon_hadoop_tools.IsilonHadoopToolError, Exception),
13 | ],
14 | )
15 | def test_errors(error, classinfo):
16 | """Ensure that exception types remain consistent."""
17 | assert issubclass(error, isilon_hadoop_tools.IsilonHadoopToolError)
18 | assert issubclass(error, classinfo)
19 |
--------------------------------------------------------------------------------
/tests/test__scripts.py:
--------------------------------------------------------------------------------
1 | import posixpath
2 | import subprocess
3 | import uuid
4 |
5 | import pytest
6 |
7 |
8 | @pytest.fixture
9 | def empty_hdfs_root(onefs_client):
10 | """Create a temporary directory and make it the HDFS root."""
11 | old_hdfs_root = onefs_client.hdfs_settings()["root_directory"]
12 | new_root_name = str(uuid.uuid4())
13 | onefs_client.mkdir(new_root_name, 0o755)
14 | onefs_client.update_hdfs_settings(
15 | {
16 | "root_directory": posixpath.join(
17 | onefs_client.zone_settings()["path"], new_root_name
18 | ),
19 | }
20 | )
21 | yield
22 | onefs_client.update_hdfs_settings({"root_directory": old_hdfs_root})
23 | onefs_client.rmdir(new_root_name, recursive=True)
24 |
25 |
26 | @pytest.mark.usefixtures("empty_hdfs_root")
27 | @pytest.mark.parametrize("script", ["isilon_create_users", "isilon_create_directories"])
28 | @pytest.mark.parametrize("dist", ["cdh", "cdp", "hdp"])
29 | def test_dry_run(script, onefs_client, dist):
30 | subprocess.check_call(
31 | [
32 | script,
33 | "--append-cluster-name",
34 | str(uuid.uuid4()),
35 | "--dist",
36 | dist,
37 | "--dry",
38 | "--no-verify",
39 | "--onefs-password",
40 | onefs_client.password,
41 | "--onefs-user",
42 | onefs_client.username,
43 | "--zone",
44 | "System",
45 | onefs_client.address,
46 | ]
47 | )
48 |
--------------------------------------------------------------------------------
/tests/test_cli.py:
--------------------------------------------------------------------------------
1 | """Verify the functionality of isilon_hadoop_tools.cli."""
2 |
3 |
4 | from unittest.mock import Mock
5 |
6 | import pytest
7 |
8 | from isilon_hadoop_tools import IsilonHadoopToolError, cli
9 |
10 |
11 | def test_catches(exception):
12 | """Ensure cli.catches detects the desired exception."""
13 | assert cli.catches(exception)(Mock(side_effect=exception))() == 1
14 |
15 |
16 | def test_not_catches(exception):
17 | """Ensure cli.catches does not catch undesirable exceptions."""
18 | with pytest.raises(exception):
19 | cli.catches(())(Mock(side_effect=exception))()
20 |
21 |
22 | @pytest.mark.parametrize(
23 | "error, classinfo",
24 | [
25 | (cli.CLIError, IsilonHadoopToolError),
26 | (cli.HintedError, cli.CLIError),
27 | ],
28 | )
29 | def test_errors_cli(error, classinfo):
30 | """Ensure that exception types remain consistent."""
31 | assert issubclass(error, IsilonHadoopToolError)
32 | assert issubclass(error, cli.CLIError)
33 | assert issubclass(error, classinfo)
34 |
--------------------------------------------------------------------------------
/tests/test_directories.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from isilon_hadoop_tools import IsilonHadoopToolError, directories
4 |
5 |
6 | def test_directory_identities(users_groups_for_directories):
7 | """
8 | Verify that identities needed by the directories module
9 | are guaranteed to exist by the identities module.
10 | """
11 | (users, groups), dirs = users_groups_for_directories
12 | for hdfs_directory in dirs:
13 | assert hdfs_directory.owner in users
14 | assert hdfs_directory.group in groups
15 |
16 |
17 | @pytest.mark.parametrize(
18 | "error, classinfo",
19 | [
20 | (directories.DirectoriesError, IsilonHadoopToolError),
21 | (directories.HDFSRootDirectoryError, directories.DirectoriesError),
22 | ],
23 | )
24 | def test_errors_cli(error, classinfo):
25 | """Ensure that exception types remain consistent."""
26 | assert issubclass(error, IsilonHadoopToolError)
27 | assert issubclass(error, directories.DirectoriesError)
28 | assert issubclass(error, classinfo)
29 |
--------------------------------------------------------------------------------
/tests/test_identities.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | import isilon_hadoop_tools.identities
4 |
5 |
6 | @pytest.mark.parametrize("zone", ["System", "notSystem"])
7 | @pytest.mark.parametrize(
8 | "identities",
9 | [
10 | isilon_hadoop_tools.identities.cdh_identities,
11 | isilon_hadoop_tools.identities.cdp_identities,
12 | isilon_hadoop_tools.identities.hdp_identities,
13 | ],
14 | )
15 | def test_log_identities(identities, zone):
16 | """Verify that log_identities returns None."""
17 | assert isilon_hadoop_tools.identities.log_identities(identities(zone)) is None
18 |
--------------------------------------------------------------------------------
/tests/test_onefs.py:
--------------------------------------------------------------------------------
1 | """Verify the functionality of isilon_hadoop_tools.onefs."""
2 |
3 |
4 | import socket
5 |
6 | from unittest.mock import Mock
7 | from urllib.parse import urlparse
8 | import uuid
9 |
10 | import isi_sdk_7_2
11 | import isi_sdk_8_0
12 | import isi_sdk_8_0_1
13 | import isi_sdk_8_1_0
14 | import isi_sdk_8_1_1
15 | import isi_sdk_8_2_0
16 | import isi_sdk_8_2_1
17 | import isi_sdk_8_2_2
18 | import pytest
19 |
20 | from isilon_hadoop_tools import IsilonHadoopToolError, onefs
21 |
22 |
23 | def test_init_connection_error(invalid_address, pytestconfig):
24 | """Creating a Client for an unusable host should raise a OneFSConnectionError."""
25 | with pytest.raises(onefs.OneFSConnectionError):
26 | onefs.Client(address=invalid_address, username=None, password=None)
27 |
28 |
29 | @pytest.mark.xfail(
30 | raises=onefs.MalformedAPIError,
31 | reason="https://bugs.west.isilon.com/show_bug.cgi?id=248011",
32 | )
33 | def test_init_bad_creds(pytestconfig):
34 | """Creating a Client with invalid credentials should raise an appropriate exception."""
35 | with pytest.raises(onefs.APIError):
36 | onefs.Client(
37 | address=pytestconfig.getoption("--address", skip=True),
38 | username=str(uuid.uuid4()),
39 | password=str(uuid.uuid4()),
40 | verify_ssl=False, # OneFS uses a self-signed certificate by default.
41 | )
42 |
43 |
44 | def test_init(request):
45 | """Creating a Client should not raise an Exception."""
46 | assert isinstance(request.getfixturevalue("onefs_client"), onefs.Client)
47 |
48 |
49 | def test_api_error_errors(api_error_errors_expectation):
50 | """Verify that APIError.errors raises appropriate exceptions."""
51 | api_error, expectation = api_error_errors_expectation
52 | with expectation:
53 | api_error.errors()
54 |
55 |
56 | def test_api_error_str(api_error):
57 | """Verify that APIErrors can be stringified."""
58 | assert isinstance(str(api_error), str)
59 |
60 |
61 | @pytest.mark.parametrize(
62 | "revision, expected_sdk",
63 | [
64 | (0, isi_sdk_8_2_2),
65 | (onefs.ONEFS_RELEASES["7.2.0.0"], isi_sdk_7_2),
66 | (onefs.ONEFS_RELEASES["8.0.0.0"], isi_sdk_8_0),
67 | (onefs.ONEFS_RELEASES["8.0.0.4"], isi_sdk_8_0),
68 | (onefs.ONEFS_RELEASES["8.0.1.0"], isi_sdk_8_0_1),
69 | (onefs.ONEFS_RELEASES["8.0.1.1"], isi_sdk_8_0_1),
70 | (onefs.ONEFS_RELEASES["8.1.0.0"], isi_sdk_8_1_0),
71 | (onefs.ONEFS_RELEASES["8.1.1.0"], isi_sdk_8_1_1),
72 | (onefs.ONEFS_RELEASES["8.1.2.0"], isi_sdk_8_1_1),
73 | (onefs.ONEFS_RELEASES["8.2.0.0"], isi_sdk_8_2_0),
74 | (onefs.ONEFS_RELEASES["8.2.1.0"], isi_sdk_8_2_1),
75 | (onefs.ONEFS_RELEASES["8.2.2.0"], isi_sdk_8_2_2),
76 | (onefs.ONEFS_RELEASES["8.2.3.0"], isi_sdk_8_2_2),
77 | (float("inf"), isi_sdk_8_2_2),
78 | ],
79 | )
80 | def test_sdk_for_revision(revision, expected_sdk):
81 | """Verify that an appropriate SDK is selected for a given revision."""
82 | assert onefs.sdk_for_revision(revision) is expected_sdk
83 |
84 |
85 | def test_sdk_for_revision_unsupported():
86 | """Ensure that an UnsupportedVersion exception for unsupported revisions."""
87 | with pytest.raises(onefs.UnsupportedVersion):
88 | onefs.sdk_for_revision(revision=0, strict=True)
89 |
90 |
91 | def test_accesses_onefs_connection_error(max_retry_exception_mock, onefs_client):
92 | """Verify that MaxRetryErrors are converted to OneFSConnectionErrors."""
93 | with pytest.raises(onefs.OneFSConnectionError):
94 | onefs.accesses_onefs(max_retry_exception_mock)(onefs_client)
95 |
96 |
97 | def test_accesses_onefs_api_error(empty_api_exception_mock, onefs_client):
98 | """Verify that APIExceptions are converted to APIErrors."""
99 | with pytest.raises(onefs.APIError):
100 | onefs.accesses_onefs(empty_api_exception_mock)(onefs_client)
101 |
102 |
103 | def test_accesses_onefs_try_again(retriable_api_exception_mock, onefs_client):
104 | """Verify that APIExceptions are retried appropriately."""
105 | mock, return_value = retriable_api_exception_mock
106 | assert onefs.accesses_onefs(mock)(onefs_client) == return_value
107 |
108 |
109 | def test_accesses_onefs_other(exception, onefs_client):
110 | """Verify that arbitrary exceptions are not caught."""
111 | with pytest.raises(exception):
112 | onefs.accesses_onefs(Mock(side_effect=exception))(onefs_client)
113 |
114 |
115 | def test_address(onefs_client, pytestconfig):
116 | """Verify that onefs.Client.address is exactly what was passed in."""
117 | assert onefs_client.address == pytestconfig.getoption("--address")
118 |
119 |
120 | def test_username(onefs_client, pytestconfig):
121 | """Verify that onefs.Client.username is exactly what was passed in."""
122 | assert onefs_client.username == pytestconfig.getoption("--username")
123 |
124 |
125 | def test_password(onefs_client, pytestconfig):
126 | """Verify that onefs.Client.password is exactly what was passed in."""
127 | assert onefs_client.password == pytestconfig.getoption("--password")
128 |
129 |
130 | def test_host(onefs_client):
131 | """Verify that onefs.Client.host is a parsable url."""
132 | parsed = urlparse(onefs_client.host)
133 | assert parsed.scheme == "https"
134 | assert socket.gethostbyname(parsed.hostname)
135 | assert parsed.port == 8080
136 |
137 |
138 | def test_create_group(request):
139 | """Ensure that a group can be created successfully."""
140 | request.getfixturevalue("created_group")
141 |
142 |
143 | def test_delete_group(onefs_client, deletable_group):
144 | """Verify that a group can be deleted successfully."""
145 | group_name, _ = deletable_group
146 | assert onefs_client.delete_group(name=group_name) is None
147 |
148 |
149 | def test_gid_of_group(onefs_client, created_group):
150 | """Verify that the correct GID is fetched for an existing group."""
151 | group_name, gid = created_group
152 | assert onefs_client.gid_of_group(group_name=group_name) == gid
153 |
154 |
155 | def test_groups(onefs_client, created_group):
156 | """Verify that a group that is known to exist appears in the list of existing groups."""
157 | group_name, _ = created_group
158 | assert group_name in onefs_client.groups()
159 |
160 |
161 | def test_delete_user(onefs_client, deletable_user):
162 | """Verify that a user can be deleted successfully."""
163 | assert onefs_client.delete_user(name=deletable_user[0]) is None
164 |
165 |
166 | def test_create_user(request):
167 | """Ensure that a user can be created successfully."""
168 | request.getfixturevalue("created_user")
169 |
170 |
171 | def test_add_user_to_group(onefs_client, created_user, created_group):
172 | """Ensure that a user can be added to a group successfully."""
173 | assert (
174 | onefs_client.add_user_to_group(
175 | user_name=created_user[0],
176 | group_name=created_group[0],
177 | )
178 | is None
179 | )
180 |
181 |
182 | def test_create_hdfs_proxy_user(request):
183 | """Ensure that an HDFS proxy user can be created successfully."""
184 | request.getfixturevalue("created_proxy_user")
185 |
186 |
187 | def test_delete_proxy_user(onefs_client, deletable_proxy_user):
188 | """Verify that a proxy user can be deleted successfully."""
189 | assert onefs_client.delete_hdfs_proxy_user(name=deletable_proxy_user[0]) is None
190 |
191 |
192 | def test_uid_of_user(onefs_client, created_user):
193 | """Verify that the correct UID is fetched for an existing user."""
194 | user_name, _, uid = created_user
195 | assert onefs_client.uid_of_user(user_name=user_name) == uid
196 |
197 |
198 | def test_primary_group_of_user(onefs_client, created_user):
199 | """Verify that the correct primary group is fetched for an existing user."""
200 | user_name, primary_group, _ = created_user
201 | assert onefs_client.primary_group_of_user(user_name=user_name) == primary_group
202 |
203 |
204 | def test_create_realm(request):
205 | """Verify that a Kerberos realm can be created successfully."""
206 | request.getfixturevalue("created_realm")
207 |
208 |
209 | def test_delete_realm(onefs_client, deletable_realm):
210 | """Verify that a realm can be deleted successfully."""
211 | onefs_client.delete_realm(name=deletable_realm)
212 |
213 |
214 | def test_create_auth_provider(request):
215 | """Verify that a Kerberos auth provider can be created successfully."""
216 | request.getfixturevalue("created_auth_provider")
217 |
218 |
219 | def test_delete_auth_provider(onefs_client, deletable_auth_provider):
220 | """Verify that a Kerberos auth provider can be deleted successfully."""
221 | onefs_client.delete_auth_provider(name=deletable_auth_provider)
222 |
223 |
224 | def test_delete_spn(onefs_client, deletable_spn):
225 | """Verify that an SPN can be deleted successfully."""
226 | spn, provider = deletable_spn
227 | onefs_client.delete_spn(spn=spn, provider=provider)
228 |
229 |
230 | def test_create_spn(request):
231 | """Verify that a Kerberos SPN can be created successfully."""
232 | request.getfixturevalue("created_spn")
233 |
234 |
235 | def test_list_spns(onefs_client, created_spn):
236 | """Verify that a Kerberos SPN can be listed successfully."""
237 | spn, provider = created_spn
238 | assert (spn + "@" + provider) in onefs_client.list_spns(provider=provider)
239 |
240 |
241 | def test_flush_auth_cache(onefs_client):
242 | """Verify that flushing the auth cache does not raise an exception."""
243 | assert onefs_client.flush_auth_cache() is None
244 |
245 |
246 | def test_flush_auth_cache_unsupported(riptide_client):
247 | """
248 | Verify that trying flush the auth cache of a non-System zone
249 | before Halfpipe raises an UnsupportedOperation exception.
250 | """
251 | with pytest.raises(onefs.UnsupportedOperation):
252 | riptide_client.flush_auth_cache(zone="notSystem")
253 |
254 |
255 | @pytest.mark.usefixtures("requests_delete_raises")
256 | def test_flush_auth_cache_error(riptide_client):
257 | """
258 | Verify that flushing the auth cache raises an appropriate exception
259 | when things go wrong before Halfpipe.
260 | """
261 | with pytest.raises(onefs.NonSDKAPIError):
262 | riptide_client.flush_auth_cache()
263 |
264 |
265 | def test_hdfs_inotify_settings(onefs_client):
266 | """Ensure hdfs_inotify_settings returns all available settings appropriately."""
267 | try:
268 | hdfs_inotify_settings = onefs_client.hdfs_inotify_settings()
269 | except onefs.UnsupportedOperation:
270 | assert onefs_client.revision() < onefs.ONEFS_RELEASES["8.1.1.0"]
271 | else:
272 | assert isinstance(hdfs_inotify_settings, dict)
273 | assert all(
274 | setting in hdfs_inotify_settings
275 | for setting in ["enabled", "maximum_delay", "retention"]
276 | )
277 |
278 |
279 | @pytest.mark.parametrize(
280 | "setting_and_type",
281 | {
282 | "alternate_system_provider": str,
283 | "auth_providers": list,
284 | "cache_entry_expiry": int,
285 | "create_path": (bool, type(None)),
286 | "groupnet": str,
287 | "home_directory_umask": int,
288 | "id": str,
289 | "map_untrusted": str,
290 | "name": str,
291 | "netbios_name": str,
292 | "path": str,
293 | "skeleton_directory": str,
294 | "system": bool,
295 | "system_provider": str,
296 | "user_mapping_rules": list,
297 | "zone_id": int,
298 | }.items(),
299 | )
300 | def test_zone_settings(onefs_client, setting_and_type):
301 | """Ensure zone_settings returns all available settings appropriately."""
302 | setting, setting_type = setting_and_type
303 | assert isinstance(onefs_client.zone_settings()[setting], setting_type)
304 |
305 |
306 | def test_zone_settings_bad_zone(onefs_client):
307 | """Ensure zone_settings fails appropriately when given a nonexistent zone."""
308 | with pytest.raises(onefs.MissingZoneError):
309 | onefs_client.zone_settings(zone=str(uuid.uuid4()))
310 |
311 |
312 | def test_mkdir(request, onefs_client):
313 | """Ensure that a directory can be created successfully."""
314 | path, permissions = request.getfixturevalue("created_directory")
315 |
316 | def _check_postconditions():
317 | assert onefs_client.permissions(path) == permissions
318 |
319 | request.addfinalizer(_check_postconditions)
320 |
321 |
322 | @pytest.mark.parametrize("recursive", [False, True])
323 | def test_rmdir(onefs_client, deletable_directory, recursive, request):
324 | """Verify that a directory can be deleted successfully."""
325 | path, _ = deletable_directory
326 | assert onefs_client.rmdir(path=path, recursive=recursive) is None
327 |
328 | def _check_postconditions():
329 | with pytest.raises(onefs.APIError):
330 | onefs_client.permissions(path)
331 |
332 | request.addfinalizer(_check_postconditions)
333 |
334 |
335 | def test_permissions(onefs_client, created_directory):
336 | """Check that permissions returns correct information."""
337 | path, permissions = created_directory
338 | assert onefs_client.permissions(path) == permissions
339 |
340 |
341 | def test_chmod(onefs_client, created_directory, max_mode, request):
342 | """Check that chmod modifies the mode correctly."""
343 | path, permissions = created_directory
344 | new_mode = (permissions["mode"] + 1) % (max_mode + 1)
345 | assert onefs_client.chmod(path, new_mode) is None
346 |
347 | def _check_postconditions():
348 | assert onefs_client.permissions(path)["mode"] == new_mode
349 |
350 | request.addfinalizer(_check_postconditions)
351 |
352 |
353 | @pytest.mark.parametrize("new_owner", [True, False])
354 | @pytest.mark.parametrize("new_group", [True, False])
355 | def test_chown(
356 | onefs_client,
357 | created_directory,
358 | created_user,
359 | created_group,
360 | new_owner,
361 | new_group,
362 | request,
363 | ):
364 | """Check that chown modifies ownership correctly."""
365 | path, permissions = created_directory
366 | user_name = created_user[0]
367 | group_name = created_group[0]
368 | assert (
369 | onefs_client.chown(
370 | path,
371 | owner=user_name if new_owner else None,
372 | group=group_name if new_group else None,
373 | )
374 | is None
375 | )
376 |
377 | def _check_postconditions():
378 | owner = user_name if new_owner else permissions["owner"]
379 | assert onefs_client.permissions(path)["owner"] == owner
380 | group = group_name if new_group else permissions["group"]
381 | assert onefs_client.permissions(path)["group"] == group
382 |
383 | request.addfinalizer(_check_postconditions)
384 |
385 |
386 | def test_feature_supported(onefs_client, supported_feature):
387 | """Ensure that feature_is_supported correctly identifies a supported feature."""
388 | try:
389 | assert onefs_client.feature_is_supported(supported_feature)
390 | except onefs.UnsupportedOperation:
391 | assert onefs_client.revision() < onefs.ONEFS_RELEASES["8.2.0.0"]
392 |
393 |
394 | def test_feature_unsupported(onefs_client, unsupported_feature):
395 | """Ensure that feature_is_supported correctly identifies an unsupported feature."""
396 | try:
397 | assert not onefs_client.feature_is_supported(unsupported_feature)
398 | except onefs.UnsupportedOperation:
399 | assert onefs_client.revision() < onefs.ONEFS_RELEASES["8.2.0.0"]
400 |
401 |
402 | @pytest.mark.parametrize(
403 | "error, classinfo",
404 | [
405 | (onefs.APIError, onefs.OneFSError),
406 | (onefs.ExpiredLicenseError, onefs.MissingLicenseError),
407 | (onefs.MalformedAPIError, onefs.OneFSError),
408 | (onefs.MissingLicenseError, onefs.OneFSError),
409 | (onefs.MissingZoneError, onefs.OneFSError),
410 | (onefs.MixedModeError, onefs.OneFSError),
411 | (onefs.OneFSCertificateError, onefs.OneFSConnectionError),
412 | (onefs.OneFSConnectionError, onefs.OneFSError),
413 | (onefs.OneFSError, IsilonHadoopToolError),
414 | (onefs.OneFSValueError, ValueError),
415 | (onefs.NonSDKAPIError, onefs.OneFSError),
416 | (onefs.UndecodableAPIError, onefs.MalformedAPIError),
417 | (onefs.UndeterminableVersion, onefs.OneFSError),
418 | (onefs.UnsupportedOperation, onefs.OneFSError),
419 | (onefs.UnsupportedVersion, onefs.OneFSError),
420 | ],
421 | )
422 | def test_errors_onefs(error, classinfo):
423 | """Ensure that exception types remain consistent."""
424 | assert issubclass(error, IsilonHadoopToolError)
425 | assert issubclass(error, onefs.OneFSError)
426 | assert issubclass(error, classinfo)
427 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | minversion = 4.0.0
3 | isolated_build = true
4 | envlist = py{37,38,39,310,311}
5 |
6 | [testenv]
7 | deps =
8 | contextlib2 ~= 21.6.0
9 | mock ~= 5.0.0
10 | pytest ~= 7.2.0
11 | pytest-cov ~= 4.0.0
12 | pytest-randomly ~= 3.12.0
13 | git+https://github.com/tucked/python-kadmin.git@8d1f6fe064310be98734e5b2082defac2531e6b6
14 | commands =
15 | pytest --cov isilon_hadoop_tools --cov-report term-missing {posargs:-r a}
16 |
17 | [gh-actions]
18 | python =
19 | 3.7: py37
20 | 3.8: py38, static, publish
21 | 3.9: py39
22 | 3.10: py310
23 | 3.11: py311
24 |
25 | [testenv:static]
26 | basepython = python3.8
27 | deps =
28 | black ~= 23.1.0
29 | flake8 ~= 6.0.0
30 | pylint ~= 2.16.0
31 | commands =
32 | black --check src setup.py tests
33 | flake8 src setup.py tests
34 | pylint src setup.py
35 |
36 | [flake8]
37 | # https://black.readthedocs.io/en/stable/guides/using_black_with_other_tools.html#flake8
38 | extend-ignore = E203
39 | max-line-length = 100
40 |
41 | [testenv:publish]
42 | basepython = python3.8
43 | passenv = TWINE_*
44 | deps =
45 | build[virtualenv] ~= 0.10.0
46 | twine ~= 4.0.0
47 | commands =
48 | {envpython} -m build --outdir {envtmpdir} .
49 | twine {posargs:check} {envtmpdir}/*
50 |
--------------------------------------------------------------------------------