├── .github └── workflows │ └── on-release-tag.yml ├── .gitignore ├── COPYING-GPLV2 ├── COPYING-LGPLV3 ├── MAINTAINERS ├── MANIFEST.in ├── Makefile ├── README.md ├── debian ├── changelog ├── compat ├── control ├── copyright ├── rules └── source │ ├── format │ └── options ├── docs ├── README.md ├── bitrot.md ├── bricks.md ├── georep.md ├── heal.md ├── local_diskstats.md ├── local_processes.md ├── local_utilization.md ├── nfs_ganesha.md ├── peer.md ├── quota.md ├── rebalance.md ├── snapshot.md ├── utils.md └── volume.md ├── glustercli ├── __init__.py ├── cli │ ├── Makefile.am │ ├── __init__.py │ ├── bitrot.py │ ├── bricks.py │ ├── georep.py │ ├── gluster_version.py │ ├── heal.py │ ├── nfs_ganesha.py │ ├── parsers.py │ ├── peer.py │ ├── quota.py │ ├── rebalance.py │ ├── snapshot.py │ ├── utils.py │ └── volume.py └── metrics │ ├── __init__.py │ ├── cmdlineparser.py │ ├── diskstats.py │ ├── process.py │ ├── utilization.py │ └── utils.py ├── pydocmd.yml ├── pyproject.toml ├── setup.py └── tox.ini /.github/workflows/on-release-tag.yml: -------------------------------------------------------------------------------- 1 | name: "On Release" 2 | 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | 8 | env: 9 | glustercli_version: $(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') 10 | 11 | 12 | jobs: 13 | # Run tests. 
14 | # See also https://docs.docker.com/docker-hub/builds/automated-testing/ 15 | push-to-pypi-store: 16 | name: Push to pypi 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v2 20 | - name: Set up Python 21 | uses: actions/setup-python@v1 22 | with: 23 | python-version: '3.x' 24 | - name: Install dependencies 25 | run: | 26 | python -m pip install --upgrade pip 27 | pip install setuptools setuptools_scm wheel twine 28 | - name: Publish to Pypi 29 | run: | 30 | rm -rf dist; python3 setup.py sdist bdist_wheel; 31 | TWINE_PASSWORD=${{ secrets.TWINE_PASSWORD }} twine upload --username aravindavk dist/* 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[co] 2 | .tox 3 | *.egg 4 | *.egg-info 5 | .coverage 6 | cover 7 | pycscope.* 8 | build 9 | dist 10 | docs/_build 11 | glustercli/_version.py 12 | .DS_Store 13 | _build -------------------------------------------------------------------------------- /COPYING-GPLV2: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. 
(Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. 
We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. 
You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 
113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. 
You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 
165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. 
If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. 
If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 
292 | 293 | 294 | Copyright (C) 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 
331 | 332 | , 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /COPYING-LGPLV3: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 
31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. 
You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 
108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 
145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | 2 | The intention of this file is not to establish who owns what portions of the 3 | code base, but to provide a set of names that developers can consult when they 4 | have a question about a particular subset and also to provide a set of names 5 | to be CC'd when submitting a patch to obtain appropriate review. 6 | 7 | In general, if you have a question about inclusion of a patch, you should 8 | consult gluster-devel@gluster.org and not any specific individual privately. 
9 | 10 | 11 | glustercli-python Maintainers 12 | ============================= 13 | Aravinda Vishwanathapura 14 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md COPYING-GPLV2 COPYING-LGPLV3 MAINTAINERS 2 | include .functests .unittests test-requirements.txt tox.ini test/test.conf 3 | recursive-include glustercli *.py 4 | recursive-include test *.py 5 | graft doc 6 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | docgen: 2 | pydocmd simple glustercli.cli.volume++ > docs/volume.md 3 | pydocmd simple glustercli.cli.bitrot++ > docs/bitrot.md 4 | pydocmd simple glustercli.cli.bricks++ > docs/bricks.md 5 | pydocmd simple glustercli.cli.georep++ > docs/georep.md 6 | pydocmd simple glustercli.cli.peer++ > docs/peer.md 7 | pydocmd simple glustercli.cli.quota++ > docs/quota.md 8 | pydocmd simple glustercli.cli.snapshot++ > docs/snapshot.md 9 | pydocmd simple glustercli.cli.heal++ > docs/heal.md 10 | pydocmd simple glustercli.cli.nfs_ganesha++ > docs/nfs_ganesha.md 11 | pydocmd simple glustercli.cli.rebalance++ > docs/rebalance.md 12 | pydocmd simple glustercli.cli.set_gluster_path > docs/utils.md 13 | pydocmd simple glustercli.cli.set_gluster_socket >> docs/utils.md 14 | pydocmd simple glustercli.cli.set_ssh_host >> docs/utils.md 15 | pydocmd simple glustercli.cli.set_ssh_pem_file >> docs/utils.md 16 | pydocmd simple glustercli.cli.ssh_connection >> docs/utils.md 17 | pydocmd simple glustercli.cli.GlusterCmdException >> docs/utils.md 18 | 19 | pydocmd simple glustercli.metrics.local_processes++ > docs/local_processes.md 20 | pydocmd simple glustercli.metrics.local_utilization++ > docs/local_utilization.md 21 | pydocmd simple glustercli.metrics.local_diskstats++ > docs/local_diskstats.md 22 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # glustercli-python 2 | 3 | Python bindings for [GlusterFS](http://www.gluster.org) CLI and Metrics. 4 | 5 | ## Hello World 6 | 7 | ```python 8 | from glustercli.cli import volume 9 | 10 | # Create a Volume 11 | volume.create("gv1", ["fvm1:/bricks/b1", "fvm2:/bricks/b2"], 12 | force=True) 13 | 14 | # Start Volume 15 | volume.start("gv1") 16 | 17 | # Read Volume Info 18 | volume.info("gv1") 19 | 20 | # Get GlusterFS version 21 | from glustercli.cli import glusterfs_version 22 | print(glusterfs_version()) 23 | ``` 24 | 25 | ## Install 26 | 27 | ``` 28 | sudo pip3 install glustercli 29 | ``` 30 | 31 | ## Documentation 32 | 33 | See [Documentation](docs/README.md) 34 | 35 | Install `pydoc-markdown` package to generate Documentation. 36 | 37 | ``` 38 | sudo pip3 install pydoc-markdown 39 | ``` 40 | -------------------------------------------------------------------------------- /debian/changelog: -------------------------------------------------------------------------------- 1 | glustercli (0.8) unstable; urgency=medium 2 | 3 | [ Caleb St. 
John ] 4 | * initial debian package 5 | 6 | -- Caleb Wed, 05 May 2021 15:10:00 -0400 7 | -------------------------------------------------------------------------------- /debian/compat: -------------------------------------------------------------------------------- 1 | 12 2 | -------------------------------------------------------------------------------- /debian/control: -------------------------------------------------------------------------------- 1 | Source: glustercli 2 | Maintainer: Caleb 3 | Section: python 4 | Priority: optional 5 | Build-Depends: dh-python, python3-setuptools, python3-all, debhelper (>= 12) 6 | Standards-Version: 4.3.0 7 | 8 | 9 | 10 | 11 | 12 | Package: python3-glustercli 13 | Architecture: all 14 | Depends: ${misc:Depends}, ${python3:Depends} 15 | Description: Python bindings for GlusterFS CLI and Metrics collection 16 | glustercli-python 17 | ------------------ 18 | . 19 | This is the official python bindings for the 20 | `GlusterFS `_ CLI. 21 | . 22 | Complete API reference and documentation can be found at 23 | `ReadTheDocs `_. 24 | . 25 | -------------------------------------------------------------------------------- /debian/copyright: -------------------------------------------------------------------------------- 1 | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Upstream-Name: glustercli-python 3 | Upstream-Contact: Aravinda Vishwanathapura 4 | Source: https://github.com/gluster/glustercli-python 5 | 6 | Files: * 7 | Copyright: 2016, Red Hat Inc 8 | License: GPL-2+ and LGPL-3+ 9 | 10 | License: GPL-2+ 11 | This program is free software; you can redistribute it 12 | and/or modify it under the terms of the GNU General Public 13 | License as published by the Free Software Foundation; either 14 | version 2 of the License, or (at your option) any later 15 | version. 16 | . 
17 | This program is distributed in the hope that it will be 18 | useful, but WITHOUT ANY WARRANTY; without even the implied 19 | warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 20 | PURPOSE. See the GNU General Public License for more 21 | details. 22 | . 23 | You should have received a copy of the GNU General Public 24 | License along with this package; if not, write to the Free 25 | Software Foundation, Inc., 51 Franklin St, Fifth Floor, 26 | Boston, MA 02110-1301 USA 27 | . 28 | On Debian systems, the full text of the GNU General Public 29 | License version 2 can be found in the file 30 | `/usr/share/common-licenses/GPL-2`. 31 | 32 | License: LGPL-3+ 33 | This program is free software; you can redistribute it 34 | and/or modify it under the terms of the GNU Lesser General Public 35 | License as published by the Free Software Foundation; either 36 | version 3 of the License, or (at your option) any later 37 | version. 38 | . 39 | This program is distributed in the hope that it will be 40 | useful, but WITHOUT ANY WARRANTY; without even the implied 41 | warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 42 | PURPOSE. See the GNU Lesser General Public License for more 43 | details. 44 | . 45 | You should have received a copy of the GNU Lesser General Public 46 | License along with this package; if not, write to the Free 47 | Software Foundation, Inc., 51 Franklin St, Fifth Floor, 48 | Boston, MA 02110-1301 USA 49 | . 50 | On Debian systems, the full text of the GNU Lesser General Public 51 | License version 3 can be found in the file 52 | `/usr/share/common-licenses/LGPL-3`. 53 | -------------------------------------------------------------------------------- /debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | 3 | %: 4 | dh $@ --with python3 --buildsystem=pybuild 5 | 6 | 7 | override_dh_auto_clean: 8 | python3 setup.py clean -a 9 | find . 
-name \*.pyc -exec rm {} \; 10 | 11 | 12 | 13 | override_dh_auto_build: 14 | python3 setup.py build --force 15 | 16 | 17 | 18 | override_dh_auto_install: 19 | python3 setup.py install --force --root=debian/python3-glustercli --no-compile -O0 --install-layout=deb 20 | 21 | 22 | 23 | override_dh_python2: 24 | dh_python2 --no-guessing-versions 25 | -------------------------------------------------------------------------------- /debian/source/format: -------------------------------------------------------------------------------- 1 | 3.0 (quilt) 2 | -------------------------------------------------------------------------------- /debian/source/options: -------------------------------------------------------------------------------- 1 | extend-diff-ignore="\.egg-info$" -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Python bindings for Gluster Commands and Metrics 2 | 3 | ## CLI bindings 4 | 5 | * [Volume Commands](./volume.md) 6 | * [Bitrot Commands](./bitrot.md) 7 | * [Brick Commands](./bricks.md) 8 | * [Geo-replication Commands](./georep.md) 9 | * [Peer Commands](./peer.md) 10 | * [Quota Commands](./quota.md) 11 | * [Snapshot Commands](./snapshot.md) 12 | * [Heal Commands](./heal.md) 13 | * [NFS Ganesha Commands](./nfs_ganesha.md) 14 | * [Rebalance Commands](./rebalance.md) 15 | * [Utilities](./utils.md) 16 | 17 | ## Metrics 18 | 19 | * [Local Processes](./local_processes.md) 20 | * [Local Utilization](./local_utilization.md) 21 | * [Local DiskStats](./local_diskstats.md) 22 | -------------------------------------------------------------------------------- /docs/bitrot.md: -------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.bitrot 3 | 4 | 5 | ## enable 6 | ```python 7 | enable(volname) 8 | ``` 9 | 10 | Enable Bitrot Feature 11 | 12 | :param volname: Volume Name 13 | :returns: 
Output of Enable command, raises 14 | GlusterCmdException((rc, out, err)) on error 15 | 16 | 17 | ## disable 18 | ```python 19 | disable(volname) 20 | ``` 21 | 22 | Disable Bitrot Feature 23 | 24 | :param volname: Volume Name 25 | :returns: Output of Disable command, raises 26 | GlusterCmdException((rc, out, err)) on error 27 | 28 | 29 | ## scrub_throttle 30 | ```python 31 | scrub_throttle(volname, throttle_type) 32 | ``` 33 | 34 | Configure Scrub Throttle 35 | 36 | :param volname: Volume Name 37 | :param throttle_type: lazy|normal|aggressive 38 | :returns: Output of the command, raises 39 | GlusterCmdException((rc, out, err)) on error 40 | 41 | 42 | ## scrub_frequency 43 | ```python 44 | scrub_frequency(volname, freq) 45 | ``` 46 | 47 | Configure Scrub Frequency 48 | 49 | :param volname: Volume Name 50 | :param freq: hourly|daily|weekly|biweekly|monthly 51 | :returns: Output of the command, raises 52 | GlusterCmdException((rc, out, err)) on error 53 | 54 | 55 | ## scrub_pause 56 | ```python 57 | scrub_pause(volname) 58 | ``` 59 | 60 | Pause Bitrot Scrub 61 | 62 | :param volname: Volume Name 63 | :returns: Output of Pause command, raises 64 | GlusterCmdException((rc, out, err)) on error 65 | 66 | 67 | ## scrub_resume 68 | ```python 69 | scrub_resume(volname) 70 | ``` 71 | 72 | Resume Bitrot Scrub 73 | 74 | :param volname: Volume Name 75 | :returns: Output of the Resume command, raises 76 | GlusterCmdException((rc, out, err)) on error 77 | 78 | 79 | ## scrub_status 80 | ```python 81 | scrub_status(volname) 82 | ``` 83 | 84 | Scrub Status 85 | 86 | :param volname: Volume Name 87 | :returns: Scrub Status, raises 88 | GlusterCmdException((rc, out, err)) on error 89 | 90 | -------------------------------------------------------------------------------- /docs/bricks.md: -------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.bricks 3 | 4 | 5 | ## add 6 | ```python 7 | add(volname, 8 | bricks, 9 | stripe=None, 10 | 
replica=None, 11 | arbiter=None, 12 | force=False) 13 | ``` 14 | 15 | Add Bricks 16 | 17 | :param volname: Volume Name 18 | :param bricks: List of Bricks 19 | :param stripe: Stripe Count 20 | :param replica: Replica Count 21 | :param arbiter: Arbiter Count 22 | :param force: True|False Force Add Bricks 23 | :returns: Output of add-brick command, raises 24 | GlusterCmdException((rc, out, err)) on error 25 | 26 | 27 | ## remove_start 28 | ```python 29 | remove_start(volname, bricks, replica=None, force=False) 30 | ``` 31 | 32 | Remove Bricks start 33 | 34 | :param volname: Volume Name 35 | :param bricks: List of Bricks 36 | :param replica: Replica Count 37 | :param force: True|False Force Remove Bricks 38 | :returns: Output of remove-brick start command, raises 39 | GlusterCmdException((rc, out, err)) on error 40 | 41 | 42 | ## remove_stop 43 | ```python 44 | remove_stop(volname, bricks, replica=None, force=False) 45 | ``` 46 | 47 | Remove Bricks stop 48 | 49 | :param volname: Volume Name 50 | :param bricks: List of Bricks 51 | :param replica: Replica Count 52 | :param force: True|False Force Remove Bricks 53 | :returns: Output of remove-brick stop command, raises 54 | GlusterCmdException((rc, out, err)) on error 55 | 56 | 57 | ## remove_commit 58 | ```python 59 | remove_commit(volname, bricks, replica=None, force=False) 60 | ``` 61 | 62 | Remove Bricks Commit 63 | 64 | :param volname: Volume Name 65 | :param bricks: List of Bricks 66 | :param replica: Replica Count 67 | :param force: True|False Force Remove Bricks 68 | :returns: Output of remove-brick commit command, raises 69 | GlusterCmdException((rc, out, err)) on error 70 | 71 | 72 | ## remove_status 73 | ```python 74 | remove_status(volname, bricks, replica=None, force=False) 75 | ``` 76 | 77 | Remove Bricks status 78 | 79 | :param volname: Volume Name 80 | :param bricks: List of Bricks 81 | :param replica: Replica Count 82 | :param force: True|False Force Remove Bricks 83 | :returns: Remove Bricks Status, 
raises 84 | GlusterCmdException((rc, out, err)) on error 85 | 86 | 87 | ## replace_commit 88 | ```python 89 | replace_commit(volname, source_brick, new_brick, force=False) 90 | ``` 91 | 92 | Replace Bricks 93 | 94 | :param volname: Volume Name 95 | :param source_brick: Source Brick 96 | :param new_brick: New Replacement Brick 97 | :param force: True|False Force Replace Bricks 98 | :returns: Output of replace-brick command, raises 99 | GlusterCmdException((rc, out, err)) on error 100 | 101 | -------------------------------------------------------------------------------- /docs/georep.md: -------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.georep 3 | 4 | 5 | ## gsec_create 6 | ```python 7 | gsec_create(ssh_key_prefix=True) 8 | ``` 9 | 10 | Generate Geo-replication SSH Keys 11 | 12 | :param ssh_key_prefix: True|False Command prefix in generated public keys 13 | :returns: Output of gsec_create command, raises 14 | GlusterCmdException((rc, out, err)) on error 15 | 16 | 17 | ## create 18 | ```python 19 | create(primary_volume, secondary_host, secondary_volume, secondary_user='root', 20 | push_pem=True, 21 | no_verify=False, 22 | force=False, 23 | ssh_port=22) 24 | ``` 25 | 26 | Create Geo-replication Session 27 | 28 | :param primary_volume: Primary Volume Name 29 | :param secondary_host: Secondary Hostname or IP 30 | :param secondary_volume: Secondary Volume 31 | :param secondary_user: Secondary User, default is "root" 32 | :param push_pem: True|False Push SSH keys to Secondary 33 | :param no_verify: True|False Skip the Secondary Verification 34 | process before create 35 | :param force: True|False Force Create Session 36 | :param ssh_port: SSH Port, Default is 22 37 | :returns: Output of Create command, raises 38 | GlusterCmdException((rc, out, err)) on error 39 | 40 | 41 | ## start 42 | ```python 43 | start(primary_volume, secondary_host, secondary_volume, secondary_user='root', force=False) 44 | ``` 45 | 46 | Start 
Geo-replication Session 47 | 48 | :param primary_volume: Primary Volume Name 49 | :param secondary_host: Secondary Hostname or IP 50 | :param secondary_volume: Secondary Volume 51 | :param secondary_user: Secondary User, default is "root" 52 | :param force: True|False Force Start the Session 53 | :returns: Output of Start command, raises 54 | GlusterCmdException((rc, out, err)) on error 55 | 56 | 57 | ## stop 58 | ```python 59 | stop(primary_volume, secondary_host, secondary_volume, secondary_user='root', force=False) 60 | ``` 61 | 62 | Stop Geo-replication Session 63 | 64 | :param primary_volume: Primary Volume Name 65 | :param secondary_host: Secondary Hostname or IP 66 | :param secondary_volume: Secondary Volume 67 | :param secondary_user: Secondary User, default is "root" 68 | :param force: True|False Force Stop the Session 69 | :returns: Output of Stop command, raises 70 | GlusterCmdException((rc, out, err)) on error 71 | 72 | 73 | ## restart 74 | ```python 75 | restart(primary_volume, secondary_host, secondary_volume, secondary_user='root', force=False) 76 | ``` 77 | 78 | Restart Geo-replication Session 79 | 80 | :param primary_volume: Primary Volume Name 81 | :param secondary_host: Secondary Hostname or IP 82 | :param secondary_volume: Secondary Volume 83 | :param secondary_user: Secondary User, default is "root" 84 | :param force: True|False Force Start the Session 85 | :returns: Output of Start command, raises 86 | GlusterCmdException((rc, out, err)) on error 87 | 88 | 89 | ## delete 90 | ```python 91 | delete(primary_volume, secondary_host, secondary_volume, secondary_user='root', 92 | reset_sync_time=None) 93 | ``` 94 | 95 | Delete Geo-replication Session 96 | 97 | :param primary_volume: Primary Volume Name 98 | :param secondary_host: Secondary Hostname or IP 99 | :param secondary_volume: Secondary Volume 100 | :param secondary_user: Secondary User, default is "root" 101 | :param reset_sync_time: True|False Reset Sync time on delete 102 | :returns: 
Output of Start command, raises 103 | GlusterCmdException((rc, out, err)) on error 104 | 105 | 106 | ## pause 107 | ```python 108 | pause(primary_volume, secondary_host, secondary_volume, secondary_user='root', force=False) 109 | ``` 110 | 111 | Pause Geo-replication Session 112 | 113 | :param primary_volume: Primary Volume Name 114 | :param secondary_host: Secondary Hostname or IP 115 | :param secondary_volume: Secondary Volume 116 | :param secondary_user: Secondary User, default is "root" 117 | :param force: True|False Force Pause Session 118 | :returns: Output of Pause command, raises 119 | GlusterCmdException((rc, out, err)) on error 120 | 121 | 122 | ## resume 123 | ```python 124 | resume(primary_volume, secondary_host, secondary_volume, secondary_user='root', force=False) 125 | ``` 126 | 127 | Resume Geo-replication Session 128 | 129 | :param primary_volume: Primary Volume Name 130 | :param secondary_host: Secondary Hostname or IP 131 | :param secondary_volume: Secondary Volume 132 | :param secondary_user: Secondary User, default is "root" 133 | :param force: True|False Force Resume Session 134 | :returns: Output of Resume command, raises 135 | GlusterCmdException((rc, out, err)) on error 136 | 137 | 138 | ## config_set 139 | ```python 140 | config_set(primary_volume, secondary_host, secondary_volume, 141 | key, 142 | value, 143 | secondary_user='root') 144 | ``` 145 | 146 | Set Config of a Geo-replication Session 147 | 148 | :param primary_volume: Primary Volume Name 149 | :param secondary_host: Secondary Hostname or IP 150 | :param secondary_volume: Secondary Volume 151 | :param secondary_user: Secondary User, default is "root" 152 | :param key: Config Key 153 | :param value: Config Value 154 | :returns: Output of Config set command, raises 155 | GlusterCmdException((rc, out, err)) on error 156 | 157 | 158 | ## config_reset 159 | ```python 160 | config_reset(primary_volume, secondary_host, secondary_volume, key, secondary_user='root') 161 | ``` 162 | 163 | 
Reset configuration of Geo-replication Session 164 | 165 | :param primary_volume: Primary Volume Name 166 | :param secondary_host: Secondary Hostname or IP 167 | :param secondary_volume: Secondary Volume 168 | :param secondary_user: Secondary User, default is "root" 169 | :param key: Config Key 170 | :returns: Output of Config reset command, raises 171 | GlusterCmdException((rc, out, err)) on error 172 | 173 | 174 | ## config_get 175 | ```python 176 | config_get(primary_volume, secondary_host, secondary_volume, key=None, secondary_user='root') 177 | ``` 178 | 179 | Get Configuration of Geo-replication Session 180 | 181 | :param primary_volume: Primary Volume Name 182 | :param secondary_host: Secondary Hostname or IP 183 | :param secondary_volume: Secondary Volume 184 | :param secondary_user: Secondary User, default is "root" 185 | :param key: Config Key 186 | :returns: Geo-rep session Config Values, raises 187 | GlusterCmdException((rc, out, err)) on error 188 | 189 | 190 | ## status 191 | ```python 192 | status(primary_volume=None, secondary_host=None, secondary_volume=None, secondary_user='root') 193 | ``` 194 | 195 | Status of Geo-replication Session 196 | 197 | :param primary_volume: Primary Volume Name 198 | :param secondary_host: Secondary Hostname or IP 199 | :param secondary_volume: Secondary Volume 200 | :param secondary_user: Secondary User, default is "root" 201 | :returns: Geo-replication Status, raises 202 | GlusterCmdException((rc, out, err)) on error 203 | 204 | -------------------------------------------------------------------------------- /docs/heal.md: -------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.heal 3 | 4 | 5 | ## enable 6 | ```python 7 | enable(volname) 8 | ``` 9 | 10 | Enable Volume Heal 11 | 12 | :param volname: Volume Name 13 | :returns: Output of Enable command, raises 14 | GlusterCmdException((rc, out, err)) on error 15 | 16 | 17 | ## disable 18 | ```python 19 | disable(volname) 
20 | ``` 21 | 22 | Disable Volume Heal 23 | 24 | :param volname: Volume Name 25 | :returns: Output of Disable command, raises 26 | GlusterCmdException((rc, out, err)) on error 27 | 28 | 29 | ## full 30 | ```python 31 | full(volname) 32 | ``` 33 | 34 | Full Volume Heal 35 | 36 | :param volname: Volume Name 37 | :returns: Output of Full Heal command, raises 38 | GlusterCmdException((rc, out, err)) on error 39 | 40 | 41 | ## statistics 42 | ```python 43 | statistics(volname) 44 | ``` 45 | 46 | Get Statistics of Heal 47 | 48 | :param volname: Volume Name 49 | :returns: Output of Statistics command, raises 50 | GlusterCmdException((rc, out, err)) on error 51 | 52 | 53 | ## info 54 | ```python 55 | info(volname, info_type=None) 56 | ``` 57 | 58 | Get Volume Heal Info 59 | 60 | :param volname: Volume Name 61 | :returns: Output of Heal Info command, raises 62 | GlusterCmdException((rc, out, err)) on error 63 | 64 | 65 | ## split_brain 66 | ```python 67 | split_brain(volname, 68 | bigger_file=None, 69 | latest_mtime=None, 70 | source_brick=None, 71 | path=None) 72 | ``` 73 | 74 | Split Brain Resolution 75 | 76 | :param volname: Volume Name 77 | :param bigger_file: File Path of Bigger file 78 | :param latest_mtime: File Path of Latest mtime 79 | :param source_brick: Source Brick for Good Copy 80 | :param path: Resolution of this path/file 81 | :returns: Output of Split-brain command, raises 82 | GlusterCmdException((rc, out, err)) on error 83 | 84 | -------------------------------------------------------------------------------- /docs/local_diskstats.md: -------------------------------------------------------------------------------- 1 | 2 | # local_diskstats 3 | ```python 4 | local_diskstats(volname=None) 5 | ``` 6 | 7 | Collect Diskstats info of local bricks 8 | 9 | :param volname: Volume Name 10 | :returns: List of diskstats information 11 | { 12 | "volume": VOLUME_NAME, 13 | "brick_index": BRICK_INDEX_IN_VOL_INFO, 14 | "node_id": NODE_ID, 15 | "brick": BRICK_NAME, 16 | 
"fs": BRICK_FILESYSTEM, 17 | "device": BRICK_DEVICE, 18 | "major_number": MAJOR_NUMBER, 19 | "minor_number": MINOR_NUMBER, 20 | "reads_completed": READS_COMPLETED, 21 | "reads_merged": READS_MERGED, 22 | "sectors_read": SECTORS_READ, 23 | "time_spent_reading": TIME_SPENT_READING, 24 | "writes_completed": WRITES_COMPLETED, 25 | "writes_merged": WRITES_MERGED, 26 | "sectors_written": SECTORS_WRITTEN, 27 | "time_spent_writing": TIME_SPENT_WRITING, 28 | "ios_currently_in_progress": IOS_CURRENTLY_IN_PROGRESS, 29 | "time_spent_doing_ios": TIME_SPENT_DOING_IOS, 30 | "weighted_time_spent_doing_ios": WEIGHTED_TIME_SPENT_DOING_IOS 31 | } 32 | 33 | -------------------------------------------------------------------------------- /docs/local_processes.md: -------------------------------------------------------------------------------- 1 | 2 | # local_processes 3 | ```python 4 | local_processes() 5 | ``` 6 | 7 | -------------------------------------------------------------------------------- /docs/local_utilization.md: -------------------------------------------------------------------------------- 1 | 2 | # local_utilization 3 | ```python 4 | local_utilization(volname=None) 5 | ``` 6 | 7 | Collect Utilization details of local bricks 8 | 9 | :param volname: Volume Name 10 | :returns: List of utilization information 11 | { 12 | "volume": VOLUME_NAME, 13 | "brick_index": BRICK_INDEX_IN_VOL_INFO, 14 | "node_id": NODE_ID, 15 | "brick": BRICK_NAME, 16 | "block_size": ST_F_FRSIZE, 17 | "blocks_total": ST_F_BLOCKS, 18 | "blocks_free": ST_F_BFREE, 19 | "blocks_avail": ST_F_BAVAIL, 20 | "inodes_total": ST_F_FILES, 21 | "inodes_free": ST_F_FFREE, 22 | "inodes_avail": ST_F_FAVAIL 23 | } 24 | 25 | -------------------------------------------------------------------------------- /docs/nfs_ganesha.md: -------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.nfs_ganesha 3 | 4 | 5 | ## enable 6 | ```python 7 | enable() 8 | ``` 9 | 10 | Enable NFS 
Ganesha 11 | 12 | :returns: Output of Enable command, raises 13 | GlusterCmdException((rc, out, err)) on error 14 | 15 | 16 | ## disable 17 | ```python 18 | disable() 19 | ``` 20 | 21 | Disable NFS Ganesha 22 | 23 | :returns: Output of Disable command, raises 24 | GlusterCmdException((rc, out, err)) on error 25 | 26 | -------------------------------------------------------------------------------- /docs/peer.md: -------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.peer 3 | 4 | 5 | ## probe 6 | ```python 7 | probe(host) 8 | ``` 9 | 10 | Add Host to Cluster 11 | 12 | :param host: Hostname or IP 13 | :returns: Output of peer probe command, raises 14 | GlusterCmdException((rc, out, err)) on error 15 | 16 | 17 | ## attach 18 | ```python 19 | attach(host) 20 | ``` 21 | 22 | Add Host to Cluster, alias for probe 23 | 24 | :param host: Hostname or IP 25 | :returns: Output of peer probe command, raises 26 | GlusterCmdException((rc, out, err)) on error 27 | 28 | 29 | ## detach 30 | ```python 31 | detach(host) 32 | ``` 33 | 34 | Remove Host from Cluster 35 | 36 | :param host: Hostname or IP 37 | :returns: Output of peer detach command, raises 38 | GlusterCmdException((rc, out, err)) on error 39 | 40 | 41 | ## detach_all 42 | ```python 43 | detach_all() 44 | ``` 45 | 46 | Removes All Hosts from Cluster 47 | 48 | :returns: Output of peer detach command, raises 49 | GlusterCmdException((rc, out, err)) on error 50 | 51 | 52 | ## status 53 | ```python 54 | status() 55 | ``` 56 | 57 | Peer Status of Cluster 58 | 59 | :returns: Output of peer status command, raises 60 | GlusterCmdException((rc, out, err)) on error 61 | 62 | 63 | ## pool 64 | ```python 65 | pool() 66 | ``` 67 | 68 | Cluster Pool Status 69 | 70 | :returns: Pool list and status, raises 71 | GlusterCmdException((rc, out, err)) on error 72 | 73 | -------------------------------------------------------------------------------- /docs/quota.md: 
-------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.quota 3 | 4 | 5 | ## inode_quota_enable 6 | ```python 7 | inode_quota_enable(volname) 8 | ``` 9 | 10 | Enable Inode Quota 11 | 12 | :param volname: Volume Name 13 | :returns: Output of inode-quota Enable command, raises 14 | GlusterCmdException((rc, out, err)) on error 15 | 16 | 17 | ## enable 18 | ```python 19 | enable(volname) 20 | ``` 21 | 22 | Enable Quota 23 | 24 | :param volname: Volume Name 25 | :returns: Output of quota Enable command, raises 26 | GlusterCmdException((rc, out, err)) on error 27 | 28 | 29 | ## disable 30 | ```python 31 | disable(volname) 32 | ``` 33 | 34 | Disable Inode Quota 35 | 36 | :param volname: Volume Name 37 | :returns: Output of quota Disable command, raises 38 | GlusterCmdException((rc, out, err)) on error 39 | 40 | 41 | ## list_paths 42 | ```python 43 | list_paths(volname, paths=[]) 44 | ``` 45 | 46 | Get Quota List 47 | 48 | :param volname: Volume Name 49 | :param paths: Optional list of paths 50 | :returns: Quota list of paths, raises 51 | GlusterCmdException((rc, out, err)) on error 52 | 53 | 54 | ## list_objects 55 | ```python 56 | list_objects(volname, paths=[]) 57 | ``` 58 | 59 | Get Quota Objects List 60 | 61 | :param volname: Volume Name 62 | :param paths: Optional list of paths 63 | :returns: Quota list of objects, raises 64 | GlusterCmdException((rc, out, err)) on error 65 | 66 | 67 | ## remove_path 68 | ```python 69 | remove_path(volname, path) 70 | ``` 71 | 72 | Remove Path from Quota list 73 | 74 | :param volname: Volume Name 75 | :param path: Path to remove from quota 76 | :returns: Output of Quota remove-path, raises 77 | GlusterCmdException((rc, out, err)) on error 78 | 79 | 80 | ## remove_objects 81 | ```python 82 | remove_objects(volname, path) 83 | ``` 84 | 85 | Remove Objects for a given path 86 | 87 | :param volname: Volume Name 88 | :param path: Path to remove from quota 89 | :returns: Output of Quota 
remove-objects, raises 90 | GlusterCmdException((rc, out, err)) on error 91 | 92 | 93 | ## default_soft_limit 94 | ```python 95 | default_soft_limit(volname, percent) 96 | ``` 97 | 98 | Set default soft limit 99 | 100 | :param volname: Volume Name 101 | :param percent: Percent of soft limit 102 | :returns: Output of the command, raises 103 | GlusterCmdException((rc, out, err)) on error 104 | 105 | 106 | ## limit_usage 107 | ```python 108 | limit_usage(volname, path, size, percent=None) 109 | ``` 110 | 111 | Limit quota usage 112 | 113 | :param volname: Volume Name 114 | :param path: Path to limit quota 115 | :param size: Limit Size 116 | :param percent: Percentage 117 | :returns: Output of the command, raises 118 | GlusterCmdException((rc, out, err)) on error 119 | 120 | 121 | ## limit_objects 122 | ```python 123 | limit_objects(volname, path, num, percent=None) 124 | ``` 125 | 126 | Limit objects 127 | 128 | :param volname: Volume Name 129 | :param path: Path to limit quota 130 | :param num: Limit Number 131 | :param percent: Percentage 132 | :returns: Output of the command, raises 133 | GlusterCmdException((rc, out, err)) on error 134 | 135 | 136 | ## alert_time 137 | ```python 138 | alert_time(volname, a_time) 139 | ``` 140 | 141 | Set Alert Time 142 | 143 | :param volname: Volume Name 144 | :param a_time: Alert Time Value 145 | :returns: Output of the command, raises 146 | GlusterCmdException((rc, out, err)) on error 147 | 148 | 149 | ## soft_timeout 150 | ```python 151 | soft_timeout(volname, timeout) 152 | ``` 153 | 154 | Set Soft Timeout 155 | 156 | :param volname: Volume Name 157 | :param timeout: Timeout Value 158 | :returns: Output of the command, raises 159 | GlusterCmdException((rc, out, err)) on error 160 | 161 | 162 | ## hard_timeout 163 | ```python 164 | hard_timeout(volname, timeout) 165 | ``` 166 | 167 | Set Hard Timeout 168 | 169 | :param volname: Volume Name 170 | :param timeout: Timeout Value 171 | :returns: Output of the command, raises 172 
| GlusterCmdException((rc, out, err)) on error 173 | 174 | -------------------------------------------------------------------------------- /docs/rebalance.md: -------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.rebalance 3 | 4 | 5 | ## fix_layout_start 6 | ```python 7 | fix_layout_start(volname) 8 | ``` 9 | 10 | Fix Layout Rebalance Start 11 | 12 | :param volname: Volume Name 13 | :returns: Output of the command, raises 14 | GlusterCmdException((rc, out, err)) on error 15 | 16 | 17 | ## start 18 | ```python 19 | start(volname, force=False) 20 | ``` 21 | 22 | Rebalance Start 23 | 24 | :param volname: Volume Name 25 | :param force: True|False Force start the rebalance 26 | :returns: Output of the command, raises 27 | GlusterCmdException((rc, out, err)) on error 28 | 29 | 30 | ## stop 31 | ```python 32 | stop(volname) 33 | ``` 34 | 35 | Rebalance Stop 36 | 37 | :param volname: Volume Name 38 | :returns: Output of the command, raises 39 | GlusterCmdException((rc, out, err)) on error 40 | 41 | 42 | ## status 43 | ```python 44 | status(volname) 45 | ``` 46 | 47 | Rebalance Status 48 | 49 | :param volname: Volume Name 50 | :returns: Rebalance Status, raises 51 | GlusterCmdException((rc, out, err)) on error 52 | 53 | -------------------------------------------------------------------------------- /docs/snapshot.md: -------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.snapshot 3 | 4 | 5 | ## activate 6 | ```python 7 | activate(snapname, force=False) 8 | ``` 9 | 10 | Activate Snapshot 11 | 12 | :param snapname: Snapshot Name 13 | :param force: True|False Force Activate the snapshot 14 | :returns: Output of the command, raises 15 | GlusterCmdException((rc, out, err)) on error 16 | 17 | 18 | ## clone 19 | ```python 20 | clone(clonename, snapname) 21 | ``` 22 | 23 | Clone the Snapshot 24 | 25 | :param clonename: Snapshot Clone Name 26 | :param snapname: Snapshot Name 27 | 
:returns: Output of the command, raises 28 | GlusterCmdException((rc, out, err)) on error 29 | 30 | 31 | ## create 32 | ```python 33 | create(volname, 34 | snapname, 35 | no_timestamp=False, 36 | description='', 37 | force=False) 38 | ``` 39 | 40 | Create Snapshot 41 | 42 | :param volname: Volume Name 43 | :param snapname: Snapshot Name 44 | :param no_timestamp: True|False Do not add Timestamp to name 45 | :param description: Description for Created Snapshot 46 | :param force: True|False Force Create the snapshot 47 | :returns: Output of the command, raises 48 | GlusterCmdException((rc, out, err)) on error 49 | 50 | 51 | ## deactivate 52 | ```python 53 | deactivate(snapname) 54 | ``` 55 | 56 | Deactivate the Snapshot 57 | 58 | :param snapname: Snapshot Name 59 | :returns: Output of the command, raises 60 | GlusterCmdException((rc, out, err)) on error 61 | 62 | 63 | ## delete 64 | ```python 65 | delete(snapname=None, volname=None) 66 | ``` 67 | 68 | Delete Snapshot 69 | 70 | :param snapname: Snapshot Name 71 | :param volname: Volume Name 72 | :returns: Output of the command, raises 73 | GlusterCmdException((rc, out, err)) on error 74 | 75 | 76 | ## info 77 | ```python 78 | info(snapname=None, volname=None) 79 | ``` 80 | 81 | Snapshot Info 82 | 83 | :param snapname: Snapshot Name 84 | :param volname: Volume Name 85 | :returns: Snapshot Info, raises 86 | GlusterCmdException((rc, out, err)) on error 87 | 88 | 89 | ## snaplist 90 | ```python 91 | snaplist(volname=None) 92 | ``` 93 | 94 | List of Snapshots 95 | 96 | :param volname: Volume Name 97 | :returns: Output of the command, raises 98 | GlusterCmdException((rc, out, err)) on error 99 | 100 | 101 | ## restore 102 | ```python 103 | restore(snapname) 104 | ``` 105 | 106 | Restore Snapshot 107 | 108 | :param snapname: Snapshot Name 109 | :returns: Output of the command, raises 110 | GlusterCmdException((rc, out, err)) on error 111 | 112 | 113 | ## status 114 | ```python 115 | status(snapname=None, volname=None) 116 | 
``` 117 | 118 | Snapshot Status 119 | 120 | :param snapname: Snapshot Name 121 | :param volname: Volume Name 122 | :returns: Output of the command, raises 123 | GlusterCmdException((rc, out, err)) on error 124 | 125 | 126 | ## config 127 | ```python 128 | config(volname, 129 | snap_max_hard_limit=None, 130 | snap_max_soft_limit=None, 131 | auto_delete=None, 132 | activate_on_create=None) 133 | ``` 134 | 135 | Set Snapshot Config 136 | 137 | :param volname: Volume Name 138 | :param snap_max_hard_limit: Number of Snapshots hard limit 139 | :param snap_max_soft_limit: Number of Snapshots soft limit 140 | :param auto_delete: True|False Auto delete old snapshots 141 | :param activate_on_create: True|False Activate Snapshot after Create 142 | :returns: Output of the command, raises 143 | GlusterCmdException((rc, out, err)) on error 144 | 145 | -------------------------------------------------------------------------------- /docs/utils.md: -------------------------------------------------------------------------------- 1 | 2 | # set_gluster_path 3 | ```python 4 | set_gluster_path(path) 5 | ``` 6 | 7 | 8 | # set_gluster_socket 9 | ```python 10 | set_gluster_socket(path) 11 | ``` 12 | 13 | 14 | # set_ssh_host 15 | ```python 16 | set_ssh_host(hostname) 17 | ``` 18 | 19 | 20 | # set_ssh_pem_file 21 | ```python 22 | set_ssh_pem_file(pem_file) 23 | ``` 24 | 25 | 26 | # ssh_connection 27 | ```python 28 | ssh_connection(*args, **kwds) 29 | ``` 30 | 31 | 32 | # GlusterCmdException 33 | ```python 34 | GlusterCmdException() 35 | ``` 36 | 37 | -------------------------------------------------------------------------------- /docs/volume.md: -------------------------------------------------------------------------------- 1 | 2 | # glustercli.cli.volume 3 | 4 | 5 | ## start 6 | ```python 7 | start(volname, force=False) 8 | ``` 9 | 10 | Start Gluster Volume 11 | 12 | :param volname: Volume Name 13 | :param force: (True|False) Start Volume with Force option 14 | :returns: Output of Start 
command, raises 15 | GlusterCmdException((rc, out, err)) on error 16 | 17 | 18 | ## stop 19 | ```python 20 | stop(volname, force=False) 21 | ``` 22 | 23 | Stop Gluster Volume 24 | 25 | :param volname: Volume Name 26 | :param force: (True|False) Stop Volume with Force option 27 | :returns: Output of Stop command, raises 28 | GlusterCmdException((rc, out, err)) on error 29 | 30 | 31 | ## restart 32 | ```python 33 | restart(volname, force=False) 34 | ``` 35 | 36 | Restart Gluster Volume, Wrapper around two calls stop and start 37 | 38 | :param volname: Volume Name 39 | :param force: (True|False) Restart Volume with Force option 40 | :returns: Output of Start command, raises 41 | GlusterCmdException((rc, out, err)) on error 42 | 43 | 44 | ## delete 45 | ```python 46 | delete(volname) 47 | ``` 48 | 49 | Delete Gluster Volume 50 | 51 | :param volname: Volume Name 52 | :returns: Output of Delete command, raises 53 | GlusterCmdException((rc, out, err)) on error 54 | 55 | 56 | ## create 57 | ```python 58 | create(volname, 59 | volbricks, 60 | replica=0, 61 | stripe=0, 62 | arbiter=0, 63 | disperse=0, 64 | disperse_data=0, 65 | redundancy=0, 66 | transport='tcp', 67 | force=False) 68 | ``` 69 | 70 | Create Gluster Volume 71 | 72 | :param volname: Volume Name 73 | :param volbricks: List of Brick paths(HOSTNAME:PATH) 74 | :param replica: Number of Replica bricks 75 | :param stripe: Number of Stripe bricks 76 | :param arbiter: Number of Arbiter bricks 77 | :param disperse: Number of disperse bricks 78 | :param disperse_data: Number of disperse data bricks 79 | :param redundancy: Number of Redundancy bricks 80 | :param transport: Transport mode(tcp|rdma|tcp,rdma) 81 | :param force: (True|False) Create Volume with Force option 82 | :returns: Output of Create command, raises 83 | GlusterCmdException((rc, out, err)) on error 84 | 85 | 86 | ## info 87 | ```python 88 | info(volname=None, group_subvols=False) 89 | ``` 90 | 91 | Get Gluster Volume Info 92 | 93 | :param volname: Volume 
Name 94 | :param group_subvols: Show Subvolume Information in Groups 95 | :returns: Returns Volume Info, raises 96 | GlusterCmdException((rc, out, err)) on error 97 | 98 | 99 | ## status_detail 100 | ```python 101 | status_detail(volname=None, group_subvols=False) 102 | ``` 103 | 104 | Get Gluster Volume Status 105 | 106 | :param volname: Volume Name 107 | :param group_subvols: Show Subvolume Information in Groups 108 | :returns: Returns Volume Status, raises 109 | GlusterCmdException((rc, out, err)) on error 110 | 111 | 112 | ## optset 113 | ```python 114 | optset(volname, opts) 115 | ``` 116 | 117 | Set Volume Options 118 | 119 | :param volname: Volume Name 120 | :param opts: Dict with config key as dict key and config value as value 121 | :returns: Output of Volume Set command, raises 122 | GlusterCmdException((rc, out, err)) on error 123 | 124 | 125 | ## optget 126 | ```python 127 | optget(volname, opt='all') 128 | ``` 129 | 130 | Get Volume Options 131 | 132 | :param volname: Volume Name 133 | :param opt: Option Name 134 | :returns: List of Volume Options, raises 135 | GlusterCmdException((rc, out, err)) on error 136 | 137 | 138 | ## optreset 139 | ```python 140 | optreset(volname, opt=None, force=False) 141 | ``` 142 | 143 | Reset Volume Options 144 | 145 | :param volname: Volume Name 146 | :param opt: Option name to reset, else reset all 147 | :param force: Force reset options 148 | :returns: Output of Volume Reset command, raises 149 | GlusterCmdException((rc, out, err)) on error 150 | 151 | 152 | ## vollist 153 | ```python 154 | vollist() 155 | ``` 156 | 157 | Volumes List 158 | 159 | :returns: List of Volumes, raises 160 | GlusterCmdException((rc, out, err)) on error 161 | 162 | 163 | ## log_rotate 164 | ```python 165 | log_rotate(volname, brick) 166 | ``` 167 | 168 | Brick log rotate 169 | 170 | :param volname: Volume Name 171 | :param brick: Brick Path 172 | :returns: Output of Log rotate command, raises 173 | GlusterCmdException((rc, out, err)) on 
error 174 | 175 | 176 | ## sync 177 | ```python 178 | sync(hostname, volname=None) 179 | ``` 180 | 181 | Sync the volume information from a peer 182 | 183 | :param hostname: Hostname to sync from 184 | :param volname: Volume Name 185 | :returns: Output of Sync command, raises 186 | GlusterCmdException((rc, out, err)) on error 187 | 188 | 189 | ## clear_locks 190 | ```python 191 | clear_locks(volname, 192 | path, 193 | kind, 194 | inode_range=None, 195 | entry_basename=None, 196 | posix_range=None) 197 | ``` 198 | 199 | Clear locks held on path 200 | 201 | :param volname: Volume Name 202 | :param path: Locked Path 203 | :param kind: Lock Kind(blocked|granted|all) 204 | :param inode_range: Inode Range 205 | :param entry_basename: Entry Basename 206 | :param posix_range: Posix Range 207 | :returns: Output of Clear locks command, raises 208 | GlusterCmdException((rc, out, err)) on error 209 | 210 | 211 | ## barrier_enable 212 | ```python 213 | barrier_enable(volname) 214 | ``` 215 | 216 | Enable Barrier 217 | 218 | :param volname: Volume Name 219 | :returns: Output of Barrier command, raises 220 | GlusterCmdException((rc, out, err)) on error 221 | 222 | 223 | ## barrier_disable 224 | ```python 225 | barrier_disable(volname) 226 | ``` 227 | 228 | Disable Barrier 229 | 230 | :param volname: Volume Name 231 | :returns: Output of Barrier command, raises 232 | GlusterCmdException((rc, out, err)) on error 233 | 234 | 235 | ## profile_start 236 | ```python 237 | profile_start(volname) 238 | ``` 239 | 240 | Start Profile 241 | 242 | :param volname: Volume Name 243 | :return: Output of Profile command, raises 244 | GlusterCmdException((rc, out, err)) on error 245 | 246 | 247 | ## profile_stop 248 | ```python 249 | profile_stop(volname) 250 | ``` 251 | 252 | Stop Profile 253 | 254 | :param volname: Volume Name 255 | :return: Output of Profile command, raises 256 | GlusterCmdException((rc, out, err)) on error 257 | 258 | 259 | ## profile_info 260 | ```python 261 | 
profile_info(volname, opt, peek=False) 262 | ``` 263 | 264 | Get Profile info 265 | 266 | :param volname: Volume Name 267 | :param opt: Operation type of info, 268 | like peek, incremental, cumulative, clear 269 | :param peek: Use peek or not, default is False 270 | :return: Return profile info, raises 271 | GlusterCmdException((rc, out, err)) on error 272 | 273 | -------------------------------------------------------------------------------- /glustercli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gluster/glustercli-python/920ddcec3d774de28a7f8cf04e033f0d0fa40c13/glustercli/__init__.py -------------------------------------------------------------------------------- /glustercli/cli/Makefile.am: -------------------------------------------------------------------------------- 1 | EXTRA_DIST= __init__.py volume.py utils.py snapshot.py quota.py \ 2 | peer.py parsers.py georep.py bricks.py bitrot.py heal.py \ 3 | nfs_ganesha.py rebalance.py tier.py 4 | 5 | cliutilsdir = @BUILD_PYTHON_SITE_PACKAGES@/glustercli 6 | 7 | cliutils_PYTHON = __init__.py volume.py utils.py snapshot.py quota.py \ 8 | peer.py parsers.py georep.py bricks.py bitrot.py heal.py \ 9 | nfs_ganesha.py rebalance.py tier.py 10 | -------------------------------------------------------------------------------- /glustercli/cli/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from glustercli.cli import volume 4 | from glustercli.cli import bitrot 5 | from glustercli.cli import bricks 6 | from glustercli.cli import georep 7 | from glustercli.cli import peer 8 | from glustercli.cli import quota 9 | from glustercli.cli import snapshot 10 | from glustercli.cli import heal 11 | from glustercli.cli import nfs_ganesha 12 | from glustercli.cli import rebalance 13 | from glustercli.cli.gluster_version import glusterfs_version 14 | 15 | from 
glustercli.cli.utils import (set_gluster_path, 16 | set_gluster_socket, 17 | set_ssh_host, 18 | set_ssh_pem_file, 19 | ssh_connection, 20 | GlusterCmdException) 21 | 22 | # Reexport 23 | __all__ = ["volume", 24 | "bitrot", 25 | "bricks", 26 | "georep", 27 | "peer", 28 | "quota", 29 | "snapshot", 30 | "heal", 31 | "nfs_ganesha", 32 | "rebalance", 33 | "set_gluster_path", 34 | "set_gluster_socket", 35 | "set_ssh_host", 36 | "set_ssh_pem_file", 37 | "ssh_connection", 38 | "GlusterCmdException"] 39 | -------------------------------------------------------------------------------- /glustercli/cli/bitrot.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from glustercli.cli.utils import bitrot_execute, bitrot_execute_xml, \ 4 | GlusterCmdException 5 | from glustercli.cli.parsers import parse_bitrot_scrub_status 6 | 7 | THROTTLE_TYPES = ["lazy", "normal", "aggressive"] 8 | FREQUENCY_TYPES = ["hourly", "daily", "weekly", "biweekly", "monthly"] 9 | 10 | 11 | def enable(volname): 12 | """ 13 | Enable Bitrot Feature 14 | 15 | :param volname: Volume Name 16 | :returns: Output of Enable command, raises 17 | GlusterCmdException((rc, out, err)) on error 18 | """ 19 | cmd = [volname, "enable"] 20 | return bitrot_execute(cmd) 21 | 22 | 23 | def disable(volname): 24 | """ 25 | Disable Bitrot Feature 26 | 27 | :param volname: Volume Name 28 | :returns: Output of Disable command, raises 29 | GlusterCmdException((rc, out, err)) on error 30 | """ 31 | cmd = [volname, "disable"] 32 | return bitrot_execute(cmd) 33 | 34 | 35 | def scrub_throttle(volname, throttle_type): 36 | """ 37 | Configure Scrub Throttle 38 | 39 | :param volname: Volume Name 40 | :param throttle_type: lazy|normal|aggressive 41 | :returns: Output of the command, raises 42 | GlusterCmdException((rc, out, err)) on error 43 | """ 44 | if throttle_type.lower() not in THROTTLE_TYPES: 45 | raise GlusterCmdException((-1, "", "Invalid Scrub Throttle Type")) 
46 | cmd = [volname, "scrub-throttle", throttle_type.lower()] 47 | return bitrot_execute(cmd) 48 | 49 | 50 | def scrub_frequency(volname, freq): 51 | """ 52 | Configure Scrub Frequency 53 | 54 | :param volname: Volume Name 55 | :param freq: hourly|daily|weekly|biweekly|monthly 56 | :returns: Output of the command, raises 57 | GlusterCmdException((rc, out, err)) on error 58 | """ 59 | if freq.lower() not in FREQUENCY_TYPES: 60 | raise GlusterCmdException((-1, "", "Invalid Scrub Frequency")) 61 | cmd = [volname, "scrub-frequency", freq] 62 | return bitrot_execute(cmd) 63 | 64 | 65 | def scrub_pause(volname): 66 | """ 67 | Pause Bitrot Scrub 68 | 69 | :param volname: Volume Name 70 | :returns: Output of Pause command, raises 71 | GlusterCmdException((rc, out, err)) on error 72 | """ 73 | cmd = [volname, "scrub", "pause"] 74 | return bitrot_execute(cmd) 75 | 76 | 77 | def scrub_resume(volname): 78 | """ 79 | Resume Bitrot Scrub 80 | 81 | :param volname: Volume Name 82 | :returns: Output of the Resume command, raises 83 | GlusterCmdException((rc, out, err)) on error 84 | """ 85 | cmd = [volname, "scrub", "resume"] 86 | return bitrot_execute(cmd) 87 | 88 | 89 | def scrub_status(volname): 90 | """ 91 | Scrub Status 92 | 93 | :param volname: Volume Name 94 | :returns: Scrub Status, raises 95 | GlusterCmdException((rc, out, err)) on error 96 | """ 97 | cmd = [volname, "scrub", "status"] 98 | return parse_bitrot_scrub_status(bitrot_execute_xml(cmd)) 99 | -------------------------------------------------------------------------------- /glustercli/cli/bricks.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from glustercli.cli.utils import volume_execute, volume_execute_xml 4 | from glustercli.cli.parsers import parse_remove_brick_status 5 | 6 | 7 | def add(volname, bricks, stripe=None, replica=None, 8 | arbiter=None, force=False): 9 | """ 10 | Add Bricks 11 | 12 | :param volname: Volume Name 13 | :param 
bricks: List of Bricks 14 | :param stripe: Stripe Count 15 | :param replica: Replica Count 16 | :param arbiter: Arbiter Count 17 | :param force: True|False Force Add Bricks 18 | :returns: Output of add-brick command, raises 19 | GlusterCmdException((rc, out, err)) on error 20 | """ 21 | cmd = ["add-brick", volname] 22 | if stripe is not None: 23 | cmd += ["stripe", "{0}".format(stripe)] 24 | 25 | if replica is not None: 26 | cmd += ["replica", "{0}".format(replica)] 27 | 28 | if arbiter is not None: 29 | cmd += ["arbiter", "{0}".format(arbiter)] 30 | 31 | cmd += bricks 32 | 33 | if force: 34 | cmd += ["force"] 35 | 36 | return volume_execute(cmd) 37 | 38 | 39 | def remove_force(volname, bricks, replica=None): 40 | """ 41 | Remove Bricks force 42 | 43 | :param volname: Volume Name 44 | :param bricks: List of Bricks 45 | :param replica: Replica Count 46 | :returns: Output of remove-brick force command, raises 47 | GlusterCmdException((rc, out, err)) on error 48 | """ 49 | cmd = ["remove-brick", volname] 50 | if replica is not None: 51 | cmd += ["replica", "{0}".format(replica)] 52 | 53 | cmd += bricks 54 | cmd += ["force"] 55 | 56 | return volume_execute(cmd) 57 | 58 | 59 | def remove_start(volname, bricks, replica=None): 60 | """ 61 | Remove Bricks start 62 | 63 | :param volname: Volume Name 64 | :param bricks: List of Bricks 65 | :param replica: Replica Count 66 | :returns: Output of remove-brick start command, raises 67 | GlusterCmdException((rc, out, err)) on error 68 | """ 69 | cmd = ["remove-brick", volname] 70 | if replica is not None: 71 | cmd += ["replica", "{0}".format(replica)] 72 | 73 | cmd += bricks 74 | cmd += ["start"] 75 | 76 | return volume_execute(cmd) 77 | 78 | 79 | def remove_stop(volname, bricks, replica=None): 80 | """ 81 | Remove Bricks stop 82 | 83 | :param volname: Volume Name 84 | :param bricks: List of Bricks 85 | :param replica: Replica Count 86 | :returns: Output of remove-brick stop command, raises 87 | GlusterCmdException((rc, out, 
err)) on error 88 | """ 89 | cmd = ["remove-brick", volname] 90 | if replica is not None: 91 | cmd += ["replica", "{0}".format(replica)] 92 | 93 | cmd += bricks 94 | cmd += ["stop"] 95 | 96 | return volume_execute(cmd) 97 | 98 | 99 | def remove_commit(volname, bricks, replica=None): 100 | """ 101 | Remove Bricks Commit 102 | 103 | :param volname: Volume Name 104 | :param bricks: List of Bricks 105 | :param replica: Replica Count 106 | :returns: Output of remove-brick commit command, raises 107 | GlusterCmdException((rc, out, err)) on error 108 | """ 109 | cmd = ["remove-brick", volname] 110 | if replica is not None: 111 | cmd += ["replica", "{0}".format(replica)] 112 | 113 | cmd += bricks 114 | cmd += ["commit"] 115 | 116 | return volume_execute(cmd) 117 | 118 | 119 | def remove_status(volname, bricks, replica=None): 120 | """ 121 | Remove Bricks status 122 | 123 | :param volname: Volume Name 124 | :param bricks: List of Bricks 125 | :param replica: Replica Count 126 | :returns: Remove Bricks Status, raises 127 | GlusterCmdException((rc, out, err)) on error 128 | """ 129 | cmd = ["remove-brick", volname] 130 | if replica is not None: 131 | cmd += ["replica", "{0}".format(replica)] 132 | 133 | cmd += bricks 134 | cmd += ["status"] 135 | 136 | return parse_remove_brick_status(volume_execute_xml(cmd)) 137 | 138 | 139 | def replace_commit(volname, source_brick, new_brick): 140 | """ 141 | Replace Bricks 142 | 143 | :param volname: Volume Name 144 | :param source_brick: Source Brick 145 | :param new_brick: New Replacement Brick 146 | :returns: Output of replace-brick command, raises 147 | GlusterCmdException((rc, out, err)) on error 148 | """ 149 | cmd = [ 150 | "replace-brick", 151 | volname, 152 | source_brick, 153 | new_brick, 154 | "commit", "force" 155 | ] 156 | 157 | return volume_execute(cmd) 158 | -------------------------------------------------------------------------------- /glustercli/cli/georep.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from glustercli.cli.utils import georep_execute, georep_execute_xml, \ 4 | gluster_system_execute 5 | from glustercli.cli.parsers import parse_georep_config, \ 6 | parse_georep_status 7 | from glustercli.cli import volume 8 | 9 | 10 | def gsec_create(ssh_key_prefix=True): 11 | """ 12 | Generate Geo-replication SSH Keys 13 | 14 | :param ssh_key_prefix: True|False Command prefix in generated public keys 15 | :returns: Output of gsec_create command, raises 16 | GlusterCmdException((rc, out, err)) on error 17 | """ 18 | cmd = ["gsec_create"] 19 | if not ssh_key_prefix: 20 | cmd += ["container"] 21 | 22 | return gluster_system_execute(cmd) 23 | 24 | 25 | # noqa # pylint: disable=too-many-arguments 26 | def create(primary_volume, secondary_host, secondary_volume, 27 | secondary_user="root", 28 | push_pem=True, no_verify=False, force=False, ssh_port=22): 29 | """ 30 | Create Geo-replication Session 31 | 32 | :param primary_volume: Primary Volume Name 33 | :param secondary_host: Secondary Hostname or IP 34 | :param secondary_volume: Secondary Volume 35 | :param secondary_user: Secondary User, default is "root" 36 | :param push_pem: True|False Push SSH keys to Secondary 37 | :param no_verify: True|False Skip the Secondary Verification 38 | process before create 39 | :param force: True|False Force Create Session 40 | :param ssh_port: SSH Port, Default is 22 41 | :returns: Output of Create command, raises 42 | GlusterCmdException((rc, out, err)) on error 43 | """ 44 | cmd = [ 45 | primary_volume, 46 | f"{secondary_user}@{secondary_host}::{secondary_volume}", 47 | "create" 48 | ] 49 | 50 | if ssh_port != 22: 51 | cmd += ["ssh-port", "{0}".format(ssh_port)] 52 | 53 | if push_pem: 54 | cmd += ["push-pem"] 55 | 56 | if no_verify: 57 | cmd += ["no-verify"] 58 | 59 | if force: 60 | cmd += ["force"] 61 | 62 | return georep_execute(cmd) 63 | 64 | 65 | def 
start(primary_volume, secondary_host, secondary_volume, 66 | secondary_user="root", force=False): 67 | """ 68 | Start Geo-replication Session 69 | 70 | :param primary_volume: Primary Volume Name 71 | :param secondary_host: Secondary Hostname or IP 72 | :param secondary_volume: Secondary Volume 73 | :param secondary_user: Secondary User, default is "root" 74 | :param force: True|False Force Start the Session 75 | :returns: Output of Start command, raises 76 | GlusterCmdException((rc, out, err)) on error 77 | """ 78 | cmd = [primary_volume, 79 | f"{secondary_user}@{secondary_host}::{secondary_volume}", 80 | "start"] 81 | 82 | if force: 83 | cmd += ["force"] 84 | 85 | return georep_execute(cmd) 86 | 87 | 88 | def stop(primary_volume, secondary_host, secondary_volume, 89 | secondary_user="root", force=False): 90 | """ 91 | Stop Geo-replication Session 92 | 93 | :param primary_volume: Primary Volume Name 94 | :param secondary_host: Secondary Hostname or IP 95 | :param secondary_volume: Secondary Volume 96 | :param secondary_user: Secondary User, default is "root" 97 | :param force: True|False Force Start the Session 98 | :returns: Output of Stop command, raises 99 | GlusterCmdException((rc, out, err)) on error 100 | """ 101 | cmd = [primary_volume, 102 | f"{secondary_user}@{secondary_host}::{secondary_volume}", 103 | "stop"] 104 | 105 | if force: 106 | cmd += ["force"] 107 | 108 | return georep_execute(cmd) 109 | 110 | 111 | def restart(primary_volume, secondary_host, secondary_volume, 112 | secondary_user="root", force=False): 113 | """ 114 | Restart Geo-replication Session 115 | 116 | :param primary_volume: Primary Volume Name 117 | :param secondary_host: Secondary Hostname or IP 118 | :param secondary_volume: Secondary Volume 119 | :param secondary_user: Secondary User, default is "root" 120 | :param force: True|False Force Start the Session 121 | :returns: Output of Start command, raises 122 | GlusterCmdException((rc, out, err)) on error 123 | """ 124 | 
stop(primary_volume, secondary_host, secondary_volume, 125 | secondary_user, force=True) 126 | return start(primary_volume, secondary_host, secondary_volume, 127 | secondary_user, force) 128 | 129 | 130 | def delete(primary_volume, secondary_host, secondary_volume, 131 | secondary_user="root", reset_sync_time=None): 132 | """ 133 | Delete Geo-replication Session 134 | 135 | :param primary_volume: Primary Volume Name 136 | :param secondary_host: Secondary Hostname or IP 137 | :param secondary_volume: Secondary Volume 138 | :param secondary_user: Secondary User, default is "root" 139 | :param reset_sync_time: True|False Reset Sync time on delete 140 | :returns: Output of Start command, raises 141 | GlusterCmdException((rc, out, err)) on error 142 | """ 143 | cmd = [primary_volume, 144 | f"{secondary_user}@{secondary_host}::{secondary_volume}" 145 | "delete"] 146 | 147 | if reset_sync_time is not None: 148 | cmd += ["reset-sync-time"] 149 | 150 | return georep_execute(cmd) 151 | 152 | 153 | def pause(primary_volume, secondary_host, secondary_volume, 154 | secondary_user="root", force=False): 155 | """ 156 | Pause Geo-replication Session 157 | 158 | :param primary_volume: Primary Volume Name 159 | :param secondary_host: Secondary Hostname or IP 160 | :param secondary_volume: Secondary Volume 161 | :param secondary_user: Secondary User, default is "root" 162 | :param force: True|False Force Pause Session 163 | :returns: Output of Pause command, raises 164 | GlusterCmdException((rc, out, err)) on error 165 | """ 166 | cmd = [primary_volume, 167 | f"{secondary_user}@{secondary_host}::{secondary_volume}" 168 | "pause"] 169 | 170 | if force: 171 | cmd += ["force"] 172 | 173 | return georep_execute(cmd) 174 | 175 | 176 | def resume(primary_volume, secondary_host, secondary_volume, 177 | secondary_user="root", force=False): 178 | """ 179 | Resume Geo-replication Session 180 | 181 | :param primary_volume: Primary Volume Name 182 | :param secondary_host: Secondary Hostname or 
IP 183 | :param secondary_volume: Secondary Volume 184 | :param secondary_user: Secondary User, default is "root" 185 | :param force: True|False Force Resume Session 186 | :returns: Output of Resume command, raises 187 | GlusterCmdException((rc, out, err)) on error 188 | """ 189 | cmd = [primary_volume, 190 | f"{secondary_user}@{secondary_host}::{secondary_volume}" 191 | "resume"] 192 | 193 | if force: 194 | cmd += ["force"] 195 | 196 | return georep_execute(cmd) 197 | 198 | 199 | def config_set(primary_volume, secondary_host, secondary_volume, 200 | key, value, 201 | secondary_user="root"): 202 | """ 203 | Set Config of a Geo-replication Session 204 | 205 | :param primary_volume: Primary Volume Name 206 | :param secondary_host: Secondary Hostname or IP 207 | :param secondary_volume: Secondary Volume 208 | :param secondary_user: Secondary User, default is "root" 209 | :param key: Config Key 210 | :param value: Config Value 211 | :returns: Output of Config set command, raises 212 | GlusterCmdException((rc, out, err)) on error 213 | """ 214 | cmd = [primary_volume, 215 | f"{secondary_user}@{secondary_host}::{secondary_volume}" 216 | "config", key, value] 217 | return georep_execute(cmd) 218 | 219 | 220 | def config_reset(primary_volume, secondary_host, secondary_volume, 221 | key, secondary_user="root"): 222 | """ 223 | Reset configuration of Geo-replication Session 224 | 225 | :param primary_volume: Primary Volume Name 226 | :param secondary_host: Secondary Hostname or IP 227 | :param secondary_volume: Secondary Volume 228 | :param secondary_user: Secondary User, default is "root" 229 | :param key: Config Key 230 | :returns: Output of Config reset command, raises 231 | GlusterCmdException((rc, out, err)) on error 232 | """ 233 | cmd = [primary_volume, 234 | f"{secondary_user}@{secondary_host}::{secondary_volume}" 235 | "config", "!{0}".format(key)] 236 | return georep_execute(cmd) 237 | 238 | 239 | def config_get(primary_volume, secondary_host, secondary_volume, 
key=None, 240 | secondary_user="root"): 241 | """ 242 | Get Configuration of Geo-replication Session 243 | 244 | :param primary_volume: Primary Volume Name 245 | :param secondary_host: Secondary Hostname or IP 246 | :param secondary_volume: Secondary Volume 247 | :param secondary_user: Secondary User, default is "root" 248 | :param key: Config Key 249 | :returns: Geo-rep session Config Values, raises 250 | GlusterCmdException((rc, out, err)) on error 251 | """ 252 | cmd = [primary_volume, 253 | f"{secondary_user}@{secondary_host}::{secondary_volume}" 254 | "config"] 255 | 256 | if key is not None: 257 | cmd += [key] 258 | 259 | return parse_georep_config(georep_execute_xml(cmd)) 260 | 261 | 262 | def status(primary_volume=None, secondary_host=None, 263 | secondary_volume=None, 264 | secondary_user="root"): 265 | """ 266 | Status of Geo-replication Session 267 | 268 | :param primary_volume: Primary Volume Name 269 | :param secondary_host: Secondary Hostname or IP 270 | :param secondary_volume: Secondary Volume 271 | :param secondary_user: Secondary User, default is "root" 272 | :returns: Geo-replication Status, raises 273 | GlusterCmdException((rc, out, err)) on error 274 | """ 275 | cmd = [] 276 | 277 | if primary_volume is not None: 278 | cmd += [primary_volume] 279 | 280 | if primary_volume is not None and secondary_host is not None and \ 281 | secondary_volume is not None: 282 | cmd += [ 283 | f"{secondary_user}@{secondary_host}::{secondary_volume}" 284 | ] 285 | 286 | cmd += ["status"] 287 | 288 | return parse_georep_status(georep_execute_xml(cmd), volume.info()) 289 | -------------------------------------------------------------------------------- /glustercli/cli/gluster_version.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from glustercli.cli.utils import gluster_execute 4 | 5 | def glusterfs_version(): 6 | """Return the GlusterFS version""" 7 | cmd = ["--version"] 8 | # The library 
takes care of raising exception in case of any errors 9 | return(gluster_execute(cmd).split('\n')[0]) 10 | -------------------------------------------------------------------------------- /glustercli/cli/heal.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from glustercli.cli.utils import heal_execute, heal_execute_xml, \ 4 | GlusterCmdException 5 | from glustercli.cli.parsers import parse_heal_statistics, parse_heal_info 6 | 7 | 8 | HEAL_INFO_TYPES = ["healed", "heal-failed", "split-brain"] 9 | 10 | 11 | def enable(volname): 12 | """ 13 | Enable Volume Heal 14 | 15 | :param volname: Volume Name 16 | :returns: Output of Enable command, raises 17 | GlusterCmdException((rc, out, err)) on error 18 | """ 19 | cmd = [volname, "enable"] 20 | return heal_execute(cmd) 21 | 22 | 23 | def disable(volname): 24 | """ 25 | Disable Volume Heal 26 | 27 | :param volname: Volume Name 28 | :returns: Output of Disable command, raises 29 | GlusterCmdException((rc, out, err)) on error 30 | """ 31 | cmd = [volname, "disable"] 32 | return heal_execute(cmd) 33 | 34 | 35 | def full(volname): 36 | """ 37 | Full Volume Heal 38 | 39 | :param volname: Volume Name 40 | :returns: Output of Full Heal command, raises 41 | GlusterCmdException((rc, out, err)) on error 42 | """ 43 | cmd = [volname, "full"] 44 | return heal_execute(cmd) 45 | 46 | 47 | def statistics(volname): 48 | """ 49 | Get Statistics of Heal 50 | 51 | :param volname: Volume Name 52 | :returns: Output of Statistics command, raises 53 | GlusterCmdException((rc, out, err)) on error 54 | """ 55 | cmd = [volname, "statistics"] 56 | return parse_heal_statistics(heal_execute_xml(cmd)) 57 | 58 | 59 | def info(volname, info_type=None): 60 | """ 61 | Get Volume Heal Info 62 | 63 | :param volname: Volume Name 64 | :returns: Output of Heal Info command, raises 65 | GlusterCmdException((rc, out, err)) on error 66 | """ 67 | cmd = [volname, "info"] 68 | 69 | if info_type 
is not None: 70 | if info_type.lower() not in HEAL_INFO_TYPES: 71 | raise GlusterCmdException((-1, "", "Invalid Heal Info Types")) 72 | 73 | cmd += [info_type.lower()] 74 | 75 | return parse_heal_info(heal_execute_xml(cmd)) 76 | 77 | 78 | def split_brain(volname, bigger_file=None, 79 | latest_mtime=None, source_brick=None, path=None): 80 | """ 81 | Split Brain Resolution 82 | 83 | :param volname: Volume Name 84 | :param bigger_file: File Path of Bigger file 85 | :param latest_mtime: File Path of Latest mtime 86 | :param source_brick: Source Brick for Good Copy 87 | :param path: Resolution of this path/file 88 | :returns: Output of Split-brain command, raises 89 | GlusterCmdException((rc, out, err)) on error 90 | """ 91 | cmd = [volname, "split-brain"] 92 | if bigger_file is not None: 93 | cmd += ["bigger-file", bigger_file] 94 | 95 | if latest_mtime is not None: 96 | cmd += ["latest-mtime", latest_mtime] 97 | 98 | if source_brick is not None: 99 | cmd += ["source-brick", source_brick] 100 | 101 | if path is not None: 102 | cmd += [path] 103 | 104 | return heal_execute(cmd) 105 | -------------------------------------------------------------------------------- /glustercli/cli/nfs_ganesha.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from glustercli.cli.utils import gluster_execute 4 | 5 | 6 | def enable(): 7 | """ 8 | Enable NFS Ganesha 9 | 10 | :returns: Output of Enable command, raises 11 | GlusterCmdException((rc, out, err)) on error 12 | """ 13 | cmd = ["nfs-ganesha", "enable"] 14 | return gluster_execute(cmd) 15 | 16 | 17 | def disable(): 18 | """ 19 | Disable NFS Ganesha 20 | 21 | :returns: Output of Disable command, raises 22 | GlusterCmdException((rc, out, err)) on error 23 | """ 24 | cmd = ["nfs-ganesha", "disable"] 25 | return gluster_execute(cmd) 26 | -------------------------------------------------------------------------------- /glustercli/cli/parsers.py: 
# glustercli/cli/parsers.py
# -*- coding: utf-8 -*-
import copy
# xml.etree.cElementTree was deprecated since Python 3.3 and removed in
# Python 3.9; plain ElementTree uses the C accelerator automatically.
import xml.etree.ElementTree as etree
import math

from .utils import RebalanceOperationType as ROT

# Very old Python exposed XML parse failures as SyntaxError
ParseError = etree.ParseError if hasattr(etree, 'ParseError') else SyntaxError


class GlusterCmdOutputParseError(Exception):
    # Raised when the XML output of a gluster command cannot be parsed.
    pass


# Health of a Volume/Subvolume derived from its brick states
HEALTH_UP = "up"
HEALTH_DOWN = "down"
HEALTH_PARTIAL = "partial"
HEALTH_DEGRADED = "degraded"

# Volume states as reported by the gluster CLI
STATE_CREATED = "Created"
STATE_STARTED = "Started"
STATE_STOPPED = "Stopped"

TYPE_REPLICATE = "REPLICATE"
TYPE_DISPERSE = "DISPERSE"


def _subvol_health(subvol):
    """
    Derive the health of one subvolume from the online state of its
    bricks.

    :param subvol: Subvolume dict with "bricks", "type" and, depending
                   on type, "replica"/"disperse_redundancy" keys
    :returns: HEALTH_UP, HEALTH_DOWN or HEALTH_PARTIAL
    """
    up_bricks = 0
    for brick in subvol["bricks"]:
        if brick["online"]:
            up_bricks += 1

    health = HEALTH_UP
    if len(subvol["bricks"]) != up_bricks:
        health = HEALTH_DOWN
        # Replicate subvol stays partially up while quorum
        # (ceil(replica/2)) of its bricks are online
        if subvol["type"] == TYPE_REPLICATE:
            if up_bricks >= math.ceil(subvol["replica"] / 2):
                health = HEALTH_PARTIAL

        # If down bricks are less than or equal to redundancy count
        # then Volume is UP but some bricks are down
        if subvol["type"] == TYPE_DISPERSE:
            down_bricks = (len(subvol["bricks"]) - up_bricks)
            if down_bricks <= subvol["disperse_redundancy"]:
                health = HEALTH_PARTIAL

    return health


def _update_volume_health(volumes):
    """
    Update the "health" key of each started volume from the health of
    its subvolumes (edits the given dicts in place).
    """
    # Note: vol is edited inside loop
    for vol in volumes:
        if vol["status"] != STATE_STARTED:
            continue

        vol["health"] = HEALTH_UP
        up_subvols = 0

        # Note: subvol is edited inside loop
        for subvol in vol["subvols"]:
            subvol["health"] = _subvol_health(subvol)

            if subvol["health"] == HEALTH_DOWN:
                vol["health"] = HEALTH_DEGRADED

            if subvol["health"] == HEALTH_PARTIAL:
                # "degraded" is worse than "partial"; never upgrade
                if vol["health"] != HEALTH_DEGRADED:
                    vol["health"] = subvol["health"]

            if subvol["health"] != HEALTH_DOWN:
                up_subvols += 1

        if up_subvols == 0:
            vol["health"] = HEALTH_DOWN


def _update_volume_utilization(volumes):
    """
    Aggregate size/inode utilization of each volume from its subvolume
    bricks (edits the given dicts in place). For replica sets the
    biggest used/smallest total brick wins; arbiter bricks are skipped.
    """
    # Note: modifies volume inside loop
    for vol in volumes:
        vol["size_total"] = 0
        vol["size_free"] = 0
        vol["size_used"] = 0
        vol["inodes_total"] = 0
        vol["inodes_free"] = 0
        vol["inodes_used"] = 0

        # Note: modifies subvol inside loop
        for subvol in vol["subvols"]:
            effective_capacity_used = 0
            effective_capacity_total = 0
            effective_inodes_used = 0
            effective_inodes_total = 0

            for brick in subvol["bricks"]:
                # Arbiter bricks store no user data
                if brick["type"] == "Arbiter":
                    continue

                if brick["size_used"] >= effective_capacity_used:
                    effective_capacity_used = brick["size_used"]

                # Pick the smallest non-zero brick capacity as the
                # subvol capacity (bricks may be unequal in size)
                ect_is_zero = effective_capacity_total == 0
                bst_lt_ect = brick["size_total"] <= effective_capacity_total
                if ect_is_zero or (bst_lt_ect and brick["size_total"] > 0):
                    effective_capacity_total = brick["size_total"]

                if brick["inodes_used"] >= effective_inodes_used:
                    effective_inodes_used = brick["inodes_used"]

                eit_is_zero = effective_inodes_total == 0
                bit_lt_eit = brick["inodes_total"] <= effective_inodes_total
                if eit_is_zero or (bit_lt_eit and brick["inodes_total"] > 0):
                    effective_inodes_total = brick["inodes_total"]

            if subvol["type"] == TYPE_DISPERSE:
                # Subvol Size = Sum of size of Data bricks
                data_bricks = (subvol["disperse"] -
                               subvol["disperse_redundancy"])
                effective_capacity_used *= data_bricks
                effective_capacity_total *= data_bricks
                effective_inodes_used *= data_bricks
                effective_inodes_total *= data_bricks

            vol["size_total"] += effective_capacity_total
            vol["size_used"] += effective_capacity_used
            vol["size_free"] = vol["size_total"] - vol["size_used"]
            vol["inodes_total"] += effective_inodes_total
            vol["inodes_used"] += effective_inodes_used
            vol["inodes_free"] = vol["inodes_total"] - vol["inodes_used"]
127 | vol["size_used"] += effective_capacity_used 128 | vol["size_free"] = vol["size_total"] - vol["size_used"] 129 | vol["inodes_total"] += effective_inodes_total 130 | vol["inodes_used"] += effective_inodes_used 131 | vol["inodes_free"] = vol["inodes_total"] - vol["inodes_used"] 132 | 133 | 134 | def _parse_a_vol(volume_el): 135 | value = { 136 | 'name': volume_el.find('name').text, 137 | 'uuid': volume_el.find('id').text, 138 | 'type': volume_el.find('typeStr').text.upper().replace('-', '_'), 139 | 'status': volume_el.find('statusStr').text, 140 | 'num_bricks': int(volume_el.find('brickCount').text), 141 | 'distribute': int(volume_el.find('distCount').text), 142 | # 'stripe': int(volume_el.find('stripeCount').text), 143 | # 'stripe' : 1, 144 | 'replica': int(volume_el.find('replicaCount').text), 145 | 'disperse': int(volume_el.find('disperseCount').text), 146 | 'disperse_redundancy': int(volume_el.find('redundancyCount').text), 147 | 'transport': volume_el.find('transport').text, 148 | 'snapshot_count': int(volume_el.find('snapshotCount').text), 149 | 'bricks': [], 150 | 'options': [] 151 | } 152 | 153 | if value['transport'] == '0': 154 | value['transport'] = 'TCP' 155 | elif value['transport'] == '1': 156 | value['transport'] = 'RDMA' 157 | else: 158 | value['transport'] = 'TCP,RDMA' 159 | 160 | for brick in volume_el.findall('bricks/brick'): 161 | brick_type = "Brick" 162 | if brick.find("isArbiter").text == '1': 163 | brick_type = "Arbiter" 164 | 165 | value['bricks'].append({ 166 | "name": brick.find("name").text, 167 | "uuid": brick.find("hostUuid").text, 168 | "type": brick_type 169 | }) 170 | 171 | for opt in volume_el.findall('options/option'): 172 | value['options'].append({"name": opt.find('name').text, 173 | "value": opt.find('value').text}) 174 | 175 | return value 176 | 177 | 178 | def _get_subvol_bricks_count(replica_count, disperse_count): 179 | if replica_count > 1: 180 | return replica_count 181 | 182 | if disperse_count > 0: 183 | return 
disperse_count 184 | 185 | return 1 186 | 187 | 188 | def _group_subvols(volumes): 189 | out_volumes = copy.deepcopy(volumes) 190 | for idx, vol in enumerate(volumes): 191 | # Remove Bricks information from the output 192 | # and include subvols 193 | del out_volumes[idx]["bricks"] 194 | out_volumes[idx]["subvols"] = [] 195 | subvol_type = vol["type"].split("_")[-1] 196 | subvol_bricks_count = _get_subvol_bricks_count(vol["replica"], 197 | vol["disperse"]) 198 | 199 | number_of_subvols = int(len(vol["bricks"]) / subvol_bricks_count) 200 | 201 | for sidx in range(number_of_subvols): 202 | subvol = { 203 | "name": "%s-%s-%s" % (vol["name"], subvol_type.lower(), sidx), 204 | "replica": vol["replica"], 205 | "disperse": vol["disperse"], 206 | "disperse_redundancy": vol["disperse_redundancy"], 207 | "type": subvol_type, 208 | "bricks": [] 209 | } 210 | for bidx in range(subvol_bricks_count): 211 | subvol["bricks"].append( 212 | vol["bricks"][sidx * subvol_bricks_count + bidx] 213 | ) 214 | out_volumes[idx]["subvols"].append(subvol) 215 | 216 | return out_volumes 217 | 218 | 219 | def parse_volume_info(info, group_subvols=False): 220 | tree = etree.fromstring(info) 221 | volumes = [] 222 | for volume_el in tree.findall('volInfo/volumes/volume'): 223 | try: 224 | volumes.append(_parse_a_vol(volume_el)) 225 | except (ParseError, AttributeError, ValueError) as err: 226 | raise GlusterCmdOutputParseError(err) 227 | 228 | if group_subvols: 229 | return _group_subvols(volumes) 230 | 231 | return volumes 232 | 233 | def _check_node_value(node_el, key, type, default_value): 234 | value = node_el.find(key) 235 | if value is not None: 236 | return type(value.text) 237 | return type(default_value) 238 | 239 | def _parse_a_node(node_el): 240 | name = (node_el.find('hostname').text + ":" + node_el.find('path').text) 241 | online = node_el.find('status').text == "1" or False 242 | if not online: 243 | # if the node where the brick exists isn't 244 | # online then no reason to continue 
as the 245 | # caller of this method will populate "default" 246 | # information 247 | return {'name': name, 'online': online} 248 | 249 | value = { 250 | 'name': name, 251 | 'uuid': node_el.find('peerid').text, 252 | 'online': online, 253 | 'pid': node_el.find('pid').text, 254 | 'size_total': int(node_el.find('sizeTotal').text), 255 | 'size_free': int(node_el.find('sizeFree').text), 256 | 'inodes_total': _check_node_value(node_el, 'inodesTotal', int, 0), 257 | 'inodes_free': _check_node_value(node_el, 'inodesFree', int, 0), 258 | 'device': node_el.find('device').text, 259 | 'block_size': node_el.find('blockSize').text, 260 | 'mnt_options': node_el.find('mntOptions').text, 261 | 'fs_name': node_el.find('fsName').text, 262 | } 263 | value['size_used'] = value['size_total'] - value['size_free'] 264 | value['inodes_used'] = value['inodes_total'] - value['inodes_free'] 265 | 266 | # ISSUE #14 glusterfs 3.6.5 does not have 'ports' key 267 | # in vol status detail xml output 268 | if node_el.find('ports'): 269 | value['ports'] = { 270 | "tcp": node_el.find('ports').find("tcp").text, 271 | "rdma": node_el.find('ports').find("rdma").text 272 | } 273 | else: 274 | value['ports'] = { 275 | "tcp": node_el.find('port'), 276 | "rdma": None 277 | } 278 | 279 | return value 280 | 281 | 282 | def _parse_volume_status(data): 283 | tree = etree.fromstring(data) 284 | nodes = [] 285 | for node_el in tree.findall('volStatus/volumes/volume/node'): 286 | try: 287 | nodes.append(_parse_a_node(node_el)) 288 | except (ParseError, AttributeError, ValueError) as err: 289 | raise GlusterCmdOutputParseError(err) 290 | 291 | return nodes 292 | 293 | 294 | def parse_volume_status(status_data, volinfo, group_subvols=False): 295 | nodes_data = _parse_volume_status(status_data) 296 | tmp_brick_status = {} 297 | for node in nodes_data: 298 | tmp_brick_status[node["name"]] = node 299 | 300 | volumes = [] 301 | for vol in volinfo: 302 | volumes.append(vol.copy()) 303 | volumes[-1]["bricks"] = [] 304 | 
305 | for brick in vol["bricks"]: 306 | brick_status_data = tmp_brick_status.get(brick["name"], None) 307 | if brick_status_data is None: 308 | use_default = True 309 | else: 310 | # brick could be offline 311 | use_default = not brick_status_data.get("online", False) 312 | 313 | if use_default: 314 | # Default Status 315 | volumes[-1]["bricks"].append({ 316 | "name": brick["name"], 317 | "uuid": brick["uuid"], 318 | "type": brick["type"], 319 | "online": False, 320 | "ports": {"tcp": "N/A", "rdma": "N/A"}, 321 | "pid": "N/A", 322 | "size_total": 0, 323 | "size_free": 0, 324 | "size_used": 0, 325 | "inodes_total": 0, 326 | "inodes_free": 0, 327 | "inodes_used": 0, 328 | "device": "N/A", 329 | "block_size": "N/A", 330 | "mnt_options": "N/A", 331 | "fs_name": "N/A" 332 | }) 333 | else: 334 | volumes[-1]["bricks"].append(brick_status_data.copy()) 335 | volumes[-1]["bricks"][-1]["type"] = brick["type"] 336 | 337 | if group_subvols: 338 | grouped_vols = _group_subvols(volumes) 339 | _update_volume_utilization(grouped_vols) 340 | _update_volume_health(grouped_vols) 341 | return grouped_vols 342 | 343 | return volumes 344 | 345 | 346 | def _parse_profile_info_clear(volume_el): 347 | clear = { 348 | 'volname': volume_el.find('volname').text, 349 | 'bricks': [] 350 | } 351 | 352 | for brick_el in volume_el.findall('brick'): 353 | clear['bricks'].append({ 354 | 'brick_name': brick_el.find('brickName').text, 355 | 'clear_stats': brick_el.find('clearStats').text 356 | }) 357 | 358 | return clear 359 | 360 | 361 | def _bytes_size(size): 362 | traditional = [ 363 | (1024**5, 'PB'), 364 | (1024**4, 'TB'), 365 | (1024**3, 'GB'), 366 | (1024**2, 'MB'), 367 | (1024**1, 'KB'), 368 | (1024**0, 'B') 369 | ] 370 | 371 | for factor, suffix in traditional: 372 | if size > factor: 373 | break 374 | return str(int(size / factor)) + suffix 375 | 376 | 377 | def _parse_profile_block_stats(b_el): 378 | stats = [] 379 | for block_el in b_el.findall('block'): 380 | size = 
_bytes_size(int(block_el.find('size').text)) 381 | stats.append({size: {'reads': int(block_el.find('reads').text), 382 | 'writes': int(block_el.find('writes').text)}}) 383 | return stats 384 | 385 | 386 | def _parse_profile_fop_stats(fop_el): 387 | stats = [] 388 | for fop in fop_el.findall('fop'): 389 | name = fop.find('name').text 390 | stats.append( 391 | { 392 | name: { 393 | 'hits': int(fop.find('hits').text), 394 | 'max_latency': float(fop.find('maxLatency').text), 395 | 'min_latency': float(fop.find('minLatency').text), 396 | 'avg_latency': float(fop.find('avgLatency').text), 397 | } 398 | } 399 | ) 400 | return stats 401 | 402 | 403 | def _parse_profile_bricks(brick_el): 404 | cumulative_block_stats = [] 405 | cumulative_fop_stats = [] 406 | cumulative_total_read_bytes = 0 407 | cumulative_total_write_bytes = 0 408 | cumulative_total_duration = 0 409 | 410 | interval_block_stats = [] 411 | interval_fop_stats = [] 412 | interval_total_read_bytes = 0 413 | interval_total_write_bytes = 0 414 | interval_total_duration = 0 415 | 416 | brick_name = brick_el.find('brickName').text 417 | 418 | if brick_el.find('cumulativeStats') is not None: 419 | cumulative_block_stats = _parse_profile_block_stats( 420 | brick_el.find('cumulativeStats/blockStats')) 421 | cumulative_fop_stats = _parse_profile_fop_stats( 422 | brick_el.find('cumulativeStats/fopStats')) 423 | cumulative_total_read_bytes = int( 424 | brick_el.find('cumulativeStats').find('totalRead').text) 425 | cumulative_total_write_bytes = int( 426 | brick_el.find('cumulativeStats').find('totalWrite').text) 427 | cumulative_total_duration = int( 428 | brick_el.find('cumulativeStats').find('duration').text) 429 | 430 | if brick_el.find('intervalStats') is not None: 431 | interval_block_stats = _parse_profile_block_stats( 432 | brick_el.find('intervalStats/blockStats')) 433 | interval_fop_stats = _parse_profile_fop_stats( 434 | brick_el.find('intervalStats/fopStats')) 435 | interval_total_read_bytes = int( 436 | 
def _parse_profile_info(volume_el):
    """Parse one <volProfile> element into volname + per-brick stats."""
    return {
        'volname': volume_el.find('volname').text,
        'bricks': [_parse_profile_bricks(brick_el)
                   for brick_el in volume_el.findall('brick')]
    }


def parse_volume_profile_info(info, opt):
    """
    Parse `volume profile <VOL> info [clear] --xml` output.

    :param info: XML output of the profile info command
    :param opt: "clear" selects the clear-stats layout
    :returns: List of profile dicts, raises
     GlusterCmdOutputParseError on malformed output
    """
    xml = etree.fromstring(info)
    profiles = []
    parser = _parse_profile_info_clear if opt == "clear" \
        else _parse_profile_info
    for prof_el in xml.findall('volProfile'):
        try:
            profiles.append(parser(prof_el))
        except (ParseError, AttributeError, ValueError) as err:
            raise GlusterCmdOutputParseError(err)

    return profiles


def parse_volume_options(data):
    # Not implemented yet
    raise NotImplementedError("Volume Options")


def _parse_a_georep_pair(pvol, pair):
    """Collect the status fields of one primary-brick/secondary pair."""
    return {
        "primary_volume": pvol,
        "secondary_volume": pair.find("secondary").text.split("::")[-1],
        "primary_node": pair.find("primary_node").text,
        "primary_brick": pair.find("primary_brick").text,
        "secondary_user": pair.find("secondary_user").text,
        "secondary": pair.find("secondary").text,
        "secondary_node": pair.find("secondary_node").text,
        "status": pair.find("status").text,
        "crawl_status": pair.find("crawl_status").text,
        "entry": pair.find("entry").text,
        "data": pair.find("data").text,
        "meta": pair.find("meta").text,
        "failures": pair.find("failures").text,
        "checkpoint_completed": pair.find("checkpoint_completed").text,
        "primary_node_uuid": pair.find("primary_node_uuid").text,
        "last_synced": pair.find("last_synced").text,
        "checkpoint_time": pair.find("checkpoint_time").text,
        "checkpoint_completion_time":
        pair.find("checkpoint_completion_time").text
    }


def _georep_offline_status(pvol, secondary, brick):
    """Default "Offline" status row for a brick missing from output."""
    node, brick_path = brick["name"].split(":")
    if "@" in secondary:
        secondary_user = secondary.split("@")[0]
    else:
        secondary_user = "root"

    return {
        "primary_volume": pvol,
        "secondary_volume": secondary.split("::")[-1],
        "primary_node": node,
        "primary_brick": brick_path,
        "secondary_user": secondary_user,
        "secondary": secondary,
        "secondary_node": "N/A",
        "status": "Offline",
        "crawl_status": "N/A",
        "entry": "N/A",
        "data": "N/A",
        "meta": "N/A",
        "failures": "N/A",
        "checkpoint_completed": "N/A",
        "primary_node_uuid": brick["uuid"],
        "last_synced": "N/A",
        "checkpoint_time": "N/A",
        "checkpoint_completion_time": "N/A"
    }


def parse_georep_status(data, volinfo):
    """
    Merge Geo-rep status and Volume Info to get Offline Status
    and to sort the status in the same order as of Volume Info
    """
    session_keys = set()
    gstatus = {}

    try:
        tree = etree.fromstring(data)
        # Collect every session keyed as "<primary_vol>:<secondary>"
        for volume_el in tree.findall("geoRep/volume"):
            # Primary Volume name if multiple Volumes
            pvol = volume_el.find("name").text

            for session in volume_el.find("sessions").findall("session"):
                session_key = "{0}:{1}".format(
                    pvol, session.find("session_secondary").text)
                session_keys.add(session_key)
                gstatus[session_key] = {}

                for pair in session.findall('pair'):
                    primary_brick = "{0}:{1}".format(
                        pair.find("primary_node").text,
                        pair.find("primary_brick").text
                    )
                    gstatus[session_key][primary_brick] = \
                        _parse_a_georep_pair(pvol, pair)
    except (ParseError, AttributeError, ValueError) as err:
        raise GlusterCmdOutputParseError(err)

    # Bricks of each volume, used to order the output and to fill
    # in Offline rows
    all_bricks = {vol["name"]: vol["bricks"] for vol in volinfo}

    out = []
    for session in session_keys:
        pvol, _, secondary = session.split(":", 2)
        secondary = secondary.replace("ssh://", "")
        session_rows = []
        for brick in all_bricks[pvol]:
            row = gstatus.get(session, {}).get(brick["name"], None)
            if row:
                session_rows.append(row)
            else:
                # Brick missing from status output => Offline
                session_rows.append(
                    _georep_offline_status(pvol, secondary, brick))
        out.append(session_rows)
    return out


def parse_bitrot_scrub_status(data):
    # Not implemented yet
    raise NotImplementedError("Bitrot Scrub Status")


def parse_rebalance_status(data):
    """
    Parse `volume rebalance <VOL> status --xml` output into per-node
    and aggregate counters.
    """
    status = etree.fromstring(data).find('volRebalance')
    result = {}
    try:
        result['task_id'] = status.find('task-id').text
        result['op_type'] = ROT(int(status.find('op').text)).name
        result['node_count'] = int(status.find('nodeCount').text)

        # individual node section
        result['nodes'] = [
            {
                'node_name': node_el.find('nodeName').text,
                'id': node_el.find('id').text,
                'files': node_el.find('files').text,
                'size': node_el.find('size').text,
                'lookups': node_el.find('lookups').text,
                'failures': node_el.find('failures').text,
                'skipped': node_el.find('skipped').text,
                'status': node_el.find('statusStr').text,
                'runtime': node_el.find('runtime').text,
            }
            for node_el in status if node_el.tag == 'node'
        ]

        # aggregate section
        result['aggregate'] = {
            'files': status.find('aggregate/files').text,
            'size': status.find('aggregate/size').text,
            'lookups': status.find('aggregate/lookups').text,
            'failures': status.find('aggregate/failures').text,
            'skipped': status.find('aggregate/skipped').text,
            'status': status.find('aggregate/statusStr').text,
            'runtime': status.find('aggregate/runtime').text,
        }
    except (ParseError, AttributeError, ValueError) as err:
        raise GlusterCmdOutputParseError(err)

    return result
def parse_quota_list_paths(quotainfo):
    """
    Parse the XML output of `volume quota <VOL> list`.

    :param quotainfo: XML output of the quota list command
    :returns: List of dicts, one per configured path limit
    """
    root = etree.fromstring(quotainfo)
    fields = ('path', 'hard_limit', 'soft_limit_percent', 'used_space',
              'avail_space', 'sl_exceeded', 'hl_exceeded')
    return [
        {field: limit_el.find(field).text for field in fields}
        for limit_el in root.findall('volQuota/limit')
    ]


def parse_quota_list_objects(data):
    # Not implemented yet
    raise NotImplementedError("Quota List Objects")


def parse_georep_config(data):
    # Not implemented yet
    raise NotImplementedError("Georep Config")
def parse_remove_brick_status(status):
    """
    Parse `volume remove-brick <VOL> status --xml` output.

    :returns: dict with per-node entries and the aggregate counters
    """
    tree = etree.fromstring(status)
    return {
        'nodes': [_parse_remove_node(node_el)
                  for node_el in tree.findall('volRemoveBrick/node')],
        'aggregate': _parse_remove_aggregate(
            tree.find('volRemoveBrick/aggregate'))
    }


def _parse_remove_node(node_el):
    """Parse one <node> entry of remove-brick status output."""
    value = {
        'name': node_el.find('nodeName').text,
        'id': node_el.find('id').text,
    }
    # <node> carries the same counter elements as <aggregate>
    value.update(_parse_remove_aggregate(node_el))
    return value


def _parse_remove_aggregate(aggregate_el):
    """Parse the rebalance counters common to <aggregate> and <node>."""
    return {
        'files': aggregate_el.find('files').text,
        'size': aggregate_el.find('size').text,
        'lookups': aggregate_el.find('lookups').text,
        'failures': aggregate_el.find('failures').text,
        'skipped': aggregate_el.find('skipped').text,
        'status_code': aggregate_el.find('status').text,
        'status': aggregate_el.find('statusStr').text,
        'runtime': aggregate_el.find('runtime').text
    }


def parse_tier_detach(data):
    # Not implemented yet
    raise NotImplementedError("Tier detach Status")


def parse_tier_status(data):
    # Not implemented yet
    raise NotImplementedError("Tier Status")


def parse_volume_list(data):
    """Parse `volume list --xml` output into a list of volume names."""
    return [volume_el.text
            for volume_el in etree.fromstring(data).findall('volList/volume')]
def parse_heal_info(data):
    """
    Parse `volume heal <VOL> info --xml` output.

    :returns: List of per-brick dicts with name/status/host_uuid and
     the number of entries needing heal
    """
    xml = etree.fromstring(data)
    healinfo = []
    for brick_el in xml.findall('healInfo/bricks/brick'):
        healinfo.append({
            'name': brick_el.find('name').text,
            'status': brick_el.find('status').text,
            'host_uuid': brick_el.attrib['hostUuid'],
            'nr_entries': brick_el.find('numberOfEntries').text
        })
    return healinfo


def parse_heal_statistics(data):
    # Not implemented yet
    raise NotImplementedError("Heal Statistics")


def parse_snapshot_status(data):
    # Not implemented yet
    raise NotImplementedError("Snapshot Status")


def parse_snapshot_info(data):
    """
    Parse `snapshot info --xml` output.

    :returns: List of snapshot dicts; "origin_volume" is present only
     when the CLI reports the originating volume
    """
    xml = etree.fromstring(data)
    snapinfo = []
    for snap_el in xml.findall('snapInfo/snapshots/snapshot'):
        snapdata = {
            'name': snap_el.find('name').text,
            'create_time': snap_el.find('createTime').text,
            'uuid': snap_el.find('uuid').text,
            'status': snap_el.find('snapVolume/status').text,
        }
        # Fix: explicit None check instead of the deprecated
        # Element truth-test (`if snap_el.find(...)`)
        origin_name_el = snap_el.find('snapVolume/originVolume/name')
        if origin_name_el is not None:
            snapdata['origin_volume'] = origin_name_el.text
        snapinfo.append(snapdata)
    return snapinfo


def parse_snapshot_list(data):
    """Parse `snapshot list --xml` output into a list of names."""
    xml = etree.fromstring(data)
    snapshots = []
    for snap_el in xml.findall('snapList/snapshot'):
        snapshots.append(snap_el.text)
    return snapshots


def _parse_a_peer(peer):
    """
    Parse one <peer> element; the numeric connected flag is mapped to
    "Connected"/"Disconnected".
    """
    value = {
        'uuid': peer.find('uuid').text,
        'hostname': peer.find('hostname').text,
        'connected': peer.find('connected').text
    }

    if value['connected'] == '0':
        value['connected'] = "Disconnected"
    elif value['connected'] == '1':
        value['connected'] = "Connected"

    return value


def parse_peer_status(data):
    """
    Parse `peer status --xml` output.

    :returns: List of peer dicts, raises
     GlusterCmdOutputParseError on malformed output
    """
    tree = etree.fromstring(data)
    peers = []
    for peer_el in tree.findall('peerStatus/peer'):
        try:
            peers.append(_parse_a_peer(peer_el))
        except (ParseError, AttributeError, ValueError) as err:
            raise GlusterCmdOutputParseError(err)

    return peers


def parse_pool_list(data):
    """
    Parse `pool list --xml` output (same element layout as
    peer status).
    """
    tree = etree.fromstring(data)
    pools = []
    for peer_el in tree.findall('peerStatus/peer'):
        try:
            pools.append(_parse_a_peer(peer_el))
        except (ParseError, AttributeError, ValueError) as err:
            raise GlusterCmdOutputParseError(err)

    return pools
# glustercli/cli/peer.py
# -*- coding: utf-8 -*-

from glustercli.cli.utils import peer_execute, peer_execute_xml, \
    gluster_execute_xml, GlusterCmdException
from glustercli.cli.parsers import parse_peer_status, parse_pool_list


def probe(host):
    """
    Add Host to Cluster

    :param host: Hostname or IP
    :returns: Output of peer probe command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return peer_execute(["probe", host])


def attach(host):
    """
    Add Host to Cluster, alias for probe

    :param host: Hostname or IP
    :returns: Output of peer probe command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return probe(host)


def detach(host):
    """
    Remove Host from Cluster

    :param host: Hostname or IP
    :returns: Output of peer detach command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return peer_execute(["detach", host])


def detach_all():
    """
    Removes All Hosts from Cluster

    :returns: Output of peer detach command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    peers = parse_peer_status(peer_execute_xml(["status"]))
    errors_list = []
    outlist = []
    for peer in peers or []:
        host = peer["hostname"]
        if peer["connected"] != "Connected":
            errors_list.append(host + " is not connected")
            continue
        try:
            # Keep detaching the remaining peers even if one fails;
            # collect the errors and report them at the end.
            outlist.append(host + " " + peer_execute(["detach", host]))
        except Exception as err:
            errors_list.append(err)

    if errors_list:
        raise GlusterCmdException((1, "", errors_list))
    return "\n".join(outlist)
def status():
    """
    Peer Status of Cluster

    :returns: Output of peer status command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return parse_peer_status(peer_execute_xml(["status"]))


def pool():
    """
    Cluster Pool Status

    :returns: Pool list and status, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return parse_pool_list(gluster_execute_xml(["pool", "list"]))


# glustercli/cli/quota.py
# -*- coding: utf-8 -*-

from glustercli.cli.utils import quota_execute, quota_execute_xml, \
    volume_execute
from glustercli.cli.parsers import parse_quota_list_paths, \
    parse_quota_list_objects


def inode_quota_enable(volname):
    """
    Enable Inode Quota

    :param volname: Volume Name
    :returns: Output of inode-quota Enable command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return volume_execute(["inode-quota", volname, "enable"])


def enable(volname):
    """
    Enable Quota

    :param volname: Volume Name
    :returns: Output of quota Enable command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "enable"])


def disable(volname):
    """
    Disable Quota

    :param volname: Volume Name
    :returns: Output of quota Disable command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "disable"])


# noqa # pylint: disable=dangerous-default-value
def list_paths(volname, paths=[]):
    """
    Get Quota List

    :param volname: Volume Name
    :param paths: Optional list of paths
    :returns: Quota list of paths, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return parse_quota_list_paths(
        quota_execute_xml([volname, "list"] + paths))


# noqa # pylint: disable=dangerous-default-value
def list_objects(volname, paths=[]):
    """
    Get Quota Objects List

    :param volname: Volume Name
    :param paths: Optional list of paths
    :returns: Quota list of objects, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return parse_quota_list_objects(
        quota_execute_xml([volname, "list"] + paths))


def remove_path(volname, path):
    """
    Remove Path from Quota list

    :param volname: Volume Name
    :param path: Path to remove from quota
    :returns: Output of Quota remove-path, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "remove-path", path])


def remove_objects(volname, path):
    """
    Remove Objects for a given path

    :param volname: Volume Name
    :param path: Path to remove from quota
    :returns: Output of Quota remove-objects, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "remove-objects", path])


def default_soft_limit(volname, percent):
    """
    Set default soft limit

    :param volname: Volume Name
    :param percent: Percent of soft limit
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "default-soft-limit",
                          "{0}".format(percent)])


def limit_usage(volname, path, size, percent=None):
    """
    Limit quota usage

    :param volname: Volume Name
    :param path: Path to limit quota
    :param size: Limit Size
    :param percent: Percentage
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    cmd = [volname, "limit-usage", path, "{0}".format(size)]
    if percent is not None:
        cmd.append("{0}".format(percent))
    return quota_execute(cmd)


def limit_objects(volname, path, num, percent=None):
    """
    Limit objects

    :param volname: Volume Name
    :param path: Path to limit quota
    :param num: Limit Number
    :param percent: Percentage
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    cmd = [volname, "limit-objects", path, "{0}".format(num)]
    if percent is not None:
        cmd.append("{0}".format(percent))
    return quota_execute(cmd)


def alert_time(volname, a_time):
    """
    Set Alert Time

    :param volname: Volume Name
    :param a_time: Alert Time Value
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "alert-time", "{0}".format(a_time)])


def soft_timeout(volname, timeout):
    """
    Set Soft Timeout

    :param volname: Volume Name
    :param timeout: Timeout Value
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "soft-timeout", "{0}".format(timeout)])


def hard_timeout(volname, timeout):
    """
    Set Hard Timeout

    :param volname: Volume Name
    :param timeout: Timeout Value
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return quota_execute([volname, "hard-timeout", "{0}".format(timeout)])
# glustercli/cli/rebalance.py
# -*- coding: utf-8 -*-

from glustercli.cli.utils import volume_execute, volume_execute_xml
from glustercli.cli.parsers import parse_rebalance_status


def fix_layout_start(volname):
    """
    Fix Layout Rebalance Start

    :param volname: Volume Name
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return volume_execute(["rebalance", volname, "fix-layout", "start"])


def start(volname, force=False):
    """
    Rebalance Start

    :param volname: Volume Name
    :param force: True|False Force start the rebalance
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    cmd = ["rebalance", volname, "start"]
    if force:
        cmd.append("force")
    return volume_execute(cmd)


def stop(volname):
    """
    Rebalance Stop

    :param volname: Volume Name
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return volume_execute(["rebalance", volname, "stop"])


def status(volname):
    """
    Rebalance Status

    :param volname: Volume Name
    :returns: Rebalance Status, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return parse_rebalance_status(
        volume_execute_xml(["rebalance", volname, "status"]))


# glustercli/cli/snapshot.py
# -*- coding: utf-8 -*-

from glustercli.cli.utils import snapshot_execute, snapshot_execute_xml
from glustercli.cli.parsers import (parse_snapshot_status,
                                    parse_snapshot_info,
                                    parse_snapshot_list)


def _target_args(snapname, volname):
    """
    Trailing CLI args shared by delete/info/status: a snapshot name
    wins over a volume name; both absent means "all snapshots".
    """
    args = []
    if snapname is not None:
        args.append(snapname)
    if volname is not None and snapname is None:
        args.extend(["volume", volname])
    return args


def activate(snapname, force=False):
    """
    Activate Snapshot

    :param snapname: Snapshot Name
    :param force: True|False Force Activate the snapshot
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    cmd = ["activate", snapname]
    if force:
        cmd.append("force")
    return snapshot_execute(cmd)


def clone(clonename, snapname):
    """
    Clone the Snapshot

    :param clonename: Snapshot Clone Name
    :param snapname: Snapshot Name
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return snapshot_execute(["clone", clonename, snapname])


def create(volname, snapname, no_timestamp=False, description="",
           force=False):
    """
    Create Snapshot

    :param volname: Volume Name
    :param snapname: Snapshot Name
    :param no_timestamp: True|False Do not add Timestamp to name
    :param description: Description for Created Snapshot
    :param force: True|False Force Create the snapshot
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    cmd = ["create", snapname, volname]
    if no_timestamp:
        cmd.append("no-timestamp")
    if description:
        cmd.extend(["description", description])
    if force:
        cmd.append("force")
    return snapshot_execute(cmd)


def deactivate(snapname):
    """
    Deactivate the Snapshot

    :param snapname: Snapshot Name
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return snapshot_execute(["deactivate", snapname])


def delete(snapname=None, volname=None):
    """
    Delete Snapshot

    :param snapname: Snapshot Name
    :param volname: Volume Name
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return snapshot_execute(["delete"] + _target_args(snapname, volname))


def info(snapname=None, volname=None):
    """
    Snapshot Info

    :param snapname: Snapshot Name
    :param volname: Volume Name
    :returns: Snapshot Info, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return parse_snapshot_info(snapshot_execute_xml(
        ["info"] + _target_args(snapname, volname)))


def snaplist(volname=None):
    """
    List of Snapshots

    :param volname: Volume Name
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    cmd = ["list"] if volname is None else ["list", volname]
    return parse_snapshot_list(snapshot_execute_xml(cmd))


def restore(snapname):
    """
    Restore Snapshot

    :param snapname: Snapshot Name
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return snapshot_execute(["restore", snapname])


def status(snapname=None, volname=None):
    """
    Snapshot Status

    :param snapname: Snapshot Name
    :param volname: Volume Name
    :returns: Output of the command, raises
     GlusterCmdException((rc, out, err)) on error
    """
    return parse_snapshot_status(snapshot_execute_xml(
        ["status"] + _target_args(snapname, volname)))
164 | def config(volname, snap_max_hard_limit=None, 165 | snap_max_soft_limit=None, auto_delete=None, 166 | activate_on_create=None): 167 | """ 168 | Set Snapshot Config 169 | 170 | :param volname: Volume Name 171 | :param snap_max_hard_limit: Number of Snapshots hard limit 172 | :param snap_max_soft_limit: Number of Snapshots soft limit 173 | :param auto_delete: True|False Auto delete old snapshots 174 | :param activate_on_create: True|False Activate Snapshot after Create 175 | :returns: Output of the command, raises 176 | GlusterCmdException((rc, out, err)) on error 177 | """ 178 | cmd = ["config", volname] 179 | 180 | if snap_max_hard_limit is not None: 181 | cmd += ["snap-max-hard-limit", "{0}".format(snap_max_hard_limit)] 182 | 183 | if snap_max_soft_limit is not None: 184 | cmd += ["snap-max-soft-limit", "{0}".format(snap_max_soft_limit)] 185 | 186 | if auto_delete is not None: 187 | auto_delete_arg = "enable" if auto_delete else "disable" 188 | cmd += ["snap-max-hard-limit", auto_delete_arg] 189 | 190 | if activate_on_create is not None: 191 | activate_arg = "enable" if activate_on_create else "disable" 192 | cmd += ["snap-max-hard-limit", activate_arg] 193 | 194 | return snapshot_execute(cmd) 195 | -------------------------------------------------------------------------------- /glustercli/cli/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import subprocess 4 | import xml.etree.cElementTree as ET 5 | from contextlib import contextmanager 6 | from enum import IntEnum 7 | 8 | GLUSTERCMD = "gluster" 9 | GLUSTERD_SOCKET = None 10 | ssh = None 11 | SSH_HOST = None 12 | SSH_PEM_FILE = None 13 | prev_ssh_host = None 14 | prev_ssh_pem_file = None 15 | 16 | 17 | @contextmanager 18 | def ssh_connection(hostname, pem_file): 19 | global SSH_HOST, SSH_PEM_FILE 20 | SSH_HOST = hostname 21 | SSH_PEM_FILE = pem_file 22 | yield 23 | SSH_HOST = None 24 | SSH_PEM_FILE = None 25 | 26 | 27 | def 
execute(cmd): 28 | global prev_ssh_host, prev_ssh_pem_file 29 | 30 | cmd_args = [] 31 | cmd_args.append(GLUSTERCMD) 32 | 33 | if GLUSTERD_SOCKET: 34 | cmd_args.append("--glusterd-sock={0}".format(GLUSTERD_SOCKET)) 35 | 36 | cmd_args.append("--mode=script") 37 | cmd_args += cmd 38 | 39 | if SSH_HOST is not None and SSH_PEM_FILE is not None: 40 | # Reconnect only if first time or previously connected to different 41 | # host or using different pem key 42 | if ssh is None or prev_ssh_host != SSH_HOST \ 43 | or prev_ssh_pem_file != SSH_PEM_FILE: 44 | __connect_ssh() 45 | prev_ssh_host = SSH_HOST 46 | prev_ssh_pem_file = SSH_PEM_FILE 47 | 48 | cmd_args = " ".join(cmd_args) 49 | _, stdout, stderr = ssh.exec_command(cmd_args) 50 | returncode = stdout.channel.recv_exit_status() 51 | return (returncode, stdout.read().strip(), stderr.read().strip()) 52 | 53 | proc = subprocess.Popen(cmd_args, stdout=subprocess.PIPE, 54 | stderr=subprocess.PIPE, 55 | universal_newlines=True) 56 | out, err = proc.communicate() 57 | return (proc.returncode, out, err) 58 | 59 | 60 | class RebalanceOperationType(IntEnum): 61 | """ 62 | from rpc/xdr/src/cli1-xdr.x 63 | 64 | Represents the type of rebalance cmds 65 | that can be issued towards a gluster 66 | volume. 67 | (i.e. 
volume rebalance fix-layout start) 68 | """ 69 | NONE = 0 70 | START = 1 71 | STOP = 2 72 | STATUS = 3 73 | START_LAYOUT_FIX = 4 74 | START_FORCE = 5 75 | START_TIER = 6 76 | STATUS_TIER = 7 77 | START_DETACH_TIER = 8 78 | STOP_DETACH_TIER = 9 79 | PAUSE_TIER = 10 80 | RESUME_TIER = 11 81 | DETACH_STATUS = 12 82 | STOP_TIER = 13 83 | DETACH_START = 14 84 | DETACH_COMMIT = 15 85 | DETACH_COMMIT_FORCE = 16 86 | DETACH_STOP = 17 87 | TYPE_MAX = 18 # unused 88 | 89 | 90 | class GlusterCmdException(Exception): 91 | pass 92 | 93 | 94 | def set_ssh_pem_file(pem_file): 95 | global USE_SSH, SSH_PEM_FILE 96 | USE_SSH = True 97 | SSH_PEM_FILE = pem_file 98 | 99 | 100 | def set_ssh_host(hostname): 101 | global SSH_HOST 102 | SSH_HOST = hostname 103 | 104 | 105 | def __connect_ssh(): 106 | global ssh 107 | 108 | import paramiko # noqa # pylint: disable=import-outside-toplevel 109 | 110 | if ssh is None: 111 | ssh = paramiko.SSHClient() 112 | try: 113 | ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 114 | ssh.connect(SSH_HOST, username="root", key_filename=SSH_PEM_FILE) 115 | except paramiko.ssh_exception.AuthenticationException as err: 116 | raise GlusterCmdException("Unable to establish SSH connection " 117 | "to root@{0}:\n{1}".format( 118 | SSH_HOST, err)) 119 | 120 | 121 | def set_gluster_path(path): 122 | global GLUSTERCMD 123 | GLUSTERCMD = path 124 | 125 | 126 | def set_gluster_socket(path): 127 | global GLUSTERD_SOCKET 128 | GLUSTERD_SOCKET = path 129 | 130 | 131 | def check_for_xml_errors(data): 132 | stdout = data[0] 133 | stderr = data[1] 134 | 135 | # depending on the gluster sub-command that's run 136 | # it can have a returncode of 0 (meaning success) 137 | # however this could mean that the formatting of 138 | # the xml was successful and not the command that 139 | # was run. 
We need to check stdout and/or stderr 140 | # for the `opRet` xml element and if it's -1, then 141 | # format the error accordingly and raise 142 | # GlusterCmdException 143 | 144 | # for reasons unknown, some commands will fail and 145 | # return to stdout instead of stderr and vice versa 146 | error = stdout if stdout else stderr or None 147 | if error is not None: 148 | try: 149 | error = ET.fromstring(error) 150 | except ET.ParseError: 151 | # means parsing xml data failed 152 | # so play it safe and ignore 153 | return 154 | 155 | op_ret = error.find('opRet').text or None 156 | op_err = error.find('opErrstr').text or None 157 | if op_ret == '-1': 158 | if op_err is None: 159 | # means command failed but no error 160 | # string so make up one 161 | op_err = 'FAILED' 162 | raise GlusterCmdException((int(op_ret), '', op_err)) 163 | 164 | 165 | def execute_or_raise(cmd): 166 | returncode, out, err = execute(cmd) 167 | if returncode != 0: 168 | raise GlusterCmdException((returncode, out, err)) 169 | 170 | check_for_xml_errors((out, err)) 171 | 172 | return out.strip() 173 | 174 | 175 | def gluster_system_execute(cmd): 176 | cmd.insert(0, "system::") 177 | cmd.insert(1, "execute") 178 | return execute_or_raise(cmd) 179 | 180 | 181 | def gluster_execute(cmd): 182 | return execute_or_raise(cmd) 183 | 184 | 185 | def gluster_execute_xml(cmd): 186 | cmd.append("--xml") 187 | return execute_or_raise(cmd) 188 | 189 | 190 | def volume_execute(cmd): 191 | cmd.insert(0, "volume") 192 | return execute_or_raise(cmd) 193 | 194 | 195 | def peer_execute(cmd): 196 | cmd.insert(0, "peer") 197 | return execute_or_raise(cmd) 198 | 199 | 200 | def volume_execute_xml(cmd): 201 | cmd.insert(0, "volume") 202 | return gluster_execute_xml(cmd) 203 | 204 | 205 | def peer_execute_xml(cmd): 206 | cmd.insert(0, "peer") 207 | return gluster_execute_xml(cmd) 208 | 209 | 210 | def georep_execute(cmd): 211 | cmd.insert(0, "volume") 212 | cmd.insert(1, "geo-replication") 213 | return 
execute_or_raise(cmd) 214 | 215 | 216 | def georep_execute_xml(cmd): 217 | cmd.insert(0, "volume") 218 | cmd.insert(1, "geo-replication") 219 | return gluster_execute_xml(cmd) 220 | 221 | 222 | def bitrot_execute(cmd): 223 | cmd.insert(0, "volume") 224 | cmd.insert(1, "bitrot") 225 | return execute_or_raise(cmd) 226 | 227 | 228 | def bitrot_execute_xml(cmd): 229 | cmd.insert(0, "volume") 230 | cmd.insert(1, "bitrot") 231 | return gluster_execute_xml(cmd) 232 | 233 | 234 | def quota_execute(cmd): 235 | cmd.insert(0, "volume") 236 | cmd.insert(1, "quota") 237 | return execute_or_raise(cmd) 238 | 239 | 240 | def quota_execute_xml(cmd): 241 | cmd.insert(0, "volume") 242 | cmd.insert(1, "quota") 243 | return gluster_execute_xml(cmd) 244 | 245 | 246 | def heal_execute(cmd): 247 | cmd.insert(0, "volume") 248 | cmd.insert(1, "heal") 249 | return execute_or_raise(cmd) 250 | 251 | 252 | def heal_execute_xml(cmd): 253 | cmd.insert(0, "volume") 254 | cmd.insert(1, "heal") 255 | return gluster_execute_xml(cmd) 256 | 257 | 258 | def snapshot_execute(cmd): 259 | cmd.insert(0, "snapshot") 260 | return execute_or_raise(cmd) 261 | 262 | 263 | def snapshot_execute_xml(cmd): 264 | cmd.insert(0, "snapshot") 265 | return gluster_execute_xml(cmd) 266 | 267 | 268 | def tier_execute(cmd): 269 | cmd.insert(0, "volume") 270 | cmd.insert(1, "tier") 271 | return execute_or_raise(cmd) 272 | 273 | 274 | def tier_execute_xml(cmd): 275 | cmd.insert(0, "volume") 276 | cmd.insert(1, "tier") 277 | return gluster_execute_xml(cmd) 278 | -------------------------------------------------------------------------------- /glustercli/cli/volume.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from glustercli.cli.utils import volume_execute, volume_execute_xml, \ 4 | GlusterCmdException 5 | from glustercli.cli.parsers import (parse_volume_info, 6 | parse_volume_status, 7 | parse_volume_options, 8 | parse_volume_list, 9 | 
parse_volume_profile_info) 10 | 11 | # Following import are not used in this file, but imported to make 12 | # it available via volume.(noqa to ignore in pep8 check) 13 | from glustercli.cli import bitrot # noqa # pylint: disable=unused-import 14 | from glustercli.cli import bricks # noqa # pylint: disable=unused-import 15 | from glustercli.cli import heal # noqa # pylint: disable=unused-import 16 | from glustercli.cli import quota # noqa # pylint: disable=unused-import 17 | from glustercli.cli import rebalance # noqa # pylint: disable=unused-import 18 | 19 | 20 | LOCK_KINDS = ["blocked", "granted", "all"] 21 | INFO_OPS = ["peek", "incremental", "cumulative", "clear"] 22 | 23 | 24 | def start(volname, force=False): 25 | """ 26 | Start Gluster Volume 27 | 28 | :param volname: Volume Name 29 | :param force: (True|False) Start Volume with Force option 30 | :returns: Output of Start command, raises 31 | GlusterCmdException((rc, out, err)) on error 32 | """ 33 | cmd = ["start", volname] 34 | if force: 35 | cmd += ["force"] 36 | 37 | return volume_execute(cmd) 38 | 39 | 40 | def stop(volname, force=False): 41 | """ 42 | Stop Gluster Volume 43 | 44 | :param volname: Volume Name 45 | :param force: (True|False) Stop Volume with Force option 46 | :returns: Output of Stop command, raises 47 | GlusterCmdException((rc, out, err)) on error 48 | """ 49 | cmd = ["stop", volname] 50 | if force: 51 | cmd += ["force"] 52 | 53 | return volume_execute(cmd) 54 | 55 | 56 | def restart(volname, force=False): 57 | """ 58 | Restart Gluster Volume, Wrapper around two calls stop and start 59 | 60 | :param volname: Volume Name 61 | :param force: (True|False) Restart Volume with Force option 62 | :returns: Output of Start command, raises 63 | GlusterCmdException((rc, out, err)) on error 64 | """ 65 | cmd = ["stop", volname] 66 | if force: 67 | cmd += ["force"] 68 | 69 | volume_execute(cmd) 70 | 71 | cmd = ["start", volname] 72 | if force: 73 | cmd += ["force"] 74 | 75 | return 
volume_execute(cmd) 76 | 77 | 78 | def delete(volname): 79 | """ 80 | Delete Gluster Volume 81 | 82 | :param volname: Volume Name 83 | :returns: Output of Delete command, raises 84 | GlusterCmdException((rc, out, err)) on error 85 | """ 86 | cmd = ["delete", volname] 87 | return volume_execute(cmd) 88 | 89 | 90 | # noqa # pylint: disable=too-many-arguments 91 | def create(volname, volbricks, replica=0, stripe=0, arbiter=0, disperse=0, 92 | disperse_data=0, redundancy=0, transport="tcp", force=False): 93 | """ 94 | Create Gluster Volume 95 | 96 | :param volname: Volume Name 97 | :param volbricks: List of Brick paths(HOSTNAME:PATH) 98 | :param replica: Number of Replica bricks 99 | :param stripe: Number of Stripe bricks 100 | :param arbiter: Number of Arbiter bricks 101 | :param disperse: Number of disperse bricks 102 | :param disperse_data: Number of disperse data bricks 103 | :param redundancy: Number of Redundancy bricks 104 | :param transport: Transport mode(tcp|rdma|tcp,rdma) 105 | :param force: (True|False) Create Volume with Force option 106 | :returns: Output of Create command, raises 107 | GlusterCmdException((rc, out, err)) on error 108 | """ 109 | cmd = ["create", volname] 110 | if replica != 0: 111 | cmd += ["replica", "{0}".format(replica)] 112 | 113 | if stripe != 0: 114 | cmd += ["stripe", "{0}".format(stripe)] 115 | 116 | if arbiter != 0: 117 | cmd += ["arbiter", "{0}".format(arbiter)] 118 | 119 | if disperse != 0: 120 | cmd += ["disperse", "{0}".format(disperse)] 121 | 122 | if disperse_data != 0: 123 | cmd += ["disperse-data", "{0}".format(disperse_data)] 124 | 125 | if redundancy != 0: 126 | cmd += ["redundancy", "{0}".format(redundancy)] 127 | 128 | if transport != "tcp": 129 | cmd += ["transport", transport] 130 | 131 | cmd += volbricks 132 | 133 | if force: 134 | cmd += ["force"] 135 | 136 | return volume_execute(cmd) 137 | 138 | 139 | def info(volname=None, group_subvols=False): 140 | """ 141 | Get Gluster Volume Info 142 | 143 | :param 
volname: Volume Name 144 | :param group_subvols: Show Subvolume Information in Groups 145 | :returns: Returns Volume Info, raises 146 | GlusterCmdException((rc, out, err)) on error 147 | """ 148 | cmd = ["info"] 149 | if volname is not None: 150 | cmd += [volname] 151 | 152 | return parse_volume_info(volume_execute_xml(cmd), 153 | group_subvols=group_subvols) 154 | 155 | 156 | def status_detail(volname=None, group_subvols=False): 157 | """ 158 | Get Gluster Volume Status 159 | 160 | :param volname: Volume Name or List of volumes 161 | :param group_subvols: Show Subvolume Information in Groups 162 | :returns: Returns Volume Status, raises 163 | GlusterCmdException((rc, out, err)) on error 164 | """ 165 | cmd = ["status"] 166 | if volname is not None: 167 | if type(volname) == list: 168 | volumes = [] 169 | volinfo = {} 170 | 171 | for vi in info(): 172 | volinfo[vi['name']] = vi 173 | 174 | for v in volname: 175 | cmd += [v, "detail"] 176 | volumes.append(parse_volume_status(volume_execute_xml(cmd), 177 | [volinfo[v]], 178 | group_subvols=\ 179 | group_subvols).pop()) 180 | cmd = ["status"] 181 | return(volumes) 182 | else: 183 | cmd += [volname, "detail"] 184 | else: 185 | cmd += ["all", "detail"] 186 | 187 | return parse_volume_status(volume_execute_xml(cmd), 188 | info(volname), 189 | group_subvols=group_subvols) 190 | 191 | 192 | def optset(volname, opts): 193 | """ 194 | Set Volume Options 195 | 196 | :param volname: Volume Name 197 | :param opts: Dict with config key as dict key and config value as value 198 | :returns: Output of Volume Set command, raises 199 | GlusterCmdException((rc, out, err)) on error 200 | """ 201 | cmd = ["set", volname] 202 | for key, value in opts.items(): 203 | cmd += [key, value] 204 | 205 | return volume_execute(cmd) 206 | 207 | 208 | def optget(volname, opt="all"): 209 | """ 210 | Get Volume Options 211 | 212 | :param volname: Volume Name 213 | :param opt: Option Name 214 | :returns: List of Volume Options, raises 215 | 
GlusterCmdException((rc, out, err)) on error 216 | """ 217 | cmd = ["get", volname, opt] 218 | return parse_volume_options(volume_execute_xml(cmd)) 219 | 220 | 221 | def optreset(volname, opt=None, force=False): 222 | """ 223 | Reset Volume Options 224 | 225 | :param volname: Volume Name 226 | :param opt: Option name to reset, else reset all 227 | :param force: Force reset options 228 | :returns: Output of Volume Reset command, raises 229 | GlusterCmdException((rc, out, err)) on error 230 | """ 231 | cmd = ["reset", volname] 232 | 233 | if opt is not None: 234 | cmd += [opt] 235 | 236 | if force: 237 | cmd += ["force"] 238 | 239 | return volume_execute(cmd) 240 | 241 | 242 | def vollist(): 243 | """ 244 | Volumes List 245 | 246 | :returns: List of Volumes, raises 247 | GlusterCmdException((rc, out, err)) on error 248 | """ 249 | cmd = ["list"] 250 | return parse_volume_list(volume_execute_xml(cmd)) 251 | 252 | 253 | def log_rotate(volname, brick): 254 | """ 255 | Brick log rotate 256 | 257 | :param volname: Volume Name 258 | :param brick: Brick Path 259 | :returns: Output of Log rotate command, raises 260 | GlusterCmdException((rc, out, err)) on error 261 | """ 262 | cmd = ["log", volname, "rotate", brick] 263 | return volume_execute(cmd) 264 | 265 | 266 | def sync(hostname, volname=None): 267 | """ 268 | Sync the volume information from a peer 269 | 270 | :param hostname: Hostname to sync from 271 | :param volname: Volume Name 272 | :returns: Output of Sync command, raises 273 | GlusterCmdException((rc, out, err)) on error 274 | """ 275 | cmd = ["sync", hostname] 276 | if volname is not None: 277 | cmd += [volname] 278 | return volume_execute(cmd) 279 | 280 | 281 | # noqa # pylint: disable=too-many-arguments 282 | def clear_locks(volname, path, kind, inode_range=None, 283 | entry_basename=None, posix_range=None): 284 | """ 285 | Clear locks held on path 286 | 287 | :param volname: Volume Name 288 | :param path: Locked Path 289 | :param kind: Lock 
Kind(blocked|granted|all) 290 | :param inode_range: Inode Range 291 | :param entry_basename: Entry Basename 292 | :param posix_range: Posix Range 293 | :returns: Output of Clear locks command, raises 294 | GlusterCmdException((rc, out, err)) on error 295 | """ 296 | if kind.lower() not in LOCK_KINDS: 297 | raise GlusterCmdException((-1, "", "Invalid Lock Kind")) 298 | cmd = ["clear-locks", volname, "kind", kind.lower()] 299 | 300 | if inode_range is not None: 301 | cmd += ["inode", inode_range] 302 | 303 | if entry_basename is not None: 304 | cmd += ["entry", entry_basename] 305 | 306 | if posix_range is not None: 307 | cmd += ["posix", posix_range] 308 | 309 | return volume_execute(cmd) 310 | 311 | 312 | def barrier_enable(volname): 313 | """ 314 | Enable Barrier 315 | 316 | :param volname: Volume Name 317 | :returns: Output of Barrier command, raises 318 | GlusterCmdException((rc, out, err)) on error 319 | """ 320 | cmd = ["barrier", volname, "enable"] 321 | return volume_execute(cmd) 322 | 323 | 324 | def barrier_disable(volname): 325 | """ 326 | Disable Barrier 327 | 328 | :param volname: Volume Name 329 | :returns: Output of Barrier command, raises 330 | GlusterCmdException((rc, out, err)) on error 331 | """ 332 | cmd = ["barrier", volname, "disable"] 333 | return volume_execute(cmd) 334 | 335 | 336 | def profile_start(volname): 337 | """ 338 | Start Profile 339 | 340 | :param volname: Volume Name 341 | :return: Output of Profile command, raises 342 | GlusterCmdException((rc, out, err)) on error 343 | """ 344 | cmd = ["profile", volname, "start"] 345 | return volume_execute(cmd) 346 | 347 | 348 | def profile_stop(volname): 349 | """ 350 | Stop Profile 351 | 352 | :param volname: Volume Name 353 | :return: Output of Profile command, raises 354 | GlusterCmdException((rc, out, err)) on error 355 | """ 356 | cmd = ["profile", volname, "stop"] 357 | return volume_execute(cmd) 358 | 359 | 360 | def profile_info(volname, opt, peek=False): 361 | """ 362 | Get Profile 
info 363 | 364 | :param volname: Volume Name 365 | :param opt: Operation type of info, 366 | like peek, incremental, cumulative, clear 367 | :param peek: Use peek or not, default is False 368 | :return: Return profile info, raises 369 | GlusterCmdException((rc, out, err)) on error 370 | """ 371 | 372 | if opt.lower() not in INFO_OPS: 373 | raise GlusterCmdException(( 374 | -1, 375 | "", 376 | "Invalid Info Operation Type, use peek, " 377 | "incremental, cumulative, clear" 378 | )) 379 | cmd = ["profile", volname, "info", opt.lower()] 380 | 381 | if opt.lower() == INFO_OPS[1] and peek: 382 | cmd += ["peek"] 383 | 384 | return parse_volume_profile_info(volume_execute_xml(cmd), opt) 385 | 386 | # TODO: Pending Wrappers 387 | # volume statedump [nfs|quotad] [all|mem|iobuf| 388 | # callpool|priv|fd|inode|history]... - perform statedump on bricks 389 | # volume status [all | [nfs|shd||quotad]] 390 | # [detail|clients|mem|inode|fd|callpool|tasks] - display status of 391 | # all or specified volume(s)/brick 392 | # volume top {open|read|write|opendir|readdir|clear} 393 | # [nfs|brick ] [list-cnt ] | 394 | # volume top {read-perf|write-perf} [bs count 395 | # ] [brick ] [list-cnt ] - volume top operations 396 | -------------------------------------------------------------------------------- /glustercli/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from glustercli.metrics.process import local_processes 4 | from glustercli.metrics.utilization import local_utilization 5 | from glustercli.metrics.diskstats import local_diskstats 6 | 7 | # Reexport 8 | __all__ = [ 9 | "local_processes", 10 | "local_utilization", 11 | "local_diskstats" 12 | ] 13 | -------------------------------------------------------------------------------- /glustercli/metrics/cmdlineparser.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 
import socket 3 | 4 | from glustercli.metrics import utils 5 | 6 | 7 | def _hostname(): 8 | return socket.gethostname().split('.')[0] 9 | 10 | 11 | def parse_cmdline_glusterd(args): 12 | return { 13 | "name": "glusterd", 14 | "node_id": utils.get_node_id(), 15 | "hostname": _hostname() 16 | } 17 | 18 | 19 | def parse_cmdline_glusterfsd(args): 20 | parser = ArgumentParser() 21 | parser.add_argument("-s", dest="server") 22 | parser.add_argument("--volfile-id") 23 | parser.add_argument("--brick-name") 24 | pargs, _ = parser.parse_known_args(args) 25 | 26 | return { 27 | "name": "glusterfsd", 28 | "hostname": _hostname(), 29 | "node_id": utils.get_node_id(), 30 | "server": pargs.server, 31 | "brick_path": pargs.brick_name, 32 | "volname": pargs.volfile_id.split(".")[0] 33 | } 34 | 35 | 36 | def parse_cmdline_glustershd(args): 37 | # TODO: Parsing 38 | pass 39 | 40 | 41 | def parse_cmdline_python(args): 42 | if len(args) > 1 and "glustereventsd" in args[1]: 43 | return parse_cmdline_glustereventsd(args) 44 | 45 | if len(args) > 1 and "gsyncd" in args[1]: 46 | return parse_cmdline_gsyncd(args) 47 | 48 | return {} 49 | 50 | 51 | def parse_cmdline_gsyncd(args): 52 | data = { 53 | "name": "gsyncd", 54 | "hostname": _hostname() 55 | } 56 | if "--feedback-fd" in args: 57 | data["role"] = "worker" 58 | elif "--agent" in args: 59 | data["role"] = "agent" 60 | elif "--monitor" in args: 61 | data["role"] = "monitor" 62 | elif "--listen" in args: 63 | data["role"] = "secondary" 64 | 65 | return data 66 | 67 | 68 | def parse_cmdline_glustereventsd(args): 69 | return { 70 | "name": "glustereventsd", 71 | "hostname": _hostname() 72 | } 73 | -------------------------------------------------------------------------------- /glustercli/metrics/diskstats.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | from glustercli.metrics.utils import get_local_bricks 5 | 6 | 7 | DEFAULT_DISKSTAT = { 8 | "major_number": 0, 
9 | "minor_number": 0, 10 | "reads_completed": 0, 11 | "reads_merged": 0, 12 | "sectors_read": 0, 13 | "time_spent_reading": 0, 14 | "writes_completed": 0, 15 | "writes_merged": 0, 16 | "sectors_written": 0, 17 | "time_spent_writing": 0, 18 | "ios_currently_in_progress": 0, 19 | "time_spent_doing_ios": 0, 20 | "weighted_time_spent_doing_ios": 0 21 | } 22 | 23 | 24 | def local_diskstats(volname=None): 25 | """ 26 | Collect Diskstats info of local bricks 27 | 28 | :param volname: Volume Name 29 | :returns: List of diskstats information 30 | { 31 | "volume": VOLUME_NAME, 32 | "brick_index": BRICK_INDEX_IN_VOL_INFO, 33 | "node_id": NODE_ID, 34 | "brick": BRICK_NAME, 35 | "fs": BRICK_FILESYSTEM, 36 | "device": BRICK_DEVICE, 37 | "major_number": MAJOR_NUMBER, 38 | "minor_number": MINOR_NUMBER, 39 | "reads_completed": READS_COMPLETED, 40 | "reads_merged": READS_MERGED, 41 | "sectors_read": SECTORS_READ, 42 | "time_spent_reading": TIME_SPENT_READING, 43 | "writes_completed": WRITES_COMPLETED, 44 | "writes_merged": WRITES_MERGED, 45 | "sectors_written": SECTORS_WRITTEN, 46 | "time_spent_writing": TIME_SPENT_WRITING, 47 | "ios_currently_in_progress": IOS_CURRENTLY_IN_PROGRESS, 48 | "time_spent_doing_ios": TIME_SPENT_DOING_IOS, 49 | "weighted_time_spent_doing_ios": WEIGHTED_TIME_SPENT_DOING_IOS 50 | } 51 | """ 52 | local_bricks = get_local_bricks(volname) 53 | cmd = ["df", "--output=source"] 54 | 55 | diskstat_data_raw = "" 56 | with open("/proc/diskstats") as stat_file: 57 | diskstat_data_raw = stat_file.read() 58 | 59 | # /proc/diskstats fields 60 | # 1 - major number 61 | # 2 - minor mumber 62 | # 3 - device name 63 | # 4 - reads completed successfully 64 | # 5 - reads merged 65 | # 6 - sectors read 66 | # 7 - time spent reading (ms) 67 | # 8 - writes completed 68 | # 9 - writes merged 69 | # 10 - sectors written 70 | # 11 - time spent writing (ms) 71 | # 12 - I/Os currently in progress 72 | # 13 - time spent doing I/Os (ms) 73 | # 14 - weighted time spent doing I/Os (ms) 
74 | diskstat_data = {} 75 | for row in diskstat_data_raw.strip().split("\n"): 76 | row = row.split() 77 | if not row: 78 | continue 79 | 80 | diskstat_data[row[2]] = { 81 | "major_number": row[0], 82 | "minor_number": row[1], 83 | "reads_completed": row[3], 84 | "reads_merged": row[4], 85 | "sectors_read": row[5], 86 | "time_spent_reading": row[6], 87 | "writes_completed": row[7], 88 | "writes_merged": row[8], 89 | "sectors_written": row[9], 90 | "time_spent_writing": row[10], 91 | "ios_currently_in_progress": row[11], 92 | "time_spent_doing_ios": row[12], 93 | "weighted_time_spent_doing_ios": row[13] 94 | } 95 | 96 | for brick in local_bricks: 97 | bpath = brick["brick"].split(":", 1)[-1] 98 | proc = subprocess.Popen(cmd + [bpath], 99 | stdout=subprocess.PIPE, 100 | stderr=subprocess.PIPE, 101 | universal_newlines=True) 102 | out, _ = proc.communicate() 103 | 104 | # `df` command error 105 | if proc.returncode != 0: 106 | brick["fs"] = "unknown" 107 | brick["device"] = "unknown" 108 | brick.update(DEFAULT_DISKSTAT) 109 | continue 110 | 111 | df_data = out.strip() 112 | df_data = df_data.split("\n")[-1].strip() # First line is header 113 | brick["fs"] = df_data 114 | if os.path.islink(df_data): 115 | brick["device"] = os.readlink(df_data).split("/")[-1] 116 | else: 117 | brick["device"] = df_data.split("/")[-1] 118 | 119 | brick.update(diskstat_data.get(brick["device"], DEFAULT_DISKSTAT)) 120 | 121 | return local_bricks 122 | -------------------------------------------------------------------------------- /glustercli/metrics/process.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | from glustercli.metrics import cmdlineparser 4 | 5 | GLUSTER_PROCS = [ 6 | "glusterd", 7 | "glusterfsd", 8 | "glustershd", 9 | "glusterfs", 10 | "python", # gsyncd, glustereventsd etc 11 | "ssh", # gsyncd related ssh connections 12 | ] 13 | 14 | 15 | def get_cmdline(pid): 16 | args = [] 17 | try: 18 | with 
open("/proc/{0}/cmdline".format(pid), "r") as cmdline_file: 19 | args = cmdline_file.read().strip("\x00").split("\x00") 20 | except IOError: 21 | pass 22 | 23 | return args 24 | 25 | 26 | def local_processes(): 27 | # Run ps command and get all the details for all gluster processes 28 | # ps --no-header -ww -o pid,pcpu,pmem,rsz,vsz,etimes,comm -C glusterd,.. 29 | # command can be used instead of comm, but if an argument has space then 30 | # it is a problem 31 | # for example `mytool "hello world" arg2` will be displayed as 32 | # `mytool hello world arg2` in ps output 33 | # Read cmdline from `/proc//cmdline` to get full commands 34 | # Use argparse to parse the output and form the key 35 | # Example output of ps command: 36 | # 6959 0.0 0.6 12840 713660 504076 glusterfs 37 | details = [] 38 | cmd = ["ps", 39 | "--no-header", # No header in the output 40 | "-ww", # To set unlimited width to avoid crop 41 | "-o", # Output Format 42 | "pid,pcpu,pmem,rsz,vsz,etimes,comm", 43 | "-C", 44 | ",".join(GLUSTER_PROCS)] 45 | 46 | proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, 47 | stderr=subprocess.PIPE, 48 | universal_newlines=True) 49 | out, _ = proc.communicate() 50 | # No records in `ps` output 51 | if proc.returncode != 0: 52 | return details 53 | 54 | data = out.strip() 55 | 56 | for line in data.split("\n"): 57 | # Sample data: 58 | # 6959 0.0 0.6 12840 713660 504076 glusterfs 59 | try: 60 | pid, pcpu, pmem, rsz, vsz, etimes, _ = line.strip().split() 61 | except ValueError: 62 | # May be bad ps output, for example 63 | # 30916 0.0 0.0 0 0 7 python 64 | continue 65 | 66 | args = get_cmdline(int(pid)) 67 | if not args: 68 | # Unable to collect the cmdline, may be IO error and process died? 
69 | continue 70 | 71 | cmdname = args[0].split("/")[-1] 72 | func_name = "parse_cmdline_" + cmdname 73 | details_func = getattr(cmdlineparser, func_name, None) 74 | 75 | if details_func is not None: 76 | data = details_func(args) 77 | if data is not None: 78 | data["percentage_cpu"] = float(pcpu) 79 | data["percentage_memory"] = float(pmem) 80 | data["resident_memory"] = int(rsz) 81 | data["virtual_memory"] = int(vsz) 82 | data["elapsed_time_sec"] = int(etimes) 83 | data["pid"] = int(pid) 84 | details.append(data) 85 | 86 | return details 87 | -------------------------------------------------------------------------------- /glustercli/metrics/utilization.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from glustercli.metrics.utils import get_local_bricks 4 | 5 | 6 | def local_utilization(volname=None): 7 | """ 8 | Collect Utilization details of local bricks 9 | 10 | :param volname: Volume Name 11 | :returns: List of utilization information 12 | { 13 | "volume": VOLUME_NAME, 14 | "brick_index": BRICK_INDEX_IN_VOL_INFO, 15 | "node_id": NODE_ID, 16 | "brick": BRICK_NAME, 17 | "block_size": ST_F_FRSIZE, 18 | "blocks_total": ST_F_BLOCKS, 19 | "blocks_free": ST_F_BFREE, 20 | "blocks_avail": ST_F_BAVAIL, 21 | "inodes_total": ST_F_FILES, 22 | "inodes_free": ST_F_FFREE, 23 | "inodes_avail": ST_F_FAVAIL 24 | } 25 | """ 26 | 27 | local_bricks = get_local_bricks(volname) 28 | 29 | for brick in local_bricks: 30 | bpath = brick["brick"].split(":", 1)[-1] 31 | stat = os.statvfs(bpath) 32 | 33 | brick["block_size"] = stat.f_frsize 34 | brick["blocks_total"] = stat.f_blocks 35 | brick["blocks_free"] = stat.f_bfree 36 | brick["blocks_avail"] = stat.f_bavail 37 | brick["inodes_total"] = stat.f_files 38 | brick["inodes_free"] = stat.f_ffree 39 | brick["inodes_avail"] = stat.f_favail 40 | 41 | return local_bricks 42 | -------------------------------------------------------------------------------- /glustercli/metrics/utils.py: 
-------------------------------------------------------------------------------- 1 | from glustercli.cli import volume 2 | 3 | UUID_FILE = "/var/lib/glusterd/glusterd.info" 4 | 5 | myuuid = None 6 | 7 | 8 | def get_node_id(): 9 | global myuuid 10 | 11 | if myuuid is not None: 12 | return myuuid 13 | 14 | val = None 15 | with open(UUID_FILE) as uuid_file: 16 | for line in uuid_file: 17 | if line.startswith("UUID="): 18 | val = line.strip().split("=")[-1] 19 | break 20 | 21 | myuuid = val 22 | return val 23 | 24 | 25 | def get_local_bricks(volname=None): 26 | local_node_id = get_node_id() 27 | volinfo = volume.info(volname) 28 | bricks = [] 29 | for vol in volinfo: 30 | for jdx, brick in enumerate(vol["bricks"]): 31 | if brick["uuid"] != local_node_id: 32 | continue 33 | 34 | bricks.append({ 35 | "volume": vol["name"], 36 | "brick_index": jdx, 37 | "node_id": brick["uuid"], 38 | "brick": brick["name"]}) 39 | 40 | return bricks 41 | -------------------------------------------------------------------------------- /pydocmd.yml: -------------------------------------------------------------------------------- 1 | site_name: "Python bindings for Gluster CLI commands and Metrics" 2 | 3 | # This tells pydocmd which pages to generate from which Python modules, 4 | # functions and classes. At the first level is the page name, below that 5 | # is a tree of Python member names (modules, classes, etc.) that should be 6 | # documented. Higher indentation leads to smaller header size. 7 | generate: 8 | - docs/cli.md: 9 | - glustercli.cli.volume++ 10 | - glustercli.cli.georep++ 11 | - docs/metrics.md: 12 | - glustercli.metrics++ 13 | 14 | # MkDocs pages configuration. The `<<` operator is sugar added by pydocmd 15 | # that allows you to use an external Markdown file (eg. your project's README) 16 | # in the documentation. The path must be relative to current working directory. 17 | # This configuration is not mandatory if you have your own mkdocs.yml config file. 
18 | pages: 19 | - Home: docs/index.md << README.md 20 | - glustercli.cli: 21 | - CLI bindings: docs/cli.md 22 | - glustercli.metrics: 23 | - Metrics: docs/metrics.md 24 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.setuptools_scm] 6 | write_to = "glustercli/_version.py" -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | try: 2 | # python >=3.8 3 | from importlib.metadata import version, PackageNotFoundError 4 | except ImportError: 5 | # python <3.8 6 | # importlib.metadata not available for python 3.7 7 | from importlib_metadata import version, PackageNotFoundError 8 | from setuptools import setup 9 | 10 | try: 11 | __version__ = version('glustercli') 12 | except PackageNotFoundError: 13 | __version__ = "unknown" 14 | 15 | setup( 16 | name='glustercli', 17 | version=__version__, 18 | description='Python bindings for GlusterFS CLI and Metrics collection', 19 | license='GPLv2 or LGPLv3+', 20 | author='Aravinda Vishwanathapura', 21 | author_email='aravinda@kadalu.io', 22 | url='https://github.com/gluster/glustercli-python', 23 | packages=["glustercli", "glustercli.cli", "glustercli.metrics"], 24 | install_requires=["paramiko"], 25 | classifiers=[ 26 | 'Development Status :: 5 - Production/Stable', 27 | 'Intended Audience :: Developers', 28 | 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)', # noqa 29 | 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 30 | 'Operating System :: POSIX :: Linux', 31 | 'Programming Language :: Python', 32 | 'Programming Language :: Python :: 2', 33 | 'Programming Language :: Python :: 2.6', 
34 | 'Programming Language :: Python :: 2.7', 35 | 'Programming Language :: Python :: 3', 36 | 'Programming Language :: Python :: 3.4', 37 | 'Programming Language :: Python :: 3.5', 38 | 'Topic :: System :: Filesystems', 39 | ], 40 | ) 41 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py26,py27,pep8 3 | minversion = 1.6 4 | 5 | [testenv] 6 | setenv = VIRTUAL_ENV={envdir} 7 | NOSE_WITH_COVERAGE=1 8 | NOSE_COVER_BRANCHES=1 9 | NOSE_COVER_ERASE=1 10 | NOSE_COVER_PACKAGE=gluster 11 | 12 | [testenv:pep8] 13 | commands = 14 | flake8 {posargs:gluster test setup.py} 15 | 16 | [testenv:cover] 17 | setenv = NOSE_WITH_COVERAGE=1 18 | NOSE_COVER_BRANCHES=1 19 | 20 | [testenv:venv] 21 | commands = {posargs} 22 | 23 | [flake8] 24 | ignore = H 25 | builtins = _ 26 | exclude = .venv,.tox,dist,doc,test,*egg 27 | show-source = True 28 | --------------------------------------------------------------------------------