├── .coveragerc ├── .gitignore ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── README.rst ├── opinel ├── __init__.py ├── data │ ├── icmp_message_types.json │ ├── ip-ranges │ │ ├── aws-in-ec2.json │ │ ├── aws-in-us.json │ │ └── aws.json │ ├── protocols.json │ └── requirements.txt ├── services │ ├── __init__.py │ ├── cloudformation.py │ ├── cloudtrail.py │ ├── iam.py │ ├── organizations.py │ └── s3.py └── utils │ ├── __init__.py │ ├── aws.py │ ├── cli_parser.py │ ├── conditions.py │ ├── console.py │ ├── credentials.py │ ├── fs.py │ ├── globals.py │ ├── profiles.py │ └── threads.py ├── requirements.txt ├── setup.py └── tests ├── __init__.py ├── data ├── accessKeys1.csv ├── accessKeys2.csv ├── accessKeys3.csv ├── accessKeys4.csv ├── cloudformation-001.json ├── cloudformation-002.json ├── cloudformation-003.json ├── cloudformation-003bad.json ├── cloudformation-004.json ├── cloudformation-005.json ├── cloudformation-005bad.json ├── config ├── credentials ├── credentials1.csv ├── credentials2.csv ├── default_args.json ├── ip-ranges-1.json ├── ip-ranges-2.json ├── ip-ranges-3.json ├── ip-ranges-4.json ├── policy1.json ├── policy2.json ├── protocols.json ├── protocols.txt ├── protocols.yaml ├── requirements1.txt ├── requirements2.txt ├── requirements3.txt └── statement1.json ├── load_data.py ├── local.py ├── requirements.txt ├── results └── read_ip_ranges │ ├── ip-ranges-1a.json │ ├── ip-ranges-1b.json │ ├── ip-ranges-1c.json │ ├── ip-ranges-3.json │ ├── ip-ranges-4.json │ └── ip-ranges-a.json ├── test-services-cloudformation.py ├── test-services-cloudtrail.py ├── test-services-iam.py ├── test-services-organizations.py ├── test-services-s3.py ├── test-tests.py ├── test-utils-aws.py ├── test-utils-cli_parser.py ├── test-utils-conditions.py ├── test-utils-console.py ├── test-utils-credentials.py ├── test-utils-fs.py ├── test-utils-globals.py ├── test-utils-profiles.py └── test-utils-threads.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | include = opinel/*.py 3 | opinel/services/*.py 4 | opinel/utils/*.py 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # virtualenv 2 | env/ 3 | venv/ 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | env/ 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .coverage 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | 47 | # Translations 48 | *.mo 49 | *.pot 50 | 51 | # Django stuff: 52 | *.log 53 | 54 | # Sphinx documentation 55 | docs/_build/ 56 | 57 | # PyBuilder 58 | target/ 59 | 60 | # Pycharm 61 | .idea 62 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - "2.7" 5 | - "3.3" 6 | - "3.4" 7 | - "3.5" 8 | - "3.6" 9 | 10 | before_script: 11 | - pip install -r requirements.txt 12 | - pip install coveralls 13 | 14 | script: 15 | - nosetests --with-coverage tests/test-tests.py 16 | - nosetests --with-coverage tests/test-utils-globals.py 17 | - nosetests --with-coverage tests/test-utils-console.py 18 | - nosetests --with-coverage tests/test-utils-aws.py 19 | - nosetests --with-coverage tests/test-utils-fs.py 20 | - nosetests --with-coverage tests/test-utils-profiles.py 21 | - nosetests --with-coverage tests/test-utils-threads.py 22 | - nosetests --with-coverage tests/test-utils-cli_parser.py 23 | - nosetests --with-coverage tests/test-utils-credentials.py 24 | - nosetests --with-coverage tests/test-utils-conditions.py 25 | - nosetests --with-coverage tests/test-services-cloudformation.py 26 | - nosetests --with-coverage tests/test-services-cloudtrail.py 27 | - nosetests --with-coverage tests/test-services-iam.py 28 | - nosetests --with-coverage tests/test-services-organizations.py 29 | - nosetests --with-coverage tests/test-services-s3.py 30 | 31 | after_success: 32 | - coveralls 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 
30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. 
You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. 
You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. 
You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. 
If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | {description} 294 | Copyright (C) {year} {fullname} 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 
311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | {signature of Ty Coon}, 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | 341 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.rst 3 | include requirements.txt 4 | recursive-include opinel/data * 5 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ###### 2 | opinel 3 | ###### 4 | 5 | .. image:: https://travis-ci.org/nccgroup/opinel.svg?branch=master 6 | :target: https://travis-ci.org/nccgroup/opinel 7 | .. image:: https://coveralls.io/repos/github/nccgroup/opinel/badge.svg?branch=master 8 | :target: https://coveralls.io/github/nccgroup/opinel?branch=master 9 | .. image:: https://badge.fury.io/py/opinel.svg 10 | :target: https://badge.fury.io/py/opinel 11 | :align: right 12 | 13 | ************ 14 | Introduction 15 | ************ 16 | 17 | opinel is the Python package used by `Scout2`_ and `AWS-Recipes`_. 18 | 19 | ************ 20 | Installation 21 | ************ 22 | 23 | Install via `pip`_: 24 | 25 | :: 26 | 27 | $ pip install opinel 28 | 29 | Install from source: 30 | 31 | :: 32 | 33 | $ git clone https://github.com/nccgroup/opinel 34 | $ cd opinel 35 | $ python setup.py install 36 | 37 | .. _Scout2: https://github.com/nccgroup/Scout2 38 | .. _AWS-Recipes: https://github.com/nccgroup/AWS-Recipes 39 | .. 
_pip: https://pip.pypa.io/en/stable/index.html 40 | -------------------------------------------------------------------------------- /opinel/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'l01cd3v' 2 | __version__ = '3.3.4' 3 | -------------------------------------------------------------------------------- /opinel/data/icmp_message_types.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "generated_on": "September 14, 2015", 4 | "source": "https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml" 5 | }, 6 | "icmp_message_types": { 7 | "-1": "ALL", 8 | "0": "Echo Reply", 9 | "3": "Destination Unreachable", 10 | "4": "Source Quench", 11 | "5": "Redirect", 12 | "6": "Alternate Host Address", 13 | "8": "Echo", 14 | "9": "Router Advertisement", 15 | "10": "Router Selection", 16 | "11": "Time Exceeded", 17 | "12": "Parameter Problem", 18 | "13": "Timestamp", 19 | "14": "Timestamp Reply", 20 | "15": "Information Request", 21 | "16": "Information Reply", 22 | "17": "Address Mask Request", 23 | "18": "Address Mask Reply", 24 | "30": "Traceroute", 25 | "31": "Datagram Conversion Error", 26 | "32": "Mobile Host Redirect", 27 | "33": "IPv6 Where-Are-You", 28 | "34": "IPv6 I-Am-Here", 29 | "35": "Mobile Registration Request", 30 | "36": "Mobile Registration Reply", 31 | "37": "Domain Name Request", 32 | "38": "Domain Name Reply", 33 | "39": "SKIP", 34 | "40": "Photuris" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /opinel/data/ip-ranges/aws-in-ec2.json: -------------------------------------------------------------------------------- 1 | { 2 | "source": "ip-ranges/aws.json", 3 | "conditions": [ "and", 4 | [ "service", "equal", "EC2" ] 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /opinel/data/ip-ranges/aws-in-us.json: -------------------------------------------------------------------------------- 1 | { 2 | "source": "ip-ranges/aws.json", 3 | "conditions": [ "and", 4 | [ "region", "match", [ "us-.*" ] ] 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /opinel/data/protocols.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "generated_on": "February 12, 2014", 4 | "source": "http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml" 5 | }, 6 | "protocols": { 7 | "-1": "ALL", 8 | "0": "HOPOPT", 9 | "1": "ICMP", 10 | "2": "IGMP", 11 | "3": "GGP", 12 | "4": "IPv4", 13 | "5": "ST", 14 | "6": "TCP", 15 | "7": "CBT", 16 | "8": "EGP", 17 | "9": "IGP", 18 | "10": "BBN-RCC-MON", 19 | "11": "NVP-II", 20 | "12": "PUP", 21 | "13": "ARGUS", 22 | "14": "EMCON", 23 | "15": "XNET", 24 | "16": "CHAOS", 25 | "17": "UDP", 26 | "18": "MUX", 27 | "19": "DCN-MEAS", 28 | "20": "HMP", 29 | "21": "PRM", 30 | "22": "XNS-IDP", 31 | "23": "TRUNK-1", 32 | "24": "TRUNK-2", 33 | "25": "LEAF-1", 34 | "26": "LEAF-2", 35 | "27": "RDP", 36 | "28": "IRTP", 37 | "29": "ISO-TP4", 38 | "30": "NETBLT", 39 | "31": "MFE-NSP", 40 | "32": "MERIT-INP", 41 | "33": "DCCP", 42 | "34": "3PC", 43 | "35": "IDPR", 44 | "36": "XTP", 45 | "37": "DDP", 46 | "38": "IDPR-CMTP", 47 | "39": "TP++", 48 | "40": "IL", 49 | "41": "IPv6", 50 | "42": "SDRP", 51 | "43": "IPv6-Route", 52 | "44": "IPv6-Frag", 53 | "45": "IDRP", 54 | "46": "RSVP", 55 | "47": "GRE", 56 | "48": "DSR", 57 | "49": "BNA", 58 
| "50": "ESP", 59 | "51": "AH", 60 | "52": "I-NLSP", 61 | "53": "SWIPE", 62 | "54": "NARP", 63 | "55": "MOBILE", 64 | "56": "TLSP", 65 | "57": "SKIP", 66 | "58": "IPv6-ICMP", 67 | "59": "IPv6-NoNxt", 68 | "60": "IPv6-Opts", 69 | "62": "CFTP", 70 | "64": "SAT-EXPAK", 71 | "65": "KRYPTOLAN", 72 | "66": "RVD", 73 | "67": "IPPC", 74 | "69": "SAT-MON", 75 | "70": "VISA", 76 | "71": "IPCV", 77 | "72": "CPNX", 78 | "73": "CPHB", 79 | "74": "WSN", 80 | "75": "PVP", 81 | "76": "BR-SAT-MON", 82 | "77": "SUN-ND", 83 | "78": "WB-MON", 84 | "79": "WB-EXPAK", 85 | "80": "ISO-IP", 86 | "81": "VMTP", 87 | "82": "SECURE-VMTP", 88 | "83": "VINES", 89 | "84": "TTP", 90 | "84": "IPTM", 91 | "85": "NSFNET-IGP", 92 | "86": "DGP", 93 | "87": "TCF", 94 | "88": "EIGRP", 95 | "89": "OSPFIGP", 96 | "90": "Sprite-RPC", 97 | "91": "LARP", 98 | "92": "MTP", 99 | "93": "AX.25", 100 | "94": "IPIP", 101 | "95": "MICP", 102 | "96": "SCC-SP", 103 | "97": "ETHERIP", 104 | "98": "ENCAP", 105 | "100": "GMTP", 106 | "101": "IFMP", 107 | "102": "PNNI", 108 | "103": "PIM", 109 | "104": "ARIS", 110 | "105": "SCPS", 111 | "106": "QNX", 112 | "107": "A/N", 113 | "108": "IPComp", 114 | "109": "SNP", 115 | "110": "Compaq-Peer", 116 | "111": "IPX-in-IP", 117 | "112": "VRRP", 118 | "113": "PGM", 119 | "115": "L2TP", 120 | "116": "DDX", 121 | "117": "IATP", 122 | "118": "STP", 123 | "119": "SRP", 124 | "120": "UTI", 125 | "121": "SMP", 126 | "122": "SM", 127 | "123": "PTP", 128 | "124": "ISIS over IPv4", 129 | "125": "FIRE", 130 | "126": "CRTP", 131 | "127": "CRUDP", 132 | "128": "SSCOPMCE", 133 | "129": "IPLT", 134 | "130": "SPS", 135 | "131": "PIPE", 136 | "132": "SCTP", 137 | "133": "FC", 138 | "134": "RSVP-E2E-IGNORE", 139 | "135": "Mobility Header", 140 | "136": "UDPLite", 141 | "137": "MPLS-in-IP", 142 | "138": "manet", 143 | "139": "HIP", 144 | "140": "Shim6", 145 | "141": "WESP", 146 | "142": "ROHC" 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /opinel/data/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3>=1.4.4 2 | requests>=2.4.0,<3.0.0 3 | netaddr>=0.7.11 4 | iampoliciesgonewild>=1.0.6.2 5 | pyyaml>=3.12 6 | -------------------------------------------------------------------------------- /opinel/services/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nccgroup/opinel/2d4f5b96e0a1f9cb0356629f4f87e4ed99ce2606/opinel/services/__init__.py -------------------------------------------------------------------------------- /opinel/services/cloudformation.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import json 4 | import os 5 | import re 6 | import time 7 | 8 | from opinel.utils.aws import connect_service, handle_truncated_response 9 | from opinel.utils.console import printDebug, printInfo, printError, printException, prompt_4_yes_no 10 | from opinel.utils.fs import read_file 11 | from opinel.utils.globals import snake_to_camel, snake_to_words 12 | 13 | re_iam_capability = re.compile('.*?AWS::IAM.*?', re.DOTALL | re.MULTILINE) 14 | 15 | def create_cloudformation_resource_from_template(api_client, resource_type, name, template_path, template_parameters=[], tags=[], quiet=False, wait_for_completion = False, need_on_failure=False): 16 | """ 17 | 18 | :param callback: 19 | :param name: 20 | :param template_path: 21 | :param template_parameters: 22 | :param quiet: 23 | 
:return: 24 | """ 25 | create = getattr(api_client, 'create_%s' % resource_type) 26 | api_resource_type = snake_to_camel(resource_type) 27 | # Add a timestamps 28 | tags.append({'Key': 'OpinelTimestamp', 'Value': str(time.time())}) 29 | params = prepare_cloudformation_params(name, template_path, template_parameters, api_resource_type, tags) 30 | if not quiet: 31 | printInfo('Creating the %s %s..' % (resource_type, name)) 32 | response = create(**params) 33 | resource_id_attribute = '%sId' % api_resource_type 34 | resource_id = response[resource_id_attribute] if resource_id_attribute in response else None 35 | operation_id = response['OperationId'] if 'OperationId' in response else None 36 | if wait_for_completion: 37 | cloudformation_wait(api_client, resource_type, name, operation_id) 38 | return resource_id 39 | 40 | 41 | def create_stack(api_client, stack_name, template_path, template_parameters=[], tags=[], quiet=False, wait_for_completion = False): 42 | """ 43 | 44 | :param api_client: 45 | :param stack_name: 46 | :param template_path: 47 | :param template_parameters: List of parameter keys and values 48 | :param quiet: 49 | :return: 50 | """ 51 | return create_cloudformation_resource_from_template(api_client, 'stack', stack_name, template_path, template_parameters, tags, quiet, wait_for_completion, need_on_failure=True) 52 | 53 | 54 | def create_or_update_stack(api_client, stack_name, template_path, template_parameters=[], tags=[], quiet=False, wait_for_completion = False): 55 | """ 56 | 57 | :param api_client: 58 | :param stack_name: 59 | :param template_path: 60 | :param template_parameters: List of parameter keys and values 61 | :param quiet: 62 | :return: 63 | """ 64 | try: 65 | stack = api_client.describe_stacks(StackName = stack_name) 66 | printInfo('Stack already exists... ', newLine = False) 67 | stack_id = update_stack(api_client, stack_name, template_path, template_parameters, quiet, wait_for_completion) 68 | except Exception as e: 69 | if hasattr(e, 'response') and type(e.response) == dict and 'Error' in e.response and e.response['Error']['Code'] == 'ValidationError': 70 | stack_id = create_stack(api_client, stack_name, template_path, template_parameters, tags, quiet, wait_for_completion) 71 | else: 72 | stack_id = None 73 | printException(e) 74 | return stack_id 75 | 76 | 77 | 78 | def create_stack_set(api_client, stack_set_name, template_path, template_parameters=[], tags=[], quiet=False, wait_for_completion = False): 79 | """ 80 | 81 | :param api_client: 82 | :param stack_set_name: 83 | :param template_path: 84 | :param template_parameters: 85 | :param quiet: 86 | :return: 87 | """ 88 | return create_cloudformation_resource_from_template(api_client, 'stack_set', stack_set_name, template_path, template_parameters, tags, quiet, wait_for_completion) 89 | 90 | 91 | def create_or_update_stack_set(api_client, stack_set_name, template_path, template_parameters=[], tags=[], quiet=False, wait_for_completion = False): 92 | """ 93 | 94 | :param api_client: 95 | :param stack_name: 96 | :param template_path: 97 | :param template_parameters: List of parameter keys and values 98 | :param quiet: 99 | :return: 100 | """ 101 | operation_id = stack_set_id = None 102 | try: 103 | stack_set = api_client.describe_stack_set(StackSetName = stack_set_name) 104 | printInfo('Stack set already exists... 
', newLine = False) 105 | operation_id = update_stack_set(api_client, stack_set_name, template_path, template_parameters, quiet, wait_for_completion) 106 | except Exception as e: 107 | if hasattr(e, 'response') and type(e.response) == dict and 'Error' in e.response and e.response['Error']['Code'] == 'StackSetNotFoundException': 108 | stack_set_id = create_stack_set(api_client, stack_set_name, template_path, template_parameters, tags, quiet, wait_for_completion) 109 | else: 110 | printException(e) 111 | return (stack_set_id, operation_id) 112 | 113 | 114 | def create_stack_instances(api_client, stack_set_name, account_ids, regions, quiet=False): 115 | """ 116 | 117 | :param api_client: 118 | :param stack_set_name: 119 | :param account_ids: 120 | :param regions: 121 | :return: 122 | """ 123 | operation_preferences = {'FailureTolerancePercentage': 100, 124 | 'MaxConcurrentPercentage': 100 125 | } 126 | if not quiet: 127 | printInfo('Creating stack instances in %d regions and %d accounts...' % (len(regions), len(account_ids))) 128 | printDebug(' %s' % ', '.join(regions)) 129 | response = api_client.create_stack_instances(StackSetName=stack_set_name, Accounts=account_ids, Regions=regions, OperationPreferences=operation_preferences) 130 | if not quiet: 131 | printInfo('Successfully started operation Id %s' % response['OperationId']) 132 | return response['OperationId'] 133 | 134 | 135 | def delete_stack_set(api_client, stack_set_name, timeout = 60 * 5): 136 | """ 137 | """ 138 | printDebug('Deleting stack set %s' % stack_set_name) 139 | # Check for instances 140 | stack_instances = handle_truncated_response(api_client.list_stack_instances, {'StackSetName': stack_set_name}, ['Summaries'])['Summaries'] 141 | account_ids = [] 142 | regions = [] 143 | if len(stack_instances) > 0: 144 | for si in stack_instances: 145 | if si['Account'] not in account_ids: 146 | account_ids.append(si['Account']) 147 | if si['Region'] not in regions: 148 | regions.append(si['Region']) 149 | operation_id = api_client.delete_stack_instances(StackSetName = stack_set_name, Accounts = account_ids, Regions = regions, RetainStacks = False)['OperationId'] 150 | wait_for_operation(api_client, stack_set_name, operation_id) 151 | api_client.delete_stack_set(StackSetName = stack_set_name) 152 | 153 | 154 | def get_stackset_ready_accounts(credentials, account_ids, quiet=True): 155 | """ 156 | Verify which AWS accounts have been configured for CloudFormation stack set by attempting to assume the stack set execution role 157 | 158 | :param credentials: AWS credentials to use when calling sts:assumerole 159 | :param org_account_ids: List of AWS accounts to check for Stackset configuration 160 | 161 | :return: List of account IDs in which assuming the stackset execution role worked 162 | """ 163 | api_client = connect_service('sts', credentials, silent=True) 164 | configured_account_ids = [] 165 | for account_id in account_ids: 166 | try: 167 | role_arn = 'arn:aws:iam::%s:role/AWSCloudFormationStackSetExecutionRole' % account_id 168 | api_client.assume_role(RoleArn=role_arn, RoleSessionName='opinel-get_stackset_ready_accounts') 169 | configured_account_ids.append(account_id) 170 | except Exception as e: 171 | pass 172 | 173 | if len(configured_account_ids) != len(account_ids) and not quiet: 174 | printInfo('Only %d of these accounts have the necessary stack set execution role:' % len(configured_account_ids)) 175 | printDebug(str(configured_account_ids)) 176 | return configured_account_ids 177 | 178 | 179 | def 
make_awsrecipes_stack_name(template_path): 180 | """ 181 | 182 | :param template_path: 183 | :return: 184 | """ 185 | return make_prefixed_stack_name('AWSRecipes', template_path) 186 | 187 | 188 | def make_opinel_stack_name(template_path): 189 | """ 190 | 191 | :param template_path:" 192 | :return: 193 | """ 194 | return make_prefixed_stack_name('Opinel', template_path) 195 | 196 | 197 | def make_prefixed_stack_name(prefix, template_path): 198 | """ 199 | 200 | :param prefix: 201 | :param template_path: 202 | """ 203 | parts = os.path.basename(template_path).split('-') 204 | parts = parts if len(parts) == 1 else parts[:-1] 205 | return ('%s-%s' % (prefix, '-'.join(parts))).split('.')[0] 206 | 207 | 208 | def prepare_cloudformation_params(stack_name, template_path, template_parameters, resource_type, tags=[], need_on_failure=False): 209 | """ 210 | 211 | :param api_client: 212 | :param stack_name: 213 | :param template_path: 214 | :param template_parameters: List of parameter keys and values 215 | :param quiet: 216 | :return: 217 | """ 218 | printDebug('Reading CloudFormation template from %s' % template_path) 219 | template_body = read_file(template_path) 220 | params = {} 221 | params['%sName' % resource_type] = stack_name 222 | params['TemplateBody'] = template_body 223 | if len(template_parameters): 224 | params['Parameters'] = [] 225 | it = iter(template_parameters) 226 | for param in it: 227 | printError('Param:: %s' % param) 228 | params['Parameters'].append({'ParameterKey': param,'ParameterValue': next(it)}) 229 | 230 | if len(tags): 231 | params['Tags'] = tags 232 | if re_iam_capability.match(template_body): 233 | params['Capabilities'] = [ 234 | 'CAPABILITY_NAMED_IAM'] 235 | if need_on_failure: 236 | params['OnFailure'] = 'ROLLBACK' 237 | return params 238 | 239 | 240 | def update_stack(api_client, stack_name, template_path, template_parameters = [], quiet = False, wait_for_completion = False): 241 | """ 242 | 243 | :param api_client: 244 | :param stack_name: 245 | :param template_path: 246 | :param template_parameters: List of parameter keys and values 247 | :param quiet: 248 | :return: 249 | """ 250 | update_cloudformation_resource_from_template(api_client, 'stack', stack_name, template_path, template_parameters, quiet = quiet, wait_for_completion = wait_for_completion) 251 | 252 | 253 | def update_stack_set(api_client, stack_set_name, template_path, template_parameters=[], quiet=False, wait_for_completion = False): 254 | """ 255 | 256 | :param api_client: 257 | :param stack_set_name: 258 | :param template_path: 259 | :param template_parameters: 260 | :param quiet: 261 | :return: 262 | """ 263 | return update_cloudformation_resource_from_template(api_client, 'stack_set', stack_set_name, template_path, template_parameters, [], quiet, wait_for_completion) 264 | 265 | 266 | def update_cloudformation_resource_from_template(api_client, resource_type, name, template_path, template_parameters=[], tags=[], quiet=False, wait_for_completion = False): 267 | """ 268 | 269 | :param callback: 270 | :param name: 271 | :param template_path: 272 | :param template_parameters: 273 | :param quiet: 274 | :return: 275 | """ 276 | try: 277 | update = getattr(api_client, 'update_%s' % resource_type) 278 | api_resource_type = snake_to_camel(resource_type) 279 | # Add a timestamps 280 | tags.append({'Key': 'OpinelTimestamp', 'Value': str(time.time())}) 281 | params = prepare_cloudformation_params(name, template_path, template_parameters, api_resource_type, tags) 282 | if not quiet: 283 | 
printInfo('Updating the %s...' % resource_type, newLine=False) 284 | response = update(**params) 285 | operation_id = response['OperationId'] if resource_type == 'stack_set' else None 286 | if wait_for_completion: 287 | cloudformation_wait(api_client, resource_type, name, operation_id) 288 | 289 | except Exception as e: 290 | if api_resource_type == 'Stack' and hasattr(e, 'response') and type(e.response == dict) and e.response['Error']['Code'] == 'ValidationError' and e.response['Error']['Message'] == 'No updates are to be performed.': 291 | printInfo(' Already up to date.') 292 | else: 293 | printException(e) 294 | printError(' Failed.') 295 | 296 | 297 | def wait_for_operation(api_client, stack_set_name, operation_id, timeout = 5 * 60, increment = 5): 298 | printDebug('Waiting for operation %s on stack set %s...' % (operation_id, stack_set_name)) 299 | timer = 0 300 | status = '' 301 | while True: 302 | if timer >= timeout: 303 | printError('Timed out.') 304 | break 305 | info = api_client.describe_stack_set_operation(StackSetName = stack_set_name, OperationId = operation_id) 306 | status = info['StackSetOperation']['Status'] 307 | if status not in ['RUNNING', 'STOPPING']: 308 | break 309 | printError('Operation status is \'%s\'... waiting %d seconds until next check...' % (status, increment)) 310 | time.sleep(increment) 311 | timer += increment 312 | return 'Operation %s is %s' % (operation_id, status) 313 | 314 | 315 | def wait_for_stack_set(api_client, stack_set_name, timeout = 60, increment = 5): 316 | printDebug('Waiting for stack set %s to be ready...' % stack_set_name) 317 | timer = 0 318 | while True: 319 | if timer >= timeout: 320 | printError('Timed out.') 321 | break 322 | printError('Checking the stack set\'s status...') 323 | time.sleep(increment) 324 | timer += increment 325 | info = api_client.describe_stack_set(StackSetName = stack_set_name) 326 | if info['StackSet']['Status'] == 'ACTIVE': 327 | break 328 | 329 | 330 | def still_running(callback, params, resource_type): 331 | rc = True 332 | response = callback(**params) 333 | if resource_type == 'stack': 334 | status = response['Stacks'][0]['StackStatus'] 335 | if status.endswith('_COMPLETE') or status.endswith('_FAILED'): 336 | rc = False 337 | elif resource_type == 'stack_set': 338 | status = response['StackSet']['Status'] 339 | if status == 'ACTIVE': 340 | rc = False 341 | elif resource_type == 'operation': 342 | status = response['StackSetOperation']['Status'] 343 | if status != 'RUNNING': 344 | rc = False 345 | return (rc, status) 346 | 347 | 348 | def cloudformation_wait(api_client, resource_type, resource_name, operation_id = None, timeout = 5 * 60, increment = 5): 349 | if resource_type == 'stack': 350 | callback = api_client.describe_stacks 351 | params = {'StackName': resource_name} 352 | elif resource_type == 'stack_set': 353 | params = {'StackSetName': resource_name} 354 | if operation_id: 355 | callback = api_client.describe_stack_set_operation 356 | params['OperationId'] = operation_id 357 | resource_type = 'operation' 358 | else: 359 | callback = api_client.describe_stack_set 360 | else: 361 | printError('Unknown resource type: %s' % resource_type) 362 | return 363 | timer = 0 364 | while True: 365 | if timer >= timeout: 366 | printError('Timed out.') 367 | break 368 | rc, status = still_running(callback, params, resource_type) 369 | if rc == False: 370 | printInfo('Status: %s' % status) 371 | break 372 | printInfo('Status: %s... waiting %d seconds until next check...' 
% (status, increment)) 373 | timer += increment 374 | time.sleep(increment) 375 | -------------------------------------------------------------------------------- /opinel/services/cloudtrail.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | def get_trails(api_client): 4 | return api_client.describe_trails()['trailList'] 5 | -------------------------------------------------------------------------------- /opinel/services/iam.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import re 4 | 5 | from opinel.utils.aws import handle_truncated_response 6 | from opinel.utils.credentials import generate_password 7 | from opinel.utils.console import printInfo, printError, printException 8 | 9 | 10 | 11 | def add_user_to_group(iam_client, user, group, quiet = False): 12 | """ 13 | Add an IAM user to an IAM group 14 | 15 | :param iam_client: 16 | :param group: 17 | :param user: 18 | :param user_info: 19 | :param dry_run: 20 | :return: 21 | """ 22 | if not quiet: 23 | printInfo('Adding user to group %s...' % group) 24 | iam_client.add_user_to_group(GroupName = group, UserName = user) 25 | 26 | 27 | def create_groups(iam_client, groups): 28 | """ 29 | Create a number of IAM group, silently handling exceptions when entity already exists 30 | . 31 | :param iam_client: AWS API client for IAM 32 | :param groups: Name of IAM groups to be created. 33 | 34 | :return: None 35 | """ 36 | groups_data = [] 37 | if type(groups) != list: 38 | groups = [ groups ] 39 | for group in groups: 40 | errors = [] 41 | try: 42 | printInfo('Creating group %s...' % group) 43 | iam_client.create_group(GroupName = group) 44 | except Exception as e: 45 | if e.response['Error']['Code'] != 'EntityAlreadyExists': 46 | printException(e) 47 | errors.append('iam:creategroup') 48 | groups_data.append({'groupname': group, 'errors': errors}) 49 | return groups_data 50 | 51 | 52 | def create_user(iam_client, user, groups = [], with_password= False, with_mfa = False, with_access_key = False, require_password_reset = True): 53 | """ 54 | 55 | :param iam_client: AWS API client for IAM 56 | :param user: Name of the user to create 57 | :param groups: Name of the IAM groups to add the user to 58 | :param with_password: Boolean indicating whether creation of a password should be done 59 | :param with_mfa: Boolean indicating whether creation of an MFA device should be done 60 | :param with_access_key: Boolean indicating whether creation of an API access key should be done 61 | :param require_password_reset: Boolean indicating whether users should reset their password after first login 62 | :return: 63 | """ 64 | user_data = {'username': user, 'errors': []} 65 | printInfo('Creating user %s...' 
% user) 66 | try: 67 | iam_client.create_user(UserName = user) 68 | except Exception as e: 69 | user_data['errors'].append('iam:createuser') 70 | return user_data 71 | # Add user to groups 72 | if type(groups) != list: 73 | groups = [ groups ] 74 | for group in groups: 75 | try: 76 | add_user_to_group(iam_client, user, group) 77 | except Exception as e: 78 | printException(e) 79 | user_data['errors'].append('iam:addusertogroup - %s' % group) 80 | # Generate password 81 | if with_password: 82 | try: 83 | printInfo('Creating a login profile...') 84 | user_data['password'] = generate_password() 85 | iam_client.create_login_profile(UserName = user, Password = user_data['password'] , PasswordResetRequired = require_password_reset) 86 | except Exception as e: 87 | printException(e) 88 | user_data['errors'].append('iam:createloginprofile') 89 | # Enable MFA 90 | if False and with_mfa: 91 | printInfo('Enabling MFA...') 92 | serial = '' 93 | mfa_code1 = '' 94 | mfa_code2 = '' 95 | # Create an MFA device, Display the QR Code, and activate the MFA device 96 | try: 97 | mfa_serial = False # enable_mfa(iam_client, user, '%s/qrcode.png' % user) 98 | except Exception as e: 99 | return 42 100 | # Request access key 101 | if with_access_key: 102 | try: 103 | printInfo('Creating an API access key...') 104 | access_key = iam_client.create_access_key(UserName=user)['AccessKey'] 105 | user_data['AccessKeyId'] = access_key['AccessKeyId'] 106 | user_data['SecretAccessKey'] = access_key['SecretAccessKey'] 107 | except Exception as e: 108 | printException(e) 109 | user_data['errors'].append('iam:createaccesskey') 110 | return user_data 111 | 112 | 113 | def delete_user(iam_client, user, mfa_serial = None, keep_user = False, terminated_groups = []): 114 | """ 115 | Delete IAM user 116 | 117 | :param iam_client: 118 | :param user: 119 | :param mfa_serial: 120 | :param keep_user: 121 | :param terminated_groups: 122 | :return: 123 | """ 124 | errors = [] 125 | printInfo('Deleting user %s...' % user) 126 | # Delete access keys 127 | try: 128 | aws_keys = get_access_keys(iam_client, user) 129 | for aws_key in aws_keys: 130 | try: 131 | printInfo('Deleting access key ID %s... ' % aws_key['AccessKeyId'], False) 132 | iam_client.delete_access_key(AccessKeyId = aws_key['AccessKeyId'], UserName = user) 133 | printInfo('Success') 134 | except Exception as e: 135 | printInfo('Failed') 136 | printException(e) 137 | errors.append(e.response['Error']['Code']) 138 | except Exception as e: 139 | printException(e) 140 | printError('Failed to get access keys for user %s.' % user) 141 | # Deactivate and delete MFA devices 142 | try: 143 | mfa_devices = iam_client.list_mfa_devices(UserName = user)['MFADevices'] 144 | for mfa_device in mfa_devices: 145 | serial = mfa_device['SerialNumber'] 146 | try: 147 | printInfo('Deactivating MFA device %s... ' % serial, False) 148 | iam_client.deactivate_mfa_device(SerialNumber = serial, UserName = user) 149 | printInfo('Success') 150 | except Exception as e: 151 | printInfo('Failed') 152 | printException(e) 153 | errors.append(e.response['Error']['Code']) 154 | delete_virtual_mfa_device(iam_client, serial) 155 | if mfa_serial: 156 | delete_virtual_mfa_device(iam_client, mfa_serial) 157 | except Exception as e: 158 | printException(e) 159 | printError('Faile to fetch/delete MFA device serial number for user %s.' 
% user) 160 | errors.append(e.response['Error']['Code']) 161 | # Remove IAM user from groups 162 | try: 163 | groups = iam_client.list_groups_for_user(UserName = user)['Groups'] 164 | for group in groups: 165 | try: 166 | printInfo('Removing from group %s... ' % group['GroupName'], False) 167 | iam_client.remove_user_from_group(GroupName = group['GroupName'], UserName = user) 168 | printInfo('Success') 169 | except Exception as e: 170 | printInfo('Failed') 171 | printException(e) 172 | errors.append(e.response['Error']['Code']) 173 | except Exception as e: 174 | printException(e) 175 | printError('Failed to fetch IAM groups for user %s.' % user) 176 | errors.append(e.response['Error']['Code']) 177 | # Delete login profile 178 | login_profile = [] 179 | try: 180 | login_profile = iam_client.get_login_profile(UserName = user)['LoginProfile'] 181 | except Exception as e: 182 | pass 183 | try: 184 | if len(login_profile): 185 | printInfo('Deleting login profile... ', False) 186 | iam_client.delete_login_profile(UserName = user) 187 | printInfo('Success') 188 | except Exception as e: 189 | printInfo('Failed') 190 | printException(e) 191 | errors.append(e.response['Error']['Code']) 192 | # Delete inline policies 193 | try: 194 | printInfo('Deleting inline policies... ', False) 195 | policies = iam_client.list_user_policies(UserName = user) 196 | for policy in policies['PolicyNames']: 197 | iam_client.delete_user_policy(UserName = user, PolicyName = policy) 198 | printInfo('Success') 199 | except Exception as e: 200 | printInfo('Failed') 201 | printException(e) 202 | errors.append(e.response['Error']['Code']) 203 | # Detach managed policies 204 | try: 205 | printInfo('Detaching managed policies... ', False) 206 | policies = iam_client.list_attached_user_policies(UserName = user) 207 | for policy in policies['AttachedPolicies']: 208 | iam_client.detach_user_policy(UserName = user, PolicyArn = policy['PolicyArn']) 209 | printInfo('Success') 210 | except Exception as e: 211 | printInfo('Failed') 212 | printException(e) 213 | errors.append(e.response['Error']['Code']) 214 | # Delete IAM user 215 | try: 216 | if not keep_user: 217 | iam_client.delete_user(UserName = user) 218 | printInfo('User %s deleted.' % user) 219 | else: 220 | for group in terminated_groups: 221 | add_user_to_group(iam_client, group, user) 222 | except Exception as e: 223 | printException(e) 224 | printError('Failed to delete user.') 225 | errors.append(e.response['Error']['Code']) 226 | pass 227 | return errors 228 | 229 | 230 | def delete_virtual_mfa_device(iam_client, mfa_serial): 231 | """ 232 | Delete a vritual MFA device given its serial number 233 | 234 | :param iam_client: 235 | :param mfa_serial: 236 | :return: 237 | """ 238 | try: 239 | printInfo('Deleting MFA device %s...' 
% mfa_serial) 240 | iam_client.delete_virtual_mfa_device(SerialNumber = mfa_serial) 241 | except Exception as e: 242 | printException(e) 243 | printError('Failed to delete MFA device %s' % mfa_serial) 244 | pass 245 | 246 | def get_access_keys(iam_client, user_name): 247 | """ 248 | 249 | :param iam_client: 250 | :param user_name: 251 | :return: 252 | """ 253 | keys = handle_truncated_response(iam_client.list_access_keys, {'UserName': user_name}, ['AccessKeyMetadata'])['AccessKeyMetadata'] 254 | return keys 255 | 256 | 257 | def init_group_category_regex(category_groups, category_regex_args): 258 | """ 259 | Initialize and compile regular expression for category groups 260 | 261 | :param category_regex_args: List of string regex 262 | 263 | :return: List of compiled regex 264 | """ 265 | category_regex = [] 266 | authorized_empty_regex = 1 267 | if len(category_regex_args) and len(category_groups) != len(category_regex_args): 268 | printError('Error: you must provide as many regex as category groups.') 269 | return None 270 | for regex in category_regex_args: 271 | if len(regex) < 1: 272 | if authorized_empty_regex > 0: 273 | category_regex.append(None) 274 | authorized_empty_regex -= 1 275 | else: 276 | printError('Error: you cannot have more than one empty regex to automatically assign groups to users.') 277 | return None 278 | else: 279 | category_regex.append(re.compile(regex)) 280 | return category_regex 281 | 282 | 283 | 284 | def show_access_keys(iam_client, user_name): 285 | """ 286 | 287 | :param iam_client: 288 | :param user_name: 289 | :return: 290 | """ 291 | keys = get_access_keys(iam_client, user_name) 292 | printInfo('User \'%s\' currently has %s access keys:' % (user_name, len(keys))) 293 | for key in keys: 294 | printInfo('\t%s (%s)' % (key['AccessKeyId'], key['Status'])) 295 | -------------------------------------------------------------------------------- /opinel/services/organizations.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from opinel.utils.aws import handle_truncated_response 4 | from opinel.utils.console import printDebug, printInfo 5 | 6 | 7 | def get_organization_account_ids(api_client, exceptions = [], quiet = True): 8 | 9 | # List all accounts in the organization 10 | org_accounts = get_organization_accounts(api_client, exceptions, quiet) 11 | return [ account['Id'] for account in org_accounts ] 12 | 13 | 14 | def get_organization_accounts(api_client, exceptions = [], quiet = True): 15 | 16 | # List all accounts in the organization 17 | org_accounts = handle_truncated_response(api_client.list_accounts, {}, ['Accounts'])['Accounts'] 18 | if not quiet: 19 | printInfo('Found %d accounts in the organization.' 
% len(org_accounts)) 20 | for account in org_accounts: 21 | printDebug(str(account)) 22 | if len(exceptions): 23 | filtered_accounts = [] 24 | for account in org_accounts: 25 | if account['Id'] not in exceptions: 26 | filtered_accounts.append(account) 27 | org_accounts = filtered_accounts 28 | return org_accounts 29 | 30 | 31 | def get_organizational_units(api_client): 32 | ous = [] 33 | roots = api_client.list_roots()['Roots'] 34 | return get_children_organizational_units(api_client, roots) 35 | 36 | 37 | def get_children_organizational_units(api_client, parents): 38 | ous = [] 39 | for parent in parents: 40 | children = handle_truncated_response(api_client.list_organizational_units_for_parent, {'ParentId': parent['Id']}, ['OrganizationalUnits'])['OrganizationalUnits'] 41 | if len(children): 42 | ous += get_children_organizational_units(api_client, children) 43 | else: 44 | ous.append(parent) 45 | return ous 46 | 47 | def list_accounts_for_parent(api_client, parent): 48 | return handle_truncated_response(api_client.list_accounts_for_parent, {'ParentId': parent['Id']}, ['Accounts'])['Accounts'] 49 | 50 | -------------------------------------------------------------------------------- /opinel/services/s3.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | 4 | def get_s3_bucket_location(s3_client, bucket_name): 5 | """ 6 | 7 | :param s3_client: 8 | :param bucket_name: 9 | :return: 10 | """ 11 | location = s3_client.get_bucket_location(Bucket = bucket_name) 12 | return location['LocationConstraint'] if location['LocationConstraint'] else 'us-east-1' 13 | -------------------------------------------------------------------------------- /opinel/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nccgroup/opinel/2d4f5b96e0a1f9cb0356629f4f87e4ed99ce2606/opinel/utils/__init__.py -------------------------------------------------------------------------------- /opinel/utils/aws.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import boto3 4 | from botocore.session import Session 5 | from collections import Counter 6 | import time 7 | 8 | from opinel.utils.console import printInfo, printException 9 | 10 | 11 | 12 | def build_region_list(service, chosen_regions = [], partition_name = 'aws'): 13 | """ 14 | Build the list of target region names 15 | 16 | :param service: 17 | :param chosen_regions: 18 | :param partition_name: 19 | 20 | :return: 21 | """ 22 | service = 'ec2containerservice' if service == 'ecs' else service # Of course things aren't that easy... 
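# (the remap above appears to be necessary because botocore's endpoint data does not list the service under its short name 'ecs')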
23 | # Get list of regions from botocore 24 | regions = Session().get_available_regions(service, partition_name = partition_name) 25 | if len(chosen_regions): 26 | return list((Counter(regions) & Counter(chosen_regions)).elements()) 27 | else: 28 | return regions 29 | 30 | 31 | def connect_service(service, credentials, region_name = None, config = None, silent = False): 32 | """ 33 | Instantiates an AWS API client 34 | 35 | :param service: 36 | :param credentials: 37 | :param region_name: 38 | :param config: 39 | :param silent: 40 | 41 | :return: 42 | """ 43 | api_client = None 44 | try: 45 | client_params = {} 46 | client_params['service_name'] = service.lower() 47 | session_params = {} 48 | session_params['aws_access_key_id'] = credentials['AccessKeyId'] 49 | session_params['aws_secret_access_key'] = credentials['SecretAccessKey'] 50 | session_params['aws_session_token'] = credentials['SessionToken'] 51 | if region_name: 52 | client_params['region_name'] = region_name 53 | session_params['region_name'] = region_name 54 | if config: 55 | client_params['config'] = config 56 | aws_session = boto3.session.Session(**session_params) 57 | if not silent: 58 | infoMessage = 'Connecting to AWS %s' % service 59 | if region_name: 60 | infoMessage = infoMessage + ' in %s' % region_name 61 | printInfo('%s...' % infoMessage) 62 | api_client = aws_session.client(**client_params) 63 | except Exception as e: 64 | printException(e) 65 | return api_client 66 | 67 | 68 | def get_name(src, dst, default_attribute): 69 | """ 70 | 71 | :param src: 72 | :param dst: 73 | :param default_attribute: 74 | 75 | :return: 76 | """ 77 | name_found = False 78 | if 'Tags' in src: 79 | for tag in src['Tags']: 80 | if tag['Key'] == 'Name' and tag['Value'] != '': 81 | dst['name'] = tag['Value'] 82 | name_found = True 83 | if not name_found: 84 | dst['name'] = src[default_attribute] 85 | return dst['name'] 86 | 87 | 88 | def get_caller_identity(credentials): 89 | api_client = connect_service('sts', credentials, silent = True) 90 | return api_client.get_caller_identity() 91 | 92 | 93 | def get_username(credentials): 94 | caller_identity = get_caller_identity(credentials) 95 | return caller_identity['Arn'].split('/')[-1] 96 | 97 | 98 | def get_aws_account_id(credentials): 99 | caller_identity = get_caller_identity(credentials) 100 | return caller_identity['Arn'].split(':')[4] 101 | 102 | 103 | def get_partition_name(credentials): 104 | caller_identity = get_caller_identity(credentials) 105 | return caller_identity['Arn'].split(':')[1] 106 | 107 | 108 | def handle_truncated_response(callback, params, entities): 109 | """ 110 | Handle truncated responses 111 | 112 | :param callback: 113 | :param params: 114 | :param entities: 115 | 116 | :return: 117 | """ 118 | results = {} 119 | for entity in entities: 120 | results[entity] = [] 121 | while True: 122 | try: 123 | marker_found = False 124 | response = callback(**params) 125 | for entity in entities: 126 | if entity in response: 127 | results[entity] = results[entity] + response[entity] 128 | for marker_name in ['NextToken', 'Marker', 'PaginationToken']: 129 | if marker_name in response and response[marker_name]: 130 | params[marker_name] = response[marker_name] 131 | marker_found = True 132 | if not marker_found: 133 | break 134 | except Exception as e: 135 | if is_throttled(e): 136 | time.sleep(1) 137 | else: 138 | raise e 139 | return results 140 | 141 | 142 | def is_throttled(e): 143 | """ 144 | Determines whether the exception is due to API throttling. 
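The check is based on the error code of the botocore response ('Throttling', 'RequestLimitExceeded', 'ThrottlingException' or 'TooManyRequestsException').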
145 | 146 | :param e: Exception raised 147 | :return: True if it's a throttling exception else False 148 | """ 149 | return True if (hasattr(e, 'response') and 'Error' in e.response and e.response['Error']['Code'] in [ 'Throttling', 'RequestLimitExceeded', 'ThrottlingException', 'TooManyRequestsException' ]) else False 150 | -------------------------------------------------------------------------------- /opinel/utils/cli_parser.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import argparse 4 | import json 5 | import os 6 | import sys 7 | import tempfile 8 | 9 | opinel_arg_dir = os.path.join(os.path.expanduser('~'), '.aws/opinel') 10 | 11 | class OpinelArgumentParser(object): 12 | """ 13 | """ 14 | 15 | def __init__(self, tool_name = ''): 16 | self.parser = argparse.ArgumentParser() 17 | self.default_args = read_default_args(tool_name) 18 | 19 | def add_argument(self, argument_name, help = None, dest = None, nargs = None, default = None, action = None, choices = None): 20 | 21 | # Built-in, common arguments 22 | if argument_name == 'debug': 23 | self.parser.add_argument('--debug', 24 | dest='debug', 25 | default=False, 26 | action='store_true', 27 | help='Print the stack trace when exception occurs' if not help else help) 28 | elif argument_name == 'dry-run': 29 | self.parser.add_argument('--dry-run', 30 | dest='dry_run', 31 | default=False, 32 | action='store_true', 33 | help='Executes read-only actions (check status, describe*, get*, list*...)' if not help else help) 34 | elif argument_name == 'profile': 35 | default = os.environ.get('AWS_PROFILE', 'default') 36 | default_origin = " (from AWS_PROFILE)." if 'AWS_PROFILE' in os.environ else "." 37 | self.parser.add_argument('--profile', 38 | dest='profile', 39 | default=[default], 40 | nargs='+', 41 | help='Name of the profile. Defaults to %(default)s' + default_origin if not help else help) 42 | elif argument_name == 'regions': 43 | self.parser.add_argument('--regions', 44 | dest='regions', 45 | default=[], 46 | nargs='+', 47 | help='Name of regions to run the tool in, defaults to all' if not help else help) 48 | elif argument_name == 'partition-name': 49 | self.parser.add_argument('--partition-name', 50 | dest='partition_name', 51 | default='aws', 52 | help='Switch out of the public AWS partition (e.g. US gov or China)') 53 | elif argument_name == 'vpc': 54 | self.parser.add_argument('--vpc', 55 | dest='vpc', 56 | default=[], 57 | nargs='+', 58 | help='Name of VPC to run the tool in, defaults to all' if not help else help) 59 | elif argument_name == 'force': 60 | self.parser.add_argument('--force', 61 | dest='force_write', 62 | default=False, 63 | action='store_true', 64 | help='Overwrite existing files' if not help else help) 65 | elif argument_name == 'ip-ranges': 66 | self.parser.add_argument('--ip-ranges', 67 | dest='ip_ranges', 68 | default=[], 69 | nargs='+', 70 | help='Config file(s) that contain your known IP ranges.' if not help else help) 71 | elif argument_name == 'ip-ranges-name-key': 72 | self.parser.add_argument('--ip-ranges-name-key', 73 | dest='ip_ranges_name_key', 74 | default='name', 75 | help='Name of the key containing the display name of a known CIDR.' 
if not help else help) 76 | elif argument_name == 'mfa-serial': 77 | self.parser.add_argument('--mfa-serial', 78 | dest='mfa_serial', 79 | default=None, 80 | help='ARN of the user\'s MFA device' if not help else help) 81 | elif argument_name == 'mfa-code': 82 | self.parser.add_argument('--mfa-code', 83 | dest='mfa_code', 84 | default=None, 85 | help='Six-digit code displayed on the MFA device.' if not help else help) 86 | elif argument_name == 'csv-credentials': 87 | self.parser.add_argument('--csv-credentials', 88 | dest='csv_credentials', 89 | default=None, 90 | help='Path to a CSV file containing the access key ID and secret key' if not help else help) 91 | elif argument_name == 'user-name': 92 | self.parser.add_argument('--user-name', 93 | dest='user_name', 94 | default=[], 95 | nargs='+', 96 | help='Name of the user.' if not help else help) 97 | elif argument_name == 'bucket-name': 98 | self.parser.add_argument('--bucket-name', 99 | dest='bucket_name', 100 | default=[None], 101 | help='Name of the s3 bucket.' if not help else help) 102 | elif argument_name == 'group-name': 103 | self.parser.add_argument('--group-name', 104 | dest='group_name', 105 | default=[], 106 | nargs='+', 107 | help='Name of the IAM group.' if not help else help) 108 | 109 | # Default 110 | elif help != None and default != None and (nargs != None or action != None): 111 | dest = argument_name.replace('-', '_') if not dest else dest 112 | if nargs: 113 | if not choices: 114 | self.parser.add_argument('--%s' % argument_name, 115 | dest = dest, 116 | default = self.default_args[dest] if dest in self.default_args else default, 117 | nargs = nargs, 118 | help = help) 119 | else: 120 | self.parser.add_argument('--%s' % argument_name, 121 | dest = dest, 122 | default = self.default_args[dest] if dest in self.default_args else default, 123 | nargs = nargs, 124 | choices = choices, 125 | help = help) 126 | elif action: 127 | self.parser.add_argument('--%s' % argument_name, 128 | dest = dest, 129 | default = self.default_args[dest] if dest in self.default_args else default, 130 | action = action, 131 | help = help) 132 | 133 | # Error 134 | else: 135 | raise Exception('Invalid parameter name %s' % argument_name) 136 | 137 | 138 | def parse_args(self): 139 | args = self.parser.parse_args() 140 | return args 141 | 142 | 143 | def read_default_args(tool_name): 144 | """ 145 | Read default argument values for a given tool 146 | 147 | :param tool_name: Name of the script to read the default arguments for 148 | :return: Dictionary of default arguments (shared + tool-specific) 149 | """ 150 | global opinel_arg_dir 151 | 152 | profile_name = 'default' 153 | # h4ck to have an early read of the profile name 154 | for i, arg in enumerate(sys.argv): 155 | if arg == '--profile' and len(sys.argv) >= i + 1: 156 | profile_name = sys.argv[i + 1] 157 | #if not os.path.isdir(opinel_arg_dir): 158 | # os.makedirs(opinel_arg_dir) 159 | if not os.path.isdir(opinel_arg_dir): 160 | try: 161 | os.makedirs(opinel_arg_dir) 162 | except: 163 | # Within AWS Lambda, home directories are not writable. This attempts to detect that... 
164 | # ...and uses the /tmp folder, which *is* writable in AWS Lambda 165 | opinel_arg_dir = os.path.join(tempfile.gettempdir(), '.aws/opinel') 166 | if not os.path.isdir(opinel_arg_dir): 167 | os.makedirs(opinel_arg_dir) 168 | opinel_arg_file = os.path.join(opinel_arg_dir, '%s.json' % profile_name) 169 | default_args = {} 170 | if os.path.isfile(opinel_arg_file): 171 | with open(opinel_arg_file, 'rt') as f: 172 | all_args = json.load(f) 173 | for target in all_args: 174 | if tool_name.endswith(target): 175 | default_args.update(all_args[target]) 176 | for k in all_args['shared']: 177 | if k not in default_args: 178 | default_args[k] = all_args['shared'][k] 179 | return default_args 180 | -------------------------------------------------------------------------------- /opinel/utils/conditions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import datetime 4 | import dateutil.parser 5 | import json 6 | import netaddr 7 | import re 8 | 9 | from opinel.utils.console import printError 10 | 11 | from iampoliciesgonewild import get_actions_from_statement, _expand_wildcard_action 12 | 13 | 14 | 15 | def __prepare_age_test(a, b): 16 | if type(a) != list: 17 | printError('Error: olderThan requires a list such as [ N , \'days\' ] or [ M, \'hours\'].') 18 | raise Exception 19 | number = int(a[0]) 20 | unit = a[1] 21 | if unit not in ['days', 'hours', 'minutes', 'seconds']: 22 | printError('Error: only days, hours, minutes, and seconds are supported.') 23 | raise Exception 24 | if unit == 'hours': 25 | number *= 3600 26 | unit = 'seconds' 27 | elif unit == 'minutes': 28 | number *= 60 29 | unit = 'seconds' 30 | age = getattr((datetime.datetime.today() - dateutil.parser.parse(str(b)).replace(tzinfo=None)), unit) 31 | return age, number 32 | 33 | 34 | def pass_condition(b, test, a): 35 | """ 36 | Generic test function used by Scout2 / AWS recipes 37 | . 
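The test name selects one of the comparisons implemented below: equality, numeric and length comparisons, emptiness/null checks, regex matching, date and age checks, CIDR membership, and IAM policy action/principal checks.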
38 | :param b: Value to be tested against 39 | :param test: Name of the test case to run 40 | :param a: Value to be tested 41 | 42 | :return: True of condition is met, False otherwise 43 | """ 44 | 45 | # Return false by default 46 | result = False 47 | 48 | # Equality tests 49 | if test == 'equal': 50 | a = str(a) 51 | b = str(b) 52 | result = (a == b) 53 | elif test == 'notEqual': 54 | result = (not pass_condition(b, 'equal', a)) 55 | 56 | # More/Less tests 57 | elif test == 'lessThan': 58 | result = (int(b) < int(a)) 59 | elif test == 'lessOrEqual': 60 | result = (int(b) <= int(a)) 61 | elif test == 'moreThan': 62 | result = (int(b) > int(a)) 63 | elif test == 'moreOrEqual': 64 | result = (int(b) >= int(a)) 65 | 66 | # Empty tests 67 | elif test == 'empty': 68 | result = ((type(b) == dict and b == {}) or (type(b) == list and b == []) or (type(b) == list and b == [None])) 69 | elif test == 'notEmpty': 70 | result = (not pass_condition(b, 'empty', 'a')) 71 | elif test == 'null': 72 | result = ((b == None) or (type(b) == str and b == 'None')) 73 | elif test == 'notNull': 74 | result = (not pass_condition(b, 'null', a)) 75 | 76 | # Boolean tests 77 | elif test == 'true': 78 | result = (str(b).lower() == 'true') 79 | elif test == 'notTrue' or test == 'false': 80 | result = (str(b).lower() == 'false') 81 | 82 | # Object length tests 83 | elif test == 'lengthLessThan': 84 | result = (len(b) < int(a)) 85 | elif test == 'lengthMoreThan': 86 | result = (len(b) > int(a)) 87 | elif test == 'lengthEqual': 88 | result = (len(b) == int(a)) 89 | 90 | # Dictionary keys tests 91 | elif test == 'withKey': 92 | result = (a in b) 93 | elif test == 'withoutKey': 94 | result = (not a in b) 95 | 96 | # List tests 97 | elif test == 'containAtLeastOneOf': 98 | result = False 99 | if not type(b) == list: 100 | b = [ b ] 101 | if not type(a) == list: 102 | a = [ a ] 103 | for c in b: 104 | if type(c): 105 | c = str(c) 106 | if c in a: 107 | result = True 108 | break 109 | elif test == 'containAtLeastOneDifferentFrom': 110 | result = False 111 | if not type(b) == list: 112 | b = [ b ] 113 | if not type(a) == list: 114 | a = [ a ] 115 | for c in b: 116 | if c != None and c != '' and c not in a: 117 | result = True 118 | break 119 | elif test == 'containNoneOf': 120 | result = True 121 | if not type(b) == list: 122 | b = [ b ] 123 | if not type(a) == list: 124 | a = [ a ] 125 | for c in b: 126 | if c in a: 127 | result = False 128 | break 129 | 130 | # Regex tests 131 | elif test == 'match': 132 | if type(a) != list: 133 | a = [ a ] 134 | b = str(b) 135 | for c in a: 136 | if re.match(c, b) != None: 137 | result = True 138 | break 139 | elif test == 'notMatch': 140 | result = (not pass_condition(b, 'match', a)) 141 | 142 | # Date tests 143 | elif test == 'priorToDate': 144 | b = dateutil.parser.parse(str(b)).replace(tzinfo=None) 145 | a = dateutil.parser.parse(str(a)).replace(tzinfo=None) 146 | result = (b < a) 147 | elif test == 'olderThan': 148 | age, threshold = __prepare_age_test(a, b) 149 | result = (age > threshold) 150 | elif test == 'newerThan': 151 | age, threshold = __prepare_age_test(a, b) 152 | result = (age < threshold) 153 | 154 | # CIDR tests 155 | elif test == 'inSubnets': 156 | result = False 157 | grant = netaddr.IPNetwork(b) 158 | if type(a) != list: 159 | a = [ a ] 160 | for c in a: 161 | known_subnet = netaddr.IPNetwork(c) 162 | if grant in known_subnet: 163 | result = True 164 | break 165 | elif test == 'notInSubnets': 166 | result = (not pass_condition(b, 'inSubnets', a)) 167 | 168 | # Policy 
statement tests 169 | elif test == 'containAction': 170 | result = False 171 | if type(b) != dict: 172 | b = json.loads(b) 173 | statement_actions = get_actions_from_statement(b) 174 | rule_actions = _expand_wildcard_action(a) 175 | for action in rule_actions: 176 | if action.lower() in statement_actions: 177 | result = True 178 | break 179 | elif test == 'notContainAction': 180 | result = (not pass_condition(b, 'containAction', a)) 181 | elif test == 'containAtLeastOneAction': 182 | result = False 183 | if type(b) != dict: 184 | b = json.loads(b) 185 | if type(a) != list: 186 | a = [ a ] 187 | actions = get_actions_from_statement(b) 188 | for c in a: 189 | if c.lower() in actions: 190 | result = True 191 | break 192 | 193 | # Policy principal tests 194 | elif test == 'isCrossAccount': 195 | result = False 196 | if type(b) != list: 197 | b = [b] 198 | for c in b: 199 | if c != a and not re.match(r'arn:aws:iam:.*?:%s:.*' % a, c): 200 | result = True 201 | break 202 | elif test == 'isSameAccount': 203 | result = False 204 | if type(b) != list: 205 | b = [b] 206 | for c in b: 207 | if c == a or re.match(r'arn:aws:iam:.*?:%s:.*' % a, c): 208 | result = True 209 | break 210 | 211 | # Unknown test case 212 | else: 213 | printError('Error: unknown test case %s' % test) 214 | raise Exception 215 | 216 | return result 217 | -------------------------------------------------------------------------------- /opinel/utils/console.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import os 4 | import re 5 | import sys 6 | import traceback 7 | 8 | 9 | 10 | ######################################## 11 | # Globals 12 | ######################################## 13 | 14 | mfa_serial_format = r'^arn:aws:iam::\d+:mfa/[a-zA-Z0-9\+=,.@_-]+$' 15 | re_mfa_serial_format = re.compile(mfa_serial_format) 16 | 17 | 18 | 19 | ######################################## 20 | # Print configuration functions 21 | ######################################## 22 | 23 | def configPrintException(enable): 24 | """ 25 | Configure whether full stacktraces should be dumped in the console output 26 | 27 | :param enable: 28 | 29 | :return: 30 | """ 31 | global verbose_exceptions 32 | verbose_exceptions = enable 33 | 34 | 35 | 36 | ######################################## 37 | # Print functions 38 | ######################################## 39 | 40 | def printDebug(msg): 41 | if verbose_exceptions: 42 | printGeneric(sys.stderr, msg) 43 | 44 | 45 | def printError(msg, newLine = True): 46 | printGeneric(sys.stderr, msg, newLine) 47 | 48 | 49 | def printException(e, debug_only = False): 50 | global verbose_exceptions 51 | if verbose_exceptions: 52 | printError(str(traceback.format_exc())) 53 | elif not debug_only: 54 | printError(str(e)) 55 | 56 | 57 | def printGeneric(out, msg, newLine = True): 58 | out.write(msg) 59 | out.flush() 60 | if newLine == True: 61 | out.write('\n') 62 | 63 | 64 | def printInfo(msg, newLine = True ): 65 | printGeneric(sys.stdout, msg, newLine) 66 | 67 | 68 | 69 | ######################################## 70 | # Prompt functions 71 | ######################################## 72 | 73 | def prompt(test_input = None): 74 | """ 75 | Prompt function that works for Python2 and Python3 76 | 77 | :param test_input: Value to be returned when testing 78 | 79 | :return: Value typed by user (or passed in argument when testing) 80 | """ 81 | if test_input != None: 82 | if type(test_input) == list and len(test_input): 83 | choice = test_input.pop(0) 84 | elif 
type(test_input) == list: 85 | choice = '' 86 | else: 87 | choice = test_input 88 | else: 89 | # Coverage: 4 missed statements 90 | try: 91 | choice = raw_input() 92 | except: 93 | choice = input() 94 | return choice 95 | 96 | 97 | def prompt_4_mfa_code(activate = False, input = None): 98 | """ 99 | Prompt for an MFA code 100 | 101 | :param activate: Set to true when prompting for the second code while activating a new MFA device 102 | :param input: Used for unit testing 103 | 104 | :return: The MFA code 105 | """ 106 | while True: 107 | if activate: 108 | prompt_string = 'Enter the next value: ' 109 | else: 110 | prompt_string = 'Enter your MFA code (or \'q\' to abort): ' 111 | mfa_code = prompt_4_value(prompt_string, no_confirm = True, input = input) 112 | try: 113 | if mfa_code == 'q': 114 | return mfa_code 115 | int(mfa_code) 116 | mfa_code[5] 117 | break 118 | except: 119 | printError('Error: your MFA code must only consist of digits and be at least 6 characters long.') 120 | return mfa_code 121 | 122 | 123 | def prompt_4_mfa_serial(input = None): 124 | """ 125 | Prompt for an MFA serial number 126 | 127 | :param input: Used for unit testing 128 | 129 | :return: The MFA serial number 130 | """ 131 | return prompt_4_value('Enter your MFA serial:', required = False, regex = re_mfa_serial_format, regex_format = mfa_serial_format, input = input) 132 | 133 | 134 | def prompt_4_overwrite(filename, force_write, input = None): 135 | """ 136 | Prompt whether the file should be overwritten 137 | 138 | :param filename: Name of the file about to be written 139 | :param force_write: Skip confirmation prompt if this flag is set 140 | :param input: Used for unit testing 141 | 142 | :return: Boolean whether file write operation is allowed 143 | """ 144 | if not os.path.exists(filename) or force_write: 145 | return True 146 | return prompt_4_yes_no('File \'{}\' already exists. Do you want to overwrite it'.format(filename), input = input) 147 | 148 | 149 | def prompt_4_value(question, choices = None, default = None, display_choices = True, display_indices = False, authorize_list = False, is_question = False, no_confirm = False, required = True, regex = None, regex_format = '', max_laps = 5, input = None, return_index = False): 150 | """ 151 | Prompt for a value 152 | 153 | :param question: Question to be asked 154 | :param choices: List of authorized answers 155 | :param default: Value suggested by default 156 | :param display_choices: Display accepted choices 157 | :param display_indices: Display the index in the list next to the choice 158 | :param authorize_list: Set to true if a list of answers may be accepted 159 | :param is_question: Set to true to append a question mark 160 | :param no_confirm: Set to true to not prompt for a confirmation of the value 161 | :param required: Set to false if an empty answer is authorized 162 | :param regex: Compiled regex that the answer must match to be accepted 163 | :param regex_format: Human-readable description of the expected format, displayed when the regex does not match 164 | :param max_laps: Maximum number of failed attempts before giving up 165 | :param input: Used for unit testing 166 | 167 | :return: The value entered by the user, or its index in choices when return_index is set 168 | """ 169 | if choices and display_choices and not display_indices: 170 | question = question + ' (' + '/'.join(choices) + ')' 171 | lap_n = 0 172 | while True: 173 | if lap_n >= max_laps: 174 | printError('Automatically aborting prompt loop after %d failures' % max_laps) 175 | return None 176 | lap_n += 1 177 | can_return = False 178 | # Display the question, choices, and prompt for the answer 179 | if is_question: 180 | question = question + '? 
' 181 | printError(question) 182 | if choices and display_indices: 183 | for c in choices: 184 | printError('%3d. %s' % (choices.index(c), c)) 185 | printError('Enter the number corresponding to your choice: ', False) 186 | choice = prompt(input) 187 | # Set the default value if empty choice 188 | if not choice or choice == '': 189 | if default: 190 | if no_confirm or prompt_4_yes_no('Use the default value (' + default + ')'): 191 | #return default 192 | choice = default 193 | can_return = True 194 | elif not required: 195 | can_return = True 196 | else: 197 | printError('Error: you cannot leave this parameter empty.') 198 | # Validate the value against a whitelist of choices 199 | elif choices: 200 | user_choices = [item.strip() for item in choice.split(',')] 201 | if not authorize_list and len(user_choices) > 1: 202 | printError('Error: multiple values are not supported; please enter a single value.') 203 | else: 204 | choice_valid = True 205 | if display_indices and int(choice) < len(choices): 206 | int_choice = choice 207 | choice = choices[int(choice)] 208 | else: 209 | for c in user_choices: 210 | if not c in choices: 211 | printError('Invalid value (%s).' % c) 212 | choice_valid = False 213 | break 214 | if choice_valid: 215 | can_return = True 216 | # Validate against a regex 217 | elif regex: 218 | if regex.match(choice): 219 | #return choice 220 | can_return = True 221 | else: 222 | printError('Error: expected format is: %s' % regex_format) 223 | else: 224 | # No automated validation, can attempt to return 225 | can_return = True 226 | if can_return: 227 | # Manually onfirm that the entered value is correct if needed 228 | if no_confirm or prompt_4_yes_no('You entered "' + choice + '". Is that correct', input=input): 229 | return int(int_choice) if return_index else choice 230 | 231 | 232 | def prompt_4_yes_no(question, input = None): 233 | """ 234 | Prompt for a yes/no or y/n answer 235 | . 236 | :param question: Question to be asked 237 | :param input: Used for unit testing 238 | 239 | :return: True for yes/y, False for no/n 240 | """ 241 | count = 0 242 | while True: 243 | printError(question + ' (y/n)? ') 244 | choice = prompt(input).lower() 245 | if choice == 'yes' or choice == 'y': 246 | return True 247 | elif choice == 'no' or choice == 'n': 248 | return False 249 | else: 250 | count += 1 251 | printError('\'%s\' is not a valid answer. Enter \'yes\'(y) or \'no\'(n).' % choice) 252 | if count > 3: 253 | return None 254 | -------------------------------------------------------------------------------- /opinel/utils/credentials.py: -------------------------------------------------------------------------------- 1 | # Import future print 2 | from __future__ import print_function 3 | 4 | import boto3 5 | import datetime 6 | import dateutil.parser 7 | import json 8 | import fileinput 9 | import os 10 | import re 11 | import requests # TODO: get rid of that and make sure urllib2 validates certs ? 
12 | import string 13 | 14 | from opinel.utils.console import printException, printError, printInfo 15 | from opinel.utils.console import prompt_4_mfa_code 16 | from opinel.utils.fs import save_blob_as_json 17 | from opinel.utils.aws import connect_service 18 | 19 | 20 | ######################################## 21 | # Globals 22 | ######################################## 23 | 24 | re_profile_name = re.compile(r'\[(.*)\]') 25 | re_access_key = re.compile(r'aws_access_key_id') 26 | re_secret_key = re.compile(r'aws_secret_access_key') 27 | re_role_arn = re.compile(r'role_arn') 28 | re_session_token = re.compile(r'aws_session_token') 29 | re_security_token = re.compile(r'aws_security_token') 30 | re_expiration = re.compile(r'expiration') 31 | re_source_profile = re.compile(r'source_profile') 32 | re_external_id = re.compile(r'aws_external_id') 33 | 34 | re_gov_region = re.compile(r'(.*?)-gov-(.*?)') 35 | re_cn_region = re.compile(r'^cn-(.*?)') 36 | 37 | re_port_range = re.compile(r'(\d+)\-(\d+)') 38 | re_single_port = re.compile(r'(\d+)') 39 | 40 | mfa_serial = r'(aws_mfa_serial|mfa_serial)' 41 | mfa_serial_format = r'arn:aws:iam::\d+:mfa/[a-zA-Z0-9\+=,.@_-]+' 42 | re_mfa_serial = re.compile(mfa_serial) 43 | re_mfa_serial_format = re.compile(mfa_serial_format) 44 | 45 | 46 | aws_config_dir = os.path.join(os.path.expanduser('~'), '.aws') 47 | aws_credentials_file = os.path.join(aws_config_dir, 'credentials') 48 | aws_credentials_file_tmp = os.path.join(aws_config_dir, 'credentials.tmp') 49 | aws_config_file = os.path.join(aws_config_dir, 'config') 50 | 51 | 52 | ######################################## 53 | # Credentials read/write functions 54 | ######################################## 55 | 56 | 57 | def assume_role(role_name, credentials, role_arn, role_session_name, silent = False): 58 | """ 59 | Assume role and save credentials 60 | 61 | :param role_name: 62 | :param credentials: 63 | :param role_arn: 64 | :param role_session_name: 65 | :param silent: 66 | :return: 67 | """ 68 | external_id = credentials.pop('ExternalId') if 'ExternalId' in credentials else None 69 | # Connect to STS 70 | sts_client = connect_service('sts', credentials, silent = silent) 71 | # Set required arguments for assume role call 72 | sts_args = { 73 | 'RoleArn': role_arn, 74 | 'RoleSessionName': role_session_name 75 | } 76 | # MFA used ? 77 | if 'mfa_serial' in credentials and 'mfa_code' in credentials: 78 | sts_args['TokenCode'] = credentials['mfa_code'] 79 | sts_args['SerialNumber'] = credentials['mfa_serial'] 80 | # External ID used ? 
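# (an external ID is typically required by the trust policy of cross-account roles)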
81 | if external_id: 82 | sts_args['ExternalId'] = external_id 83 | # Assume the role 84 | sts_response = sts_client.assume_role(**sts_args) 85 | credentials = sts_response['Credentials'] 86 | cached_credentials_filename = get_cached_credentials_filename(role_name, role_arn) 87 | #with open(cached_credentials_filename, 'wt+') as f: 88 | # write_data_to_file(f, sts_response, True, False) 89 | cached_credentials_path = os.path.dirname(cached_credentials_filename) 90 | if not os.path.isdir(cached_credentials_path): 91 | os.makedirs(cached_credentials_path) 92 | save_blob_as_json(cached_credentials_filename, sts_response, True, False) # blob, force_write, debug): 93 | return credentials 94 | 95 | 96 | def get_cached_credentials_filename(role_name, role_arn): 97 | """ 98 | Construct filepath for cached credentials (AWS CLI scheme) 99 | 100 | :param role_name: 101 | :param role_arn: 102 | :return: 103 | """ 104 | filename_p1 = role_name.replace('/','-') 105 | filename_p2 = role_arn.replace('/', '-').replace(':', '_') 106 | return os.path.join(os.path.join(os.path.expanduser('~'), '.aws'), 'cli/cache/%s--%s.json' % 107 | (filename_p1, filename_p2)) 108 | 109 | 110 | def get_profiles_from_aws_credentials_file(credentials_files = [aws_credentials_file, aws_config_file]): 111 | """ 112 | 113 | :param credentials_files: 114 | 115 | :return: 116 | """ 117 | profiles = [] 118 | for filename in credentials_files: 119 | if os.path.isfile(filename): 120 | with open(filename) as f: 121 | lines = f.readlines() 122 | for line in lines: 123 | groups = re_profile_name.match(line) 124 | if groups: 125 | profiles.append(groups.groups()[0]) 126 | return sorted(profiles) 127 | 128 | 129 | def generate_password(length=16): 130 | """ 131 | Generate a password using random characters from uppercase, lowercase, digits, and symbols 132 | 133 | :param length: Length of the password to be generated 134 | :return: The random password 135 | """ 136 | chars = string.ascii_letters + string.digits + '!@#$%^&*()_+-=[]{};:,<.>?|' 137 | modulus = len(chars) 138 | pchars = os.urandom(16) 139 | if type(pchars) == str: 140 | return ''.join(chars[i % modulus] for i in map(ord, pchars)) 141 | else: 142 | return ''.join(chars[i % modulus] for i in pchars) 143 | 144 | 145 | def init_creds(): 146 | """ 147 | Create a dictionary with all the necessary keys set to "None" 148 | 149 | :return: 150 | """ 151 | return { 'AccessKeyId': None, 'SecretAccessKey': None, 'SessionToken': None, 152 | 'Expiration': None, 'SerialNumber': None, 'TokenCode': None } 153 | 154 | 155 | def init_sts_session(profile_name, credentials, duration = 28800, session_name = None, save_creds = True): 156 | """ 157 | Fetch STS credentials 158 | 159 | :param profile_name: 160 | :param credentials: 161 | :param duration: 162 | :param session_name: 163 | :param save_creds: 164 | :return: 165 | """ 166 | # Set STS arguments 167 | sts_args = { 168 | 'DurationSeconds': duration 169 | } 170 | # Prompt for MFA code if MFA serial present 171 | if 'SerialNumber' in credentials and credentials['SerialNumber']: 172 | if not credentials['TokenCode']: 173 | credentials['TokenCode'] = prompt_4_mfa_code() 174 | if credentials['TokenCode'] == 'q': 175 | credentials['SerialNumber'] = None 176 | sts_args['TokenCode'] = credentials['TokenCode'] 177 | sts_args['SerialNumber'] = credentials['SerialNumber'] 178 | # Init session 179 | sts_client = boto3.session.Session(credentials['AccessKeyId'], credentials['SecretAccessKey']).client('sts') 180 | sts_response = 
sts_client.get_session_token(**sts_args) 181 | if save_creds: 182 | # Move long-lived credentials if needed 183 | if not profile_name.endswith('-nomfa') and credentials['AccessKeyId'].startswith('AKIA'): 184 | write_creds_to_aws_credentials_file(profile_name + '-nomfa', credentials) 185 | # Save STS values in the .aws/credentials file 186 | write_creds_to_aws_credentials_file(profile_name, sts_response['Credentials']) 187 | return sts_response['Credentials'] 188 | 189 | 190 | def read_creds_from_aws_credentials_file(profile_name, credentials_file = aws_credentials_file): 191 | """ 192 | Read credentials from AWS config file 193 | 194 | :param profile_name: 195 | :param credentials_file: 196 | :return: 197 | """ 198 | credentials = init_creds() 199 | profile_found = False 200 | try: 201 | # Make sure the ~.aws folder exists 202 | if not os.path.exists(aws_config_dir): 203 | os.makedirs(aws_config_dir) 204 | with open(credentials_file, 'rt') as cf: 205 | for line in cf: 206 | profile_line = re_profile_name.match(line) 207 | if profile_line: 208 | if profile_line.groups()[0] == profile_name: 209 | profile_found = True 210 | else: 211 | profile_found = False 212 | if profile_found: 213 | if re_access_key.match(line): 214 | credentials['AccessKeyId'] = line.split("=")[1].strip() 215 | elif re_secret_key.match(line): 216 | credentials['SecretAccessKey'] = line.split("=")[1].strip() 217 | elif re_mfa_serial.match(line): 218 | credentials['SerialNumber'] = (line.split('=')[1]).strip() 219 | elif re_session_token.match(line) or re_security_token.match(line): 220 | credentials['SessionToken'] = ('='.join(x for x in line.split('=')[1:])).strip() 221 | elif re_expiration.match(line): 222 | credentials['Expiration'] = ('='.join(x for x in line.split('=')[1:])).strip() 223 | except Exception as e: 224 | # Silent if error is due to no ~/.aws/credentials file 225 | if not hasattr(e, 'errno') or e.errno != 2: 226 | printException(e) 227 | return credentials 228 | 229 | 230 | def read_creds_from_csv(filename): 231 | """ 232 | Read credentials from a CSV file 233 | 234 | :param filename: 235 | :return: 236 | """ 237 | key_id = None 238 | secret = None 239 | mfa_serial = None 240 | secret_next = False 241 | with open(filename, 'rt') as csvfile: 242 | for i, line in enumerate(csvfile): 243 | values = line.split(',') 244 | for v in values: 245 | if v.startswith('AKIA'): 246 | key_id = v.strip() 247 | secret_next = True 248 | elif secret_next: 249 | secret = v.strip() 250 | secret_next = False 251 | elif re_mfa_serial_format.match(v): 252 | mfa_serial = v.strip() 253 | return key_id, secret, mfa_serial 254 | 255 | 256 | def read_creds_from_ec2_instance_metadata(): 257 | """ 258 | Read credentials from EC2 instance metadata (IAM role) 259 | 260 | :return: 261 | """ 262 | creds = init_creds() 263 | try: 264 | has_role = requests.get('http://169.254.169.254/latest/meta-data/iam/security-credentials', timeout = 1) 265 | if has_role.status_code == 200: 266 | iam_role = has_role.text 267 | credentials = requests.get('http://169.254.169.254/latest/meta-data/iam/security-credentials/%s/' % 268 | iam_role.strip()).json() 269 | for c in ['AccessKeyId', 'SecretAccessKey']: 270 | creds[c] = credentials[c] 271 | creds['SessionToken'] = credentials['Token'] 272 | return creds 273 | except Exception as e: 274 | return False 275 | 276 | 277 | def read_creds_from_ecs_container_metadata(): 278 | """ 279 | Read credentials from ECS instance metadata (IAM role) 280 | 281 | :return: 282 | """ 283 | creds = init_creds() 284 | try: 
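# The ECS agent serves the task's role credentials at 169.254.170.2, under the path advertised in the AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable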
285 | ecs_metadata_relative_uri = os.environ['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] 286 | credentials = requests.get('http://169.254.170.2' + ecs_metadata_relative_uri, timeout = 1).json() 287 | for c in ['AccessKeyId', 'SecretAccessKey']: 288 | creds[c] = credentials[c] 289 | creds['SessionToken'] = credentials['Token'] 290 | return creds 291 | except Exception as e: 292 | return False 293 | 294 | 295 | def read_creds_from_environment_variables(): 296 | """ 297 | Read credentials from environment variables 298 | 299 | :return: 300 | """ 301 | creds = init_creds() 302 | # Check environment variables 303 | if 'AWS_ACCESS_KEY_ID' in os.environ and 'AWS_SECRET_ACCESS_KEY' in os.environ: 304 | creds['AccessKeyId'] = os.environ['AWS_ACCESS_KEY_ID'] 305 | creds['SecretAccessKey'] = os.environ['AWS_SECRET_ACCESS_KEY'] 306 | if 'AWS_SESSION_TOKEN' in os.environ: 307 | creds['SessionToken'] = os.environ['AWS_SESSION_TOKEN'] 308 | return creds 309 | 310 | 311 | def read_profile_from_environment_variables(): 312 | """ 313 | Read profiles from env 314 | 315 | :return: 316 | """ 317 | role_arn = os.environ.get('AWS_ROLE_ARN', None) 318 | external_id = os.environ.get('AWS_EXTERNAL_ID', None) 319 | return role_arn, external_id 320 | 321 | 322 | def read_profile_from_aws_config_file(profile_name, config_file = aws_config_file): 323 | """ 324 | Read profiles from AWS config file 325 | 326 | :param profile_name: 327 | :param config_file: 328 | :return: 329 | """ 330 | role_arn = None 331 | source_profile = 'default' 332 | mfa_serial = None 333 | profile_found = False 334 | external_id = None 335 | try: 336 | with open(config_file, 'rt') as config: 337 | for line in config: 338 | profile_line = re_profile_name.match(line) 339 | if profile_line: 340 | role_profile_name = profile_line.groups()[0].split()[-1] 341 | if role_profile_name == profile_name: 342 | profile_found = True 343 | else: 344 | profile_found = False 345 | if profile_found: 346 | if re_role_arn.match(line): 347 | role_arn = line.split('=')[1].strip() 348 | elif re_source_profile.match(line): 349 | source_profile = line.split('=')[1].strip() 350 | elif re_mfa_serial.match(line): 351 | mfa_serial = line.split('=')[1].strip() 352 | elif re_external_id.match(line): 353 | external_id = line.split('=')[1].strip() 354 | except Exception as e: 355 | # Silent if error is due to no .aws/config file 356 | if not hasattr(e, 'errno') or e.errno != 2: 357 | printException(e) 358 | return role_arn, source_profile, mfa_serial, external_id 359 | 360 | 361 | def show_profiles_from_aws_credentials_file(credentials_files = [aws_credentials_file, aws_config_file]): 362 | """ 363 | Show profile names from ~/.aws/credentials 364 | 365 | :param credentials_files: 366 | :return: 367 | """ 368 | profiles = get_profiles_from_aws_credentials_file(credentials_files) 369 | for profile in set(profiles): 370 | printInfo(' * %s' % profile) 371 | 372 | 373 | def write_creds_to_aws_credentials_file(profile_name, credentials, credentials_file = aws_credentials_file): 374 | """ 375 | Write credentials to AWS config file 376 | 377 | :param profile_name: 378 | :param credentials: 379 | :param credentials_file: 380 | :return: 381 | """ 382 | profile_found = False 383 | profile_ever_found = False 384 | session_token_written = False 385 | security_token_written = False 386 | mfa_serial_written = False 387 | expiration_written = False 388 | # Create the .aws folder if needed 389 | if not os.path.isdir(aws_config_dir): 390 | os.mkdir(aws_config_dir) 391 | # Create an empty file if 
target does not exist 392 | if not os.path.isfile(credentials_file): 393 | open(credentials_file, 'a').close() 394 | # Open and parse/edit file 395 | for line in fileinput.input(credentials_file, inplace=True): 396 | profile_line = re_profile_name.match(line) 397 | if profile_line: 398 | if profile_line.groups()[0] == profile_name: 399 | profile_found = True 400 | profile_ever_found = True 401 | else: 402 | profile_found = False 403 | print(line.rstrip()) 404 | elif profile_found: 405 | if re_access_key.match(line) and 'AccessKeyId' in credentials and credentials['AccessKeyId']: 406 | print('aws_access_key_id = %s' % credentials['AccessKeyId']) 407 | elif re_secret_key.match(line) and 'SecretAccessKey' in credentials and credentials['SecretAccessKey']: 408 | print('aws_secret_access_key = %s' % credentials['SecretAccessKey']) 409 | elif re_mfa_serial.match(line) and 'SerialNumber' in credentials and credentials['SerialNumber']: 410 | print('aws_mfa_serial = %s' % credentials['SerialNumber']) 411 | mfa_serial_written = True 412 | elif re_session_token.match(line) and 'SessionToken' in credentials and credentials['SessionToken']: 413 | print('aws_session_token = %s' % credentials['SessionToken']) 414 | session_token_written = True 415 | elif re_security_token.match(line) and 'SessionToken' in credentials and credentials['SessionToken']: 416 | print('aws_security_token = %s' % credentials['SessionToken']) 417 | security_token_written = True 418 | elif re_expiration.match(line) and 'Expiration' in credentials and credentials['Expiration']: 419 | print('expiration = %s' % credentials['Expiration']) 420 | expiration_written = True 421 | else: 422 | print(line.rstrip()) 423 | else: 424 | print(line.rstrip()) 425 | 426 | # Complete the profile if needed 427 | if profile_found: 428 | with open(credentials_file, 'a') as f: 429 | complete_profile(f, credentials, session_token_written, mfa_serial_written) 430 | 431 | # Add new profile if not found 432 | if not profile_ever_found: 433 | with open(credentials_file, 'a') as f: 434 | f.write('[%s]\n' % profile_name) 435 | f.write('aws_access_key_id = %s\n' % credentials['AccessKeyId']) 436 | f.write('aws_secret_access_key = %s\n' % credentials['SecretAccessKey']) 437 | complete_profile(f, credentials, session_token_written, mfa_serial_written) 438 | 439 | 440 | def complete_profile(f, credentials, session_token_written, mfa_serial_written): 441 | """ 442 | Append session token and mfa serial if needed 443 | 444 | :param f: 445 | :param credentials: 446 | :param session_token_written: 447 | :param mfa_serial_written: 448 | :return: 449 | """ 450 | session_token = credentials['SessionToken'] if 'SessionToken' in credentials else None 451 | mfa_serial = credentials['SerialNumber'] if 'SerialNumber' in credentials else None 452 | if session_token and not session_token_written: 453 | f.write('aws_session_token = %s\n' % session_token) 454 | if mfa_serial and not mfa_serial_written: 455 | f.write('aws_mfa_serial = %s\n' % mfa_serial) 456 | 457 | ######################################## 458 | # Main function 459 | ######################################## 460 | 461 | 462 | def read_creds(profile_name, csv_file = None, mfa_serial_arg = None, mfa_code = None, force_init = False, 463 | role_session_name = 'opinel'): 464 | """ 465 | Read credentials from anywhere (CSV, Environment, Instance metadata, config/credentials) 466 | 467 | :param profile_name: 468 | :param csv_file: 469 | :param mfa_serial_arg: 470 | :param mfa_code: 471 | :param force_init: 472 | :param 
role_session_name: 473 | 474 | :return: 475 | """ 476 | first_sts_session = False 477 | source_profile = None 478 | role_mfa_serial = None 479 | expiration = None 480 | credentials = init_creds() 481 | role_arn, external_id = read_profile_from_environment_variables() 482 | if csv_file: 483 | # Read credentials from a CSV file that was provided 484 | credentials['AccessKeyId'], credentials['SecretAccessKey'], credentials['SerialNumber'] = \ 485 | read_creds_from_csv(csv_file) 486 | elif profile_name == 'default': 487 | # Try reading credentials from environment variables (Issue #11) if the profile name is 'default' 488 | credentials = read_creds_from_environment_variables() 489 | if ('AccessKeyId' not in credentials or not credentials['AccessKeyId']) \ 490 | and not csv_file and profile_name == 'default': 491 | ec2_credentials = read_creds_from_ec2_instance_metadata() 492 | if ec2_credentials: 493 | credentials = ec2_credentials 494 | else: 495 | ecs_credentials = read_creds_from_ecs_container_metadata() 496 | if ecs_credentials: 497 | credentials = ecs_credentials 498 | # TODO support lambda 499 | if role_arn or (not credentials['AccessKeyId'] and not csv_file): 500 | # Lookup if a role is defined in ~/.aws/config 501 | if not role_arn: 502 | role_arn, source_profile, role_mfa_serial, external_id = read_profile_from_aws_config_file(profile_name) 503 | # Scout2 issue 237 - credentials file may be used to configure role-based profiles... 504 | if not role_arn: 505 | role_arn, source_profile, role_mfa_serial, external_id = \ 506 | read_profile_from_aws_config_file(profile_name, config_file = aws_credentials_file) 507 | if role_arn: 508 | # Lookup cached credentials 509 | try: 510 | cached_credentials_filename = get_cached_credentials_filename(profile_name, role_arn) 511 | with open(cached_credentials_filename, 'rt') as f: 512 | assume_role_data = json.load(f) 513 | oldcred = credentials 514 | credentials = assume_role_data['Credentials'] 515 | expiration = dateutil.parser.parse(credentials['Expiration']) 516 | expiration = expiration.replace(tzinfo=None) 517 | current = datetime.datetime.utcnow() 518 | if expiration < current: 519 | print('Role\'s credentials have expired on %s' % credentials['Expiration']) 520 | credentials = oldcred 521 | except Exception as e: 522 | pass 523 | if not expiration or expiration < current or credentials['AccessKeyId'] == None: 524 | if source_profile: 525 | credentials = read_creds(source_profile) 526 | if role_mfa_serial: 527 | credentials['SerialNumber'] = role_mfa_serial 528 | # Auto prompt for a code... 
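# ...unless one was already provided by the caller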
529 | if not mfa_code: 530 | credentials['TokenCode'] = prompt_4_mfa_code() 531 | if external_id: 532 | credentials['ExternalId'] = external_id 533 | credentials = assume_role(profile_name, credentials, role_arn, role_session_name) 534 | # Read from ~/.aws/credentials 535 | else: 536 | credentials = read_creds_from_aws_credentials_file(profile_name) 537 | if credentials['SessionToken']: 538 | if 'Expiration' in credentials and credentials['Expiration']: 539 | expiration = dateutil.parser.parse(credentials['Expiration']) 540 | expiration = expiration.replace(tzinfo=None) 541 | current = datetime.datetime.utcnow() 542 | if expiration < current: 543 | printInfo('Saved STS credentials expired on %s' % credentials['Expiration']) 544 | force_init = True 545 | else: 546 | force_init = True 547 | sts_credentials = credentials 548 | else: 549 | first_sts_session = True 550 | if force_init or (mfa_serial_arg and mfa_code): 551 | credentials = read_creds_from_aws_credentials_file(profile_name if first_sts_session 552 | else '%s-nomfa' % profile_name) 553 | if not credentials['AccessKeyId']: 554 | printInfo('Warning: Unable to determine STS token expiration; later API calls may fail.') 555 | credentials = sts_credentials 556 | else: 557 | if mfa_serial_arg: 558 | credentials['SerialNumber'] = mfa_serial_arg 559 | if mfa_code: 560 | credentials['TokenCode'] = mfa_code 561 | if 'AccessKeyId' in credentials and credentials['AccessKeyId']: 562 | credentials = init_sts_session(profile_name, credentials) 563 | # If we don't have valid creds by now, print an error message 564 | if 'AccessKeyId' not in credentials or credentials['AccessKeyId'] == None or \ 565 | 'SecretAccessKey' not in credentials or credentials['SecretAccessKey'] == None: 566 | printError('Error: could not find AWS credentials. 
Use the --help option for more information.') 567 | if not 'AccessKeyId' in credentials: 568 | credentials = { 'AccessKeyId': None } 569 | return credentials 570 | -------------------------------------------------------------------------------- /opinel/utils/fs.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function 3 | 4 | import datetime 5 | import json 6 | import os 7 | import yaml 8 | 9 | from opinel.utils.console import printError, printException, prompt_4_overwrite 10 | from opinel.utils.conditions import pass_condition 11 | 12 | 13 | 14 | class CustomJSONEncoder(json.JSONEncoder): 15 | """ 16 | JSON encoder class 17 | """ 18 | def default(self, o): 19 | if type(o) == datetime.datetime: 20 | return str(o) 21 | else: 22 | return o.__dict__ 23 | 24 | 25 | def load_data(data_file, key_name = None, local_file = False, format = 'json'): 26 | """ 27 | Load a JSON data file 28 | 29 | :param data_file: 30 | :param key_name: 31 | :param local_file: 32 | :return: 33 | """ 34 | if local_file: 35 | if data_file.startswith('/'): 36 | src_file = data_file 37 | else: 38 | src_dir = os.getcwd() 39 | src_file = os.path.join(src_dir, data_file) 40 | else: 41 | src_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') 42 | if not os.path.isdir(src_dir): 43 | src_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data') 44 | src_file = os.path.join(src_dir, data_file) 45 | with open(src_file) as f: 46 | if format == 'json': 47 | data = json.load(f) 48 | elif format == 'yaml': 49 | data = yaml.load(f) 50 | elif format not in ['json', 'yaml'] and not key_name: 51 | data = f.read() 52 | else: 53 | printError('Error, argument \'key_name\' may not be used with data in %s format.' 
% format) 54 | return None 55 | if key_name: 56 | data = data[key_name] 57 | return data 58 | 59 | 60 | def read_ip_ranges(filename, local_file = True, ip_only = False, conditions = []): 61 | """ 62 | Returns the list of IP prefixes from an ip-ranges file 63 | 64 | :param filename: 65 | :param local_file: 66 | :param conditions: 67 | :param ip_only: 68 | :return: 69 | """ 70 | targets = [] 71 | data = load_data(filename, local_file = local_file) 72 | if 'source' in data: 73 | # Filtered IP ranges 74 | conditions = data['conditions'] 75 | local_file = data['local_file'] if 'local_file' in data else False 76 | data = load_data(data['source'], local_file = local_file, key_name = 'prefixes') 77 | else: 78 | # Plain IP ranges 79 | data = data['prefixes'] 80 | for d in data: 81 | condition_passed = True 82 | for condition in conditions: 83 | if type(condition) != list or len(condition) < 3: 84 | continue 85 | condition_passed = pass_condition(d[condition[0]], condition[1], condition[2]) 86 | if not condition_passed: 87 | break 88 | if condition_passed: 89 | targets.append(d) 90 | if ip_only: 91 | ips = [] 92 | for t in targets: 93 | ips.append(t['ip_prefix']) 94 | return ips 95 | else: 96 | return targets 97 | 98 | 99 | def read_file(file_path, mode = 'rt'): 100 | """ 101 | Read the contents of a file 102 | 103 | :param file_path: Path of the file to be read 104 | 105 | :return: Contents of the file 106 | """ 107 | contents = '' 108 | with open(file_path, mode) as f: 109 | contents = f.read() 110 | return contents 111 | 112 | 113 | def save_blob_as_json(filename, blob, force_write, debug): 114 | """ 115 | Creates/Modifies file and saves python object as JSON 116 | 117 | :param filename: 118 | :param blob: 119 | :param force_write: 120 | :param debug: 121 | 122 | :return: 123 | """ 124 | try: 125 | if prompt_4_overwrite(filename, force_write): 126 | with open(filename, 'wt') as f: 127 | print('%s' % json.dumps(blob, indent=4 if debug else None, separators=(',', ': '), sort_keys=True, cls=CustomJSONEncoder), file=f) 128 | except Exception as e: 129 | printException(e) 130 | pass 131 | 132 | 133 | def save_ip_ranges(profile_name, prefixes, force_write, debug, output_format = 'json'): 134 | """ 135 | Creates/Modifies an ip-range-XXX.json file 136 | 137 | :param profile_name: 138 | :param prefixes: 139 | :param force_write: 140 | :param debug: 141 | 142 | :return: 143 | """ 144 | filename = 'ip-ranges-%s.json' % profile_name 145 | ip_ranges = {} 146 | ip_ranges['createDate'] = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') 147 | # Unique prefixes 148 | unique_prefixes = {} 149 | for prefix in prefixes: 150 | if type(prefix) == dict: 151 | unique_prefixes[prefix['ip_prefix']] = prefix 152 | else: 153 | unique_prefixes[prefix] = {'ip_prefix': prefix} 154 | unique_prefixes = list(unique_prefixes.values()) 155 | ip_ranges['prefixes'] = unique_prefixes 156 | if output_format == 'json': 157 | save_blob_as_json(filename, ip_ranges, force_write, debug) 158 | else: 159 | # Write as CSV 160 | output = 'account_id, region, ip, instance_id, instance_name\n' 161 | for prefix in unique_prefixes: 162 | output += '%s, %s, %s, %s, %s\n' % (prefix['account_id'], prefix['region'], prefix['ip_prefix'], prefix['instance_id'], prefix['name']) 163 | with open('ip-ranges-%s.csv' % profile_name, 'wt') as f: 164 | f.write(output) 165 | -------------------------------------------------------------------------------- /opinel/utils/globals.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import boto3 4 | from distutils.version import StrictVersion 5 | import os 6 | import re 7 | 8 | from opinel import __version__ as OPINEL_VERSION 9 | from opinel.utils.console import printError 10 | 11 | 12 | ######################################## 13 | # Regex 14 | ######################################## 15 | 16 | re_opinel = re.compile(r'^opinel>=([0-9.]+),<([0-9.]+).*') 17 | re_boto3 = re.compile(r'^boto3>=([0-9.]+)(,<([0-9.]+).*)?') 18 | 19 | 20 | ######################################## 21 | # Functions 22 | ######################################## 23 | 24 | def check_requirements(script_path, requirements_file = None): 25 | """ 26 | Check versions of opinel and boto3 27 | :param script_path: Path of the calling script, used to locate its requirements file 28 | :return: True if the installed versions satisfy the requirements, False otherwise 29 | """ 30 | script_dir = os.path.dirname(script_path) 31 | opinel_min_version = opinel_max_version = boto3_min_version = boto3_max_version = None 32 | # Requirements file is either next to the script or in data/requirements 33 | if not requirements_file: 34 | requirements_file = os.path.join(script_dir, 'data/requirements.txt') 35 | if not os.path.isfile(requirements_file): 36 | requirements_file = os.path.join(script_dir, 'requirements.txt') 37 | with open(requirements_file, 'rt') as f: 38 | for requirement in f.readlines(): 39 | opinel_requirements = re_opinel.match(requirement) 40 | if opinel_requirements: 41 | opinel_requirements = opinel_requirements.groups() 42 | opinel_min_version = opinel_requirements[0] 43 | opinel_max_version = opinel_requirements[1] 44 | boto3_requirements = re_boto3.match(requirement) 45 | if boto3_requirements: 46 | boto3_requirements = boto3_requirements.groups() 47 | boto3_min_version = boto3_requirements[0] 48 | boto3_max_version = boto3_requirements[2] 49 | if not check_versions(opinel_min_version, OPINEL_VERSION, opinel_max_version, 'opinel'): 50 | return False 51 | if not check_versions(boto3_min_version, boto3.__version__, boto3_max_version, 'boto3'): 52 | return False 53 | return True 54 | 55 | 56 | def check_versions(min_version, installed_version, max_version, package_name, strict = False): 57 | """ 58 | Check that the installed version of a package falls within the supported range 59 | :param min_version: Minimum version required 60 | :param installed_version: Version currently installed 61 | :param max_version: Lowest version considered too recent (untested) 62 | :param package_name: Name of the package being checked 63 | 64 | :return: False if the version check fails, True otherwise 65 | """ 66 | if not min_version: 67 | # If no minimum version was specified, pass 68 | return True 69 | if StrictVersion(installed_version) < StrictVersion(min_version): 70 | printError('Error: the version of %s installed on this system (%s) is too old. ' 71 | 'You need at least version %s to run this tool.' % (package_name, installed_version, min_version)) 72 | return False 73 | if max_version and StrictVersion(installed_version) >= StrictVersion(max_version): 74 | printError('Warning: the version of %s installed on this system (%s) is too recent; ' 75 | 'you may experience unexpected runtime errors as versions above %s have not been tested.'
% 76 | (package_name, installed_version, max_version)) 77 | if strict: 78 | printError('Warning treated as error.') 79 | return False 80 | return True 81 | 82 | 83 | def manage_dictionary(dictionary, key, init, callback = None): 84 | """ 85 | 86 | :param dictionary: 87 | :param key: 88 | :param init: 89 | :param callback: 90 | 91 | :return: 92 | """ 93 | if not str(key) in dictionary: 94 | dictionary[str(key)] = init 95 | manage_dictionary(dictionary, key, init) 96 | if callback: 97 | callback(dictionary[key]) 98 | return dictionary 99 | 100 | 101 | def snake_to_camel(snake): 102 | return "".join(val.title() for val in snake.split('_')) 103 | 104 | def snake_to_words(snake, capitalize = False): 105 | return " ".join(val.title() if capitalize else val for val in snake.split('_')) 106 | -------------------------------------------------------------------------------- /opinel/utils/profiles.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import fileinput 4 | import os 5 | import re 6 | 7 | from opinel.utils.aws import get_aws_account_id 8 | from opinel.utils.console import printDebug 9 | from opinel.utils.credentials import read_creds 10 | 11 | aws_dir = os.path.join(os.path.expanduser('~'), '.aws') 12 | aws_credentials_file = os.path.join(aws_dir, 'credentials') 13 | aws_config_file = os.path.join(aws_dir, 'config') 14 | 15 | re_profile_name = re.compile(r'(\[(profile\s+)?(.*?)\])') 16 | 17 | class AWSProfile(object): 18 | 19 | def __init__(self, filename = None, raw_profile = None, name = None, credentials = None, account_id = None): 20 | self.filename = filename 21 | self.raw_profile = raw_profile 22 | self.name = name 23 | self.account_id = account_id 24 | self.attributes = {} 25 | if self.raw_profile: 26 | self.parse_raw_profile() 27 | 28 | 29 | def get_credentials(self): 30 | # For now, use the existing code... 
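# read_creds() (from opinel.utils.credentials) resolves the credentials for this profile name;
# the account ID lookup below is best-effort: any exception is swallowed, so a profile whose
# credentials cannot be used to resolve the account ID still returns whatever read_creds() produced.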
31 | self.credentials = read_creds(self.name) 32 | try: 33 | self.account_id = get_aws_account_id(self.credentials) 34 | except: 35 | pass 36 | return self.credentials 37 | 38 | 39 | def set_attribute(self, attribute, value): 40 | self.attributes[attribute] = value 41 | 42 | 43 | def parse_raw_profile(self): 44 | for line in self.raw_profile.split('\n')[1:]: 45 | line = line.strip() 46 | if line: 47 | values = line.split('=') 48 | attribute = values[0].strip() 49 | value = ''.join(values[1:]).strip() 50 | self.attributes[attribute] = value 51 | 52 | 53 | def write(self): 54 | tmp = AWSProfiles.get(self.name, quiet = True) 55 | if not self.raw_profile: 56 | self.raw_profile = tmp[0].raw_profile if len(tmp) else None 57 | if not self.filename: 58 | self.filename = tmp[0].filename if len(tmp) else self.filename 59 | if not self.raw_profile: 60 | if 'role_arn' in self.attributes and 'source_profile' in self.attributes: 61 | self.filename = aws_config_file 62 | new_raw_profile = '\n[profile %s]' % self.name 63 | else: 64 | self.filename = aws_credentials_file 65 | new_raw_profile = '\n[%s]' % self.name 66 | for attribute in self.attributes: 67 | new_raw_profile += '\n%s=%s' % (attribute, self.attributes[attribute]) 68 | with open(self.filename, 'a') as f: 69 | f.write(new_raw_profile) 70 | else: 71 | new_raw_profile = '' 72 | for line in self.raw_profile.splitlines(): 73 | line_updated = False 74 | for attribute in self.attributes: 75 | if line.startswith(attribute): 76 | new_raw_profile += '%s=%s\n' % (attribute, self.attributes[attribute]) 77 | line_updated = True 78 | break 79 | if not line_updated: 80 | new_raw_profile += '%s\n' % line 81 | with open(self.filename, 'rt') as f: 82 | contents = f.read() 83 | contents = contents.replace(self.raw_profile, new_raw_profile) 84 | with open(self.filename, 'wt') as f: 85 | f.write(contents) 86 | 87 | 88 | 89 | class AWSProfiles(object): 90 | 91 | @staticmethod 92 | def list(names = []): 93 | """ 94 | @brief 95 | 96 | :return: List of all profile names found in .aws/config and .aws/credentials 97 | """ 98 | return [p.name for p in AWSProfiles.get(names)] 99 | 100 | 101 | @staticmethod 102 | def get(names = [], quiet = False): 103 | """ 104 | """ 105 | profiles = [] 106 | profiles += AWSProfiles.find_profiles_in_file(aws_credentials_file, names, quiet) 107 | profiles += AWSProfiles.find_profiles_in_file(aws_config_file, names, quiet) 108 | return profiles 109 | 110 | 111 | @staticmethod 112 | def find_profiles_in_file(filename, names = [], quiet = True): 113 | profiles = [] 114 | if type(names) != list: 115 | names = [ names ] 116 | if not quiet: 117 | printDebug('Searching for profiles matching %s in %s ... 
' % (str(names), filename)) 118 | name_filters = [] 119 | for name in names: 120 | name_filters.append(re.compile('^%s$' % name)) 121 | if os.path.isfile(filename): 122 | with open(filename, 'rt') as f: 123 | aws_credentials = f.read() 124 | existing_profiles = re_profile_name.findall(aws_credentials) 125 | profile_count = len(existing_profiles) - 1 126 | for i, profile in enumerate(existing_profiles): 127 | matching_profile = False 128 | raw_profile = None 129 | for name_filter in name_filters: 130 | if name_filter.match(profile[2]): 131 | matching_profile = True 132 | i1 = aws_credentials.index(profile[0]) 133 | if i < profile_count: 134 | i2 = aws_credentials.index(existing_profiles[i+1][0]) 135 | raw_profile = aws_credentials[i1:i2] 136 | else: 137 | raw_profile = aws_credentials[i1:] 138 | if len(name_filters) == 0 or matching_profile: 139 | profiles.append(AWSProfile(filename = filename, raw_profile = raw_profile, name = profile[2])) 140 | return profiles 141 | 142 | -------------------------------------------------------------------------------- /opinel/utils/threads.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from threading import Thread 4 | try: 5 | # Python2 6 | from Queue import Queue 7 | except ImportError: 8 | # Python3 9 | from queue import Queue 10 | 11 | from opinel.utils.console import printException 12 | 13 | 14 | 15 | def thread_work(targets, function, params = {}, num_threads = 0): 16 | """ 17 | Generic multithreading helper 18 | 19 | :param targets: 20 | :param function: 21 | :param params: 22 | :param num_threads: 23 | 24 | :return: 25 | """ 26 | q = Queue(maxsize=0) 27 | if not num_threads: 28 | num_threads = len(targets) 29 | for i in range(num_threads): 30 | worker = Thread(target=function, args=(q, params)) 31 | worker.setDaemon(True) 32 | worker.start() 33 | for target in targets: 34 | q.put(target) 35 | q.join() 36 | 37 | 38 | def threaded_per_region(q, params): 39 | """ 40 | Helper for multithreading on a per-region basis 41 | 42 | :param q: 43 | :param params: 44 | 45 | :return: 46 | """ 47 | while True: 48 | try: 49 | params['region'] = q.get() 50 | method = params['method'] 51 | method(params) 52 | except Exception as e: 53 | printException(e) 54 | finally: 55 | q.task_done() 56 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | opinel/data/requirements.txt -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # distutils/setuptools install script for opinel 4 | import os 5 | from setuptools import setup, find_packages 6 | 7 | # Package info 8 | NAME = 'opinel' 9 | ROOT = os.path.dirname(__file__) 10 | VERSION = __import__(NAME).__version__ 11 | 12 | # Requirements 13 | requirements = [] 14 | with open('requirements.txt') as f: 15 | for r in f.readlines(): 16 | requirements.append(r.strip()) 17 | 18 | # Setup 19 | setup( 20 | name=NAME, 21 | version=VERSION, 22 | description='Code shared between Scout2 and AWS-recipes.', 23 | long_description=open('README.rst').read(), 24 | author='l01cd3v', 25 | author_email='l01cd3v@gmail.com', 26 | url='https://github.com/nccgroup/opinel', 27 | packages=[ 28 | 'opinel', 'opinel.utils', 'opinel.services' 29 | ], 30 | package_data={ 31 | 'opinel': [ 32 | 
'data/*.json', 33 | ] 34 | }, 35 | include_package_data=True, 36 | install_requires=requirements, 37 | license='GNU General Public License v2 (GPLv2)', 38 | classifiers=[ 39 | 'Development Status :: 4 - Beta', 40 | 'Intended Audience :: Developers', 41 | 'Intended Audience :: Information Technology', 42 | 'Intended Audience :: System Administrators', 43 | 'Natural Language :: English', 44 | 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 45 | 'Programming Language :: Python', 46 | 'Programming Language :: Python :: 2.7', 47 | 'Programming Language :: Python :: 3', 48 | 'Programming Language :: Python :: 3.3', 49 | 'Programming Language :: Python :: 3.4' 50 | ], 51 | ) 52 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nccgroup/opinel/2d4f5b96e0a1f9cb0356629f4f87e4ed99ce2606/tests/__init__.py -------------------------------------------------------------------------------- /tests/data/accessKeys1.csv: -------------------------------------------------------------------------------- 1 | Access key ID,Secret access key 2 | AKIAJJ5TE81PVO72WPTQ,67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123 3 | -------------------------------------------------------------------------------- /tests/data/accessKeys2.csv: -------------------------------------------------------------------------------- 1 | AKIAJJ5TE81PVO72WPTQ,67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123 2 | -------------------------------------------------------------------------------- /tests/data/accessKeys3.csv: -------------------------------------------------------------------------------- 1 | Access key ID,Secret access key,foo,bar,baz 2 | AKIAJJ5TE81PVO72WPTQ,67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123,,,,arn:aws:iam::123456789111:mfa/l01cd3v,,, 3 | -------------------------------------------------------------------------------- /tests/data/accessKeys4.csv: -------------------------------------------------------------------------------- 1 | Access key ID,Secret access key,foo,bar,baz 2 | AKIAJJ5TE81PVO72WPTQ,67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123,,,,arn:aws:iam::123456789111:mfa/l01cd3v 3 | -------------------------------------------------------------------------------- /tests/data/cloudformation-001.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Opinel unit test 001", 4 | "Resources": { 5 | "OpinelTopic001": { 6 | "Type": "AWS::SNS::Topic", 7 | "Properties" : { 8 | "TopicName": "OpinelUnitTestTopic001" 9 | } 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /tests/data/cloudformation-002.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Opinel unit test 002", 4 | "Parameters": { 5 | "Param002": { 6 | "Type": "String", 7 | "Description": "A parameter", 8 | "MinLength": "1", 9 | "MaxLength": "100" 10 | } 11 | }, 12 | "Resources": { 13 | "OpinelTopic002": { 14 | "Type": "AWS::SNS::Topic", 15 | "Properties" : { 16 | "TopicName": "OpinelUnitTestTopic002" 17 | } 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /tests/data/cloudformation-003.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Opinel unit test 003", 4 | "Resources": { 5 | "OpinelTopic003": { 6 | "Type": "AWS::SNS::Topic", 7 | "Properties" : { 8 | "TopicName": "OpinelUnitTestTopic003" 9 | } 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /tests/data/cloudformation-003bad.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Opinel unit test 003", 4 | "Resources": { 5 | "OpinelTopic003": { 6 | "Type": "AWS::SNS::InvalidTopic", 7 | "Properties" : { 8 | "TopicName": "OpinelUnitTestTopic003" 9 | } 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /tests/data/cloudformation-004.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Opinel unit test 004", 4 | "Resources": { 5 | "OpinelTopic004": { 6 | "Type": "AWS::SNS::Topic", 7 | "Properties" : { 8 | "TopicName": "OpinelUnitTestTopic004" 9 | } 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /tests/data/cloudformation-005.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Opinel unit test 005", 4 | "Resources": { 5 | "OpinelTopic005": { 6 | "Type": "AWS::SNS::Topic", 7 | "Properties" : { 8 | "TopicName": "OpinelUnitTestTopic005" 9 | } 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /tests/data/cloudformation-005bad.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Opinel unit test 005", 4 | "Resources": { 5 | "OpinelTopic005": { 6 | "Type": "AWS::SNS::InvalidTopic", 7 | "Properties" : { 8 | "TopicName": "OpinelUnitTestTopic005" 9 | } 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /tests/data/config: -------------------------------------------------------------------------------- 1 | [profile l01cd3v-role1] 2 | role_arn = arn:aws:iam::123456789012:role/Role1 3 | source_profile = l01cd3v-1 4 | 5 | [profile l01cd3v-role2] 6 | role_arn = arn:aws:iam::123456789012:role/Role2 7 | source_profile = l01cd3v-2 8 | aws_mfa_serial = arn:aws:iam::123456789222:mfa/l01cd3v 9 | 10 | [profile l01cd3v-role3] 11 | role_arn = arn:aws:iam::123456789012:role/Role3 12 | source_profile = l01cd3v-2 13 | mfa_serial = arn:aws:iam::123456789333:mfa/l01cd3v 14 | aws_external_id = external-id-for-role3 15 | 16 | [profile l01cd3v-role4] 17 | role_arn = arn:aws:iam::123456789012:role/Role4 18 | 19 | [profile scout2fortravis] 20 | role_arn = arn:aws:iam::179374595322:role/Scout2 21 | source_profile = travislike 22 | -------------------------------------------------------------------------------- /tests/data/credentials: -------------------------------------------------------------------------------- 1 | [l01cd3v-1] 2 | aws_access_key_id = AKIAXXXXXXXXXXXXXXX1 3 | aws_secret_access_key = deadbeefdeadbeefdeadbeefdeadbeef11111111 4 | aws_mfa_serial = arn:aws:iam::123456789111:mfa/l01cd3v 5 | [l01cd3v-2] 6 | aws_access_key_id=AKIAXXXXXXXXXXXXXXX2 7 | aws_secret_access_key=deadbeefdeadbeefdeadbeefdeadbeef22222222 8 | [l01cd3v-3] 9 | 
aws_access_key_id = ASIAXXXXXXXXXXXXXXX3 10 | aws_secret_access_key = deadbeefdeadbeefdeadbeefdeadbeef33333333 11 | aws_session_token = deadbeef333//////////ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==/ZGVhZGJlZWZkZWFkYmVlZg==+ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==/ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg== 12 | [l01cd3v-4] 13 | aws_access_key_id=ASIAXXXXXXXXXXXXXXX4 14 | aws_secret_access_key=deadbeefdeadbeefdeadbeefdeadbeef44444444 15 | aws_security_token=deadbeef444//////////ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==/ZGVhZGJlZWZkZWFkYmVlZg==+ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==/ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg== 16 | expiration=2017-11-10 04:08:21+00:00 17 | [scout2fortraviswithexternalid] 18 | aws_region_name = us-west-2 19 | role_arn = arn:aws:iam::179374595322:role/Scout2WithExternalId 20 | aws_external_id = external-id-for-scout2 21 | [testprofile] 22 | aws_access_key_id = l01cd3v 23 | aws_secret_access_key = l01cd3v 24 | aws_session_token = l01cd3v 25 | aws_security_token = l01cd3v 26 | expiration = l01cd3v 27 | randomline = l01cd3v 28 | -------------------------------------------------------------------------------- /tests/data/credentials1.csv: -------------------------------------------------------------------------------- 1 | Access key ID,Secret access key 2 | AKIAAAAAAAAAAAAAAAAA,dddddddddddddddddddddddddddddddddddddddd 3 | -------------------------------------------------------------------------------- /tests/data/credentials2.csv: -------------------------------------------------------------------------------- 1 | Access key ID,Secret access key,MFA Serial Number 2 | AKIAAAAAAAAAAAAAAAAA,dddddddddddddddddddddddddddddddddddddddd,arn:aws:iam::123456789012:mfa/username 3 | -------------------------------------------------------------------------------- /tests/data/default_args.json: -------------------------------------------------------------------------------- 1 | { 2 | "awsrecipes_sort_iam_users.py": { 3 | "common_groups": [ "SomethingDifferent" ] 4 | }, 5 | "awsrecipes_create_iam_user.py": { 6 | "force_common_group": "True" 7 | }, 8 | "shared": { 9 | "common_groups": [ "AllUsers" ], 10 | "category_groups": [ "AllHumanUsers", "AllHeadlessUsers", "AllMisconfiguredUsers" ], 11 | "category_regex": [ "", "Headless-.*", "MisconfiguredUser-.*" ] 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /tests/data/ip-ranges-1.json: -------------------------------------------------------------------------------- 1 | {"createDate": "2015-10-01-19-05-51","prefixes": [{"field_a": "a1","field_b": "b1","ip_prefix": "1.2.3.4"},{"field_a": "a2","field_b": "b2","ip_prefix": "5.6.7.8"}]} 2 | -------------------------------------------------------------------------------- /tests/data/ip-ranges-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "syncToken": "1498515131", 3 | "createDate": "2017-06-26-22-12-11", 4 | "prefixes": [ 5 | { 6 | "ip_prefix": "13.32.0.0/15", 7 | "region": "GLOBAL", 8 | "service": "AMAZON" 9 | }, 10 | { 11 | "ip_prefix": "13.52.0.0/15", 12 | "region": "ca-central-1", 13 | "service": "AMAZON" 14 | }, 15 | { 16 | "ip_prefix": "52.92.252.0/22", 17 | "region": "us-gov-west-1", 18 | "service": 
"AMAZON" 19 | }, 20 | { 21 | "ip_prefix": "177.71.207.128/26", 22 | "region": "sa-east-1", 23 | "service": "ROUTE53_HEALTHCHECKS" 24 | }, 25 | { 26 | "ip_prefix": "52.92.48.0/22", 27 | "region": "us-west-1", 28 | "service": "S3" 29 | }, 30 | { 31 | "ip_prefix": "52.92.52.0/22", 32 | "region": "ap-southeast-2", 33 | "service": "S3" 34 | }, 35 | { 36 | "ip_prefix": "13.56.0.0/16", 37 | "region": "us-west-1", 38 | "service": "EC2" 39 | }, 40 | { 41 | "ip_prefix": "13.57.0.0/16", 42 | "region": "us-west-1", 43 | "service": "EC2" 44 | }, 45 | { 46 | "ip_prefix": "13.58.0.0/15", 47 | "region": "us-east-2", 48 | "service": "EC2" 49 | }, 50 | { 51 | "ip_prefix": "13.112.0.0/14", 52 | "region": "ap-northeast-1", 53 | "service": "EC2" 54 | }, 55 | { 56 | "ip_prefix": "13.124.0.0/16", 57 | "region": "ap-northeast-2", 58 | "service": "EC2" 59 | } 60 | ] 61 | } 62 | -------------------------------------------------------------------------------- /tests/data/ip-ranges-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "source": "tests/data/ip-ranges-2.json", 3 | "local_file": "True", 4 | "conditions": [ "and", 5 | [ "region", "match", [ "us-.*" ] ] 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /tests/data/ip-ranges-4.json: -------------------------------------------------------------------------------- 1 | { 2 | "source": "tests/data/ip-ranges-2.json", 3 | "local_file": "True", 4 | "conditions": [ "and", 5 | [ "service", "equal", "EC2" ] 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /tests/data/policy1.json: -------------------------------------------------------------------------------- 1 | { 2 | "Statement": [ 3 | { 4 | "Action": [ 5 | "cloudwatch:Describe*", 6 | "iam:GetUser", 7 | "iam:List*", 8 | "s3:GetBucket*", 9 | "s3:GetObjectAcl", 10 | "s3:ListAllMyBuckets", 11 | "sqs:ListQueues" 12 | ], 13 | "Effect": "Allow", 14 | "Resource": [ 15 | "*" 16 | ] 17 | }, 18 | { 19 | "Action": [ 20 | "iam:*" 21 | ], 22 | "Effect": "Allow", 23 | "Resource": [ 24 | "*" 25 | ] 26 | }, 27 | { 28 | "Action": "*", 29 | "Effect": "Allow", 30 | "Resource": "*" 31 | } 32 | 33 | ], 34 | "Version": "2012-10-17" 35 | } 36 | -------------------------------------------------------------------------------- /tests/data/policy2.json: -------------------------------------------------------------------------------- 1 | { 2 | "Statement": [ 3 | { 4 | "Action": [ 5 | "sts:AssumeRole" 6 | ], 7 | "Effect": "Allow", 8 | "Principal": { 9 | "AWS": "arn:aws:iam::179374595322:root" 10 | } 11 | } 12 | ] , 13 | "Version": "2012-10-17" 14 | } 15 | -------------------------------------------------------------------------------- /tests/data/protocols.json: -------------------------------------------------------------------------------- 1 | { 2 | "protocols": { 3 | "-2": "TEST" 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /tests/data/protocols.txt: -------------------------------------------------------------------------------- 1 | some text here 2 | -------------------------------------------------------------------------------- /tests/data/protocols.yaml: -------------------------------------------------------------------------------- 1 | protocols: 2 | "-2": TEST 3 | -------------------------------------------------------------------------------- /tests/data/requirements1.txt: 
-------------------------------------------------------------------------------- 1 | boto3>=1.4.4 2 | opinel>=0.0.1,<42.0.0 3 | -------------------------------------------------------------------------------- /tests/data/requirements2.txt: -------------------------------------------------------------------------------- 1 | boto3>=42.0.0 2 | opinel>=3.0.1,<4.0.0 3 | -------------------------------------------------------------------------------- /tests/data/requirements3.txt: -------------------------------------------------------------------------------- 1 | boto3>=1.4.4 2 | opinel>=42.0.1,<4.0.0 3 | -------------------------------------------------------------------------------- /tests/data/statement1.json: -------------------------------------------------------------------------------- 1 | { 2 | "Action": [ 3 | "cloudwatch:Describe*", 4 | "iam:GetUser", 5 | "iam:List*", 6 | "s3:GetBucket*", 7 | "s3:GetObjectAcl", 8 | "s3:ListAllMyBuckets", 9 | "sqs:ListQueues" 10 | ], 11 | "Effect": "Allow", 12 | "Resource": [ 13 | "*" 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /tests/load_data.py: -------------------------------------------------------------------------------- 1 | 2 | # Import opinel load_data 3 | from opinel.load_data import * 4 | 5 | # 6 | # Test methods from load_data.py 7 | # 8 | class TestLoadDataClass: 9 | 10 | # 11 | # Unit tests for load_data() 12 | # 13 | def test_load_data(self): 14 | test = load_data('protocols.json', 'protocols') 15 | assert type(test) == dict 16 | assert test['1'] == 'ICMP' 17 | test = load_data('tests/data/protocols.json', 'protocols', True) 18 | assert type(test) == dict 19 | assert test['-2'] == 'TEST' 20 | # TODO : add test case without key name (both local and not local) 21 | test = load_data('protocols.json') 22 | assert type(test) == dict 23 | assert 'protocols' in test 24 | assert test['protocols']['1'] == 'ICMP' 25 | test = load_data('tests/data/protocols.json', local_file = True) 26 | assert type(test) == dict 27 | assert 'protocols' in test 28 | assert test['protocols']['-2'] == 'TEST' 29 | 30 | -------------------------------------------------------------------------------- /tests/local.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import re 4 | import subprocess 5 | 6 | with open('.travis.yml', 'rt') as f: 7 | for line in f.readlines(): 8 | match = re.match(r'(.*?)(nosetest.*)', line) 9 | if match: 10 | command = match.groups()[-1].split() 11 | print(command) 12 | subprocess.check_call(command) 13 | -------------------------------------------------------------------------------- /tests/requirements.txt: -------------------------------------------------------------------------------- 1 | ../opinel/data/requirements.txt -------------------------------------------------------------------------------- /tests/results/read_ip_ranges/ip-ranges-1a.json: -------------------------------------------------------------------------------- 1 | [{"ip_prefix": "1.2.3.4", "field_b": "b1", "field_a": "a1"}, {"ip_prefix": "5.6.7.8", "field_b": "b2", "field_a": "a2"}] 2 | -------------------------------------------------------------------------------- /tests/results/read_ip_ranges/ip-ranges-1b.json: -------------------------------------------------------------------------------- 1 | ["1.2.3.4", "5.6.7.8"] 2 | -------------------------------------------------------------------------------- /tests/results/read_ip_ranges/ip-ranges-1c.json: 
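The expected-results files under tests/results/read_ip_ranges/ pair with the ip-ranges-*.json fixtures shown earlier; ip-ranges-3.json and ip-ranges-4.json use the 'source'/'conditions' indirection that read_ip_ranges() in opinel/utils/fs.py resolves before filtering. A rough, untested sketch of the round trip, assuming the working directory is the repository root:

from opinel.utils.fs import load_data, read_ip_ranges

# ip-ranges-4.json points at ip-ranges-2.json and keeps only prefixes whose service equals EC2
ec2_only = read_ip_ranges('tests/data/ip-ranges-4.json', local_file=True, ip_only=True)
expected = load_data('tests/results/read_ip_ranges/ip-ranges-4.json', local_file=True)
assert ec2_only == expected  # ['13.56.0.0/16', '13.57.0.0/16', '13.58.0.0/15', ...]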
-------------------------------------------------------------------------------- 1 | ["1.2.3.4"] 2 | -------------------------------------------------------------------------------- /tests/results/read_ip_ranges/ip-ranges-3.json: -------------------------------------------------------------------------------- 1 | ["52.92.252.0/22", "52.92.48.0/22", "13.56.0.0/16", "13.57.0.0/16", "13.58.0.0/15"] 2 | -------------------------------------------------------------------------------- /tests/results/read_ip_ranges/ip-ranges-4.json: -------------------------------------------------------------------------------- 1 | ["13.56.0.0/16", "13.57.0.0/16", "13.58.0.0/15", "13.112.0.0/14", "13.124.0.0/16"] 2 | -------------------------------------------------------------------------------- /tests/results/read_ip_ranges/ip-ranges-a.json: -------------------------------------------------------------------------------- 1 | [{"region": "us-east-1", "ip_prefix": "23.20.0.0/14", "service": "AMAZON"}, {"region": "us-east-1", "ip_prefix": "23.20.0.0/14", "service": "EC2"}] 2 | 3 | -------------------------------------------------------------------------------- /tests/test-services-cloudformation.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import binascii 4 | import os 5 | import sys 6 | import time 7 | 8 | from opinel.services.cloudformation import * 9 | from opinel.utils.aws import connect_service, get_aws_account_id 10 | from opinel.utils.console import configPrintException 11 | from opinel.utils.credentials import read_creds, read_creds_from_environment_variables 12 | 13 | 14 | class TestOpinelServicesCloudformation: 15 | 16 | def setup(self): 17 | configPrintException(True) 18 | self.creds = read_creds_from_environment_variables() 19 | if self.creds['AccessKeyId'] == None: 20 | self.creds = read_creds('travislike') 21 | self.api_client = connect_service('cloudformation', self.creds, 'us-east-1') 22 | self.python = re.sub(r'\W+', '', sys.version) 23 | self.cleanup = {'stacks': [], 'stacksets': []} 24 | 25 | 26 | def make_travisname(self, testname): 27 | return '%s-%s-%s' % (testname, binascii.b2a_hex(os.urandom(4)).decode('utf-8'), self.python) 28 | 29 | 30 | def test_create_cloudformation_resource_from_template(self): 31 | # Tested by other functions... 
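# create_cloudformation_resource_from_template() is exercised indirectly by the create_stack()
# and create_stack_set() calls below, so no standalone assertion is made in this test case.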
32 | pass 33 | 34 | def test_create_stack(self): 35 | stack_name = self.make_travisname('OpinelUnitTestStack001') 36 | create_stack(self.api_client, stack_name, 'tests/data/cloudformation-001.json') 37 | self.cleanup['stacks'].append(stack_name) 38 | try: 39 | tags = [ {'Key': 'Opinel', 'Value': 'Opinel'} ] 40 | create_stack(self.api_client, stack_name, 'tests/data/cloudformation-001.json', tags = tags) 41 | except: 42 | pass 43 | stack_name = self.make_travisname('OpinelUnitTestStack002') 44 | params = [ 'Param002', 'l01cd3v' ] 45 | create_stack(self.api_client, stack_name, 'tests/data/cloudformation-002.json', params) 46 | self.cleanup['stacks'].append(stack_name) 47 | 48 | 49 | def test_create_or_update_stack(self): 50 | stack_name = self.make_travisname('OpinelUnitTestStack003') 51 | create_or_update_stack(self.api_client, stack_name, 'tests/data/cloudformation-003.json', wait_for_completion = True) 52 | printError('Ready for update !') 53 | self.cleanup['stacks'].append(stack_name) 54 | create_or_update_stack(self.api_client, stack_name, 'tests/data/cloudformation-003.json', wait_for_completion = True) 55 | # Trigger exception 56 | try: 57 | create_or_update_stack(self.api_client. stack_name, 'tests/data/cloudformation-003bad.json') 58 | except: 59 | pass 60 | 61 | 62 | def test_create_stack_instances(self): 63 | stack_set_name = self.make_travisname('OpinelUnitTestStackSet002') 64 | create_stack_set(self.api_client, stack_set_name, 'tests/data/cloudformation-004.json', wait_for_completion = True) 65 | operation_id = create_stack_instances(self.api_client, stack_set_name, [ get_aws_account_id(self.creds) ], [ 'us-east-1' ]) 66 | wait_for_operation(self.api_client, stack_set_name, operation_id) 67 | self.cleanup['stacksets'].append(stack_set_name) 68 | 69 | 70 | def test_create_stack_set(self): 71 | stack_set_name = self.make_travisname('OpinelUnitTestStackSet001') 72 | create_stack_set(self.api_client, stack_set_name, 'tests/data/cloudformation-004.json', wait_for_completion = True) 73 | self.cleanup['stacksets'].append(stack_set_name) 74 | 75 | 76 | def test_delete_stackset(self): 77 | # Tested in teardown() 78 | pass 79 | 80 | 81 | def test_get_stackset_ready_accounts(self): 82 | accounts_ready = [ get_aws_account_id(self.creds) ] 83 | test1 = get_stackset_ready_accounts(self.creds, accounts_ready) 84 | assert (test1 == accounts_ready) 85 | test2 = get_stackset_ready_accounts(self.creds, accounts_ready + [ '123456789012' ]) 86 | assert (test2 == accounts_ready) 87 | 88 | 89 | def test_make_awsrecipes_stack_name(self): 90 | assert (make_awsrecipes_stack_name('/home/l01cd3v/test.json') == 'AWSRecipes-test') 91 | 92 | 93 | def test_make_opinel_stack_name(self): 94 | assert (make_opinel_stack_name('/home/l01cd3v/test.json') == 'Opinel-test') 95 | 96 | 97 | def test_make_prefixed_stack_name(self): 98 | assert (make_prefixed_stack_name('test', '/home/l01cd3v/test.json') == 'test-test') 99 | assert (make_prefixed_stack_name('test', 'test') == 'test-test') 100 | 101 | 102 | def test_prepare_cloudformation_params(self): 103 | #prepare_cloudformation_params(stack_name, template_path, template_parameters, resource_type, tags=[], need_on_failure=False) 104 | # Should be tested by other calls 105 | pass 106 | 107 | 108 | def test_update_cloudformation_resource_from_template(self): 109 | pass 110 | 111 | 112 | def test_update_stack(self): 113 | pass 114 | 115 | def test_update_stack_set(self): 116 | pass 117 | 118 | 119 | def test_delete_stack_set(self): 120 | pass 121 | 122 | def 
test_wait_for_operation(self): 123 | wait_for_operation(self.api_client, 'name', 'id', 0) 124 | 125 | 126 | def test_wait_for_stack_set(self): 127 | pass 128 | 129 | def test_cloudformation_wait(self): 130 | pass 131 | 132 | def test_create_or_update_stack_set(self): 133 | stack_set_name = self.make_travisname('OpinelUnitTestStackSet003') 134 | create_or_update_stack_set(self.api_client, stack_set_name, 'tests/data/cloudformation-005.json', wait_for_completion = True) 135 | self.cleanup['stacksets'].append(stack_set_name) 136 | printError('Ready for update !') 137 | create_or_update_stack_set(self.api_client, stack_set_name, 'tests/data/cloudformation-005.json', wait_for_completion = True) 138 | # Trigger exception 139 | try: 140 | create_or_update_stack_set(self.api_client, stack_set_name, 'tests/data/cloudformation-005bad.json', wait_for_completion = True) 141 | except: 142 | pass 143 | 144 | def test_still_running(self): 145 | pass 146 | 147 | 148 | def teardown(self): 149 | if len(self.cleanup['stacks']): 150 | for stack_name in self.cleanup['stacks']: 151 | self.api_client.delete_stack(StackName = stack_name) 152 | if len(self.cleanup['stacksets']): 153 | for stack_set_name in self.cleanup['stacksets']: 154 | wait_for_stack_set(self.api_client, stack_set_name) 155 | delete_stack_set(self.api_client, stack_set_name) 156 | 157 | 158 | -------------------------------------------------------------------------------- /tests/test-services-cloudtrail.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from opinel.services.cloudtrail import * 4 | from opinel.utils.aws import connect_service 5 | from opinel.utils.credentials import read_creds, read_creds_from_environment_variables 6 | 7 | 8 | class TestOpinelServicesCloudtrail: 9 | 10 | def setup(self): 11 | self.creds = read_creds_from_environment_variables() 12 | if self.creds['AccessKeyId'] == None: 13 | self.creds = read_creds('travislike') 14 | self.api_client = connect_service('cloudtrail', self.creds, 'us-east-1') 15 | 16 | 17 | def test_get_trails(self): 18 | trails = get_trails(self.api_client) 19 | assert (type(trails) == list) 20 | trail_names = [trail['Name'] for trail in trails] 21 | assert ('GlobalTrail' in trail_names) 22 | -------------------------------------------------------------------------------- /tests/test-services-iam.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import binascii 4 | import copy 5 | import os 6 | import sys 7 | import time 8 | 9 | from opinel.services.iam import * 10 | from opinel.utils.aws import connect_service 11 | from opinel.utils.console import configPrintException, printDebug 12 | from opinel.utils.credentials import read_creds, read_creds_from_environment_variables 13 | 14 | 15 | class TestOpinelServicesIAM: 16 | 17 | def setup(self): 18 | configPrintException(True) 19 | self.creds = read_creds_from_environment_variables() 20 | if self.creds['AccessKeyId'] == None: 21 | self.creds = read_creds('travislike') 22 | self.api_client = connect_service('iam', self.creds) 23 | self.python = re.sub(r'\W+', '', sys.version) 24 | self.cleanup = {'groups': [], 'users': []} 25 | 26 | 27 | def make_travisname(self, testname): 28 | return '%s-%s-%s' % (testname, binascii.b2a_hex(os.urandom(4)).decode('utf-8'), self.python) 29 | 30 | 31 | def assert_group_create(self, groups_data, error_count, force_add = False): 32 | for group_data in groups_data: 33 | 
self.assert_create('groups', group_data, error_count, force_add) 34 | 35 | 36 | def assert_user_create(self, user_data, error_count, force_add = False): 37 | self.assert_create('users', user_data, error_count, force_add) 38 | 39 | 40 | def assert_create(self, resource_type, resource_data, error_count, force_add = False): 41 | assert len(resource_data['errors']) == error_count 42 | nameattr = '%sname' % resource_type[:-1] 43 | if force_add or error_count == 0: 44 | printDebug('Successfully created %s %s' % (resource_type[:-1], resource_data[nameattr])) 45 | self.cleanup[resource_type].append(resource_data[nameattr]) 46 | 47 | 48 | def test_create_user(self): 49 | user_data = create_user(self.api_client, self.make_travisname('OpinelUnitTest001')) 50 | self.assert_user_create(user_data, 0) 51 | user_data = create_user(self.api_client, self.cleanup['users'][0]) 52 | self.assert_user_create(user_data, 1) 53 | user_data = create_user(self.api_client, self.make_travisname('OpinelUnitTest002'), 'BlockedUsers') 54 | self.assert_user_create(user_data, 0) 55 | user_data = create_user(self.api_client, self.make_travisname('OpinelUnitTest003'), ['BlockedUsers', 'AllUsers']) 56 | self.assert_user_create(user_data, 1, True) 57 | user_data = create_user(self.api_client, self.make_travisname('OpinelUnitTest004'), with_password = True) 58 | self.assert_user_create(user_data, 0) 59 | assert 'password' in user_data 60 | assert len(user_data['password']) == 16 61 | user_data = create_user(self.api_client, self.make_travisname('OpinelUnitTest005'), with_password=True ,require_password_reset = True) 62 | self.assert_user_create(user_data, 0) 63 | assert 'password' in user_data 64 | assert len(user_data['password']) == 16 65 | user_data = create_user(self.api_client, self.make_travisname('OpinelUnitTest006'), with_access_key = True) 66 | self.assert_user_create(user_data, 0) 67 | assert 'AccessKeyId' in user_data 68 | assert user_data['AccessKeyId'].startswith('AKIA') 69 | assert 'SecretAccessKey' in user_data 70 | 71 | 72 | def test_delete_user(self): 73 | # Mostly tested as part of teardown 74 | try: 75 | delete_user(self.api_client, 'PhonyUserWithMFA') 76 | except Exception as e: 77 | pass 78 | pass 79 | 80 | 81 | def test_add_user_to_group(self): 82 | user010 = create_user(self.api_client, self.make_travisname('OpinelUnitTest010')) 83 | self.assert_user_create(user010, 0) 84 | user011 = create_user(self.api_client, self.make_travisname('OpinelUnitTest011')) 85 | self.assert_user_create(user011, 0) 86 | add_user_to_group(self.api_client, user010['username'], 'BlockedUsers', True) 87 | add_user_to_group(self.api_client, user011['username'], 'BlockedUsers', False) 88 | 89 | 90 | def test_delete_virtual_mfa_device(self): 91 | try: 92 | delete_virtual_mfa_device(self.api_client, 'arn:aws:iam::179374595322:mfa/PhonyUserWithMFA') 93 | except Exception as e: 94 | assert (e.response['Error']['Code'] == 'AccessDenied') 95 | 96 | 97 | def test_get_access_keys(self): 98 | user020 = create_user(self.api_client, self.make_travisname('OpinelUnitTest020'), with_access_key = True) 99 | self.assert_user_create(user020, 0) 100 | access_keys = get_access_keys(self.api_client, self.cleanup['users'][0]) 101 | assert len(access_keys) == 1 102 | 103 | 104 | def test_show_access_keys(self): 105 | user021 = create_user(self.api_client, self.make_travisname('OpinelUnitTest021'), with_access_key = True) 106 | self.assert_user_create(user021, 0) 107 | show_access_keys(self.api_client, self.cleanup['users'][0]) 108 | 109 | 110 | def 
test_init_group_category_regex(self): 111 | result = init_group_category_regex(['a', 'b'], ['', '.*hello.*']) 112 | assert (type(result) == list) 113 | result = init_group_category_regex(['a', 'b'], ['', '']) 114 | assert (result == None) 115 | result = init_group_category_regex(['a', 'b', 'c'], ['.*hello.*']) 116 | assert (result == None) 117 | 118 | 119 | def test_create_groups(self): 120 | group001 = self.make_travisname('OpinelUnitTest001') 121 | groups = create_groups(self.api_client, group001) 122 | self.assert_group_create(groups, 0) 123 | group002 = self.make_travisname('OpinelUnitTest002') 124 | group003 = self.make_travisname('OpinelUnitTest003') 125 | groups = create_groups(self.api_client, [ group002, group003 ]) 126 | self.assert_group_create(groups, 0) 127 | group004 = self.make_travisname('HelloWorld') 128 | groups = create_groups(self.api_client, group004) 129 | self.assert_group_create(groups, 1) 130 | 131 | 132 | def teardown(self): 133 | if len(self.cleanup['users']): 134 | self.delete_resources('users') 135 | if len(self.cleanup['groups']): 136 | self.delete_resources('groups') 137 | 138 | 139 | def delete_resources(self, resource_type): 140 | resources = copy.deepcopy(self.cleanup[resource_type]) 141 | while True: 142 | unmodifiable_resource = False 143 | remaining_resources = [] 144 | printDebug('Deleting the following %s: %s' % (resource_type, str(resources)) ) 145 | time.sleep(5) 146 | for resource in resources: 147 | if resource_type == 'groups': 148 | errors = [] 149 | try: 150 | self.api_client.delete_group(GroupName = resource) 151 | except: 152 | errors = [ 'EntityTemporarilyUnmodifiable' ] 153 | else: 154 | method = globals()['delete_%s' % resource_type[:-1]] 155 | errors = method(self.api_client, resource) 156 | if len(errors): 157 | printDebug('Errors when deleting %s' % resource) 158 | remaining_resources.append(resource) 159 | for handled_code in ['EntityTemporarilyUnmodifiable', 'DeleteConflict']: 160 | if handled_code in errors: 161 | unmodifiable_resource = True 162 | else: 163 | printError('Failed to delete %s %s' % (resource_type[:-1], resource)) 164 | assert (False) 165 | resources = copy.deepcopy(remaining_resources) 166 | if not unmodifiable_resource: 167 | break 168 | 169 | -------------------------------------------------------------------------------- /tests/test-services-organizations.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from opinel.services.cloudtrail import * 4 | from opinel.utils.aws import connect_service 5 | from opinel.utils.console import configPrintException, printError, printException, printInfo 6 | from opinel.utils.credentials import read_creds, read_creds_from_environment_variables, assume_role 7 | 8 | from opinel.services.organizations import * 9 | 10 | class TestOpinelServicesOrganizations: 11 | 12 | def setup(self): 13 | configPrintException(True) 14 | self.creds = read_creds_from_environment_variables() 15 | if self.creds['AccessKeyId'] == None: 16 | self.creds = read_creds('travislike') 17 | self.org_creds = assume_role('OpinelUnitTest', self.creds,'arn:aws:iam::990492604467:role/OpinelUnitTest', 'opinelunittesting') 18 | self.badapi_client = connect_service('organizations', self.creds, 'us-east-1') 19 | self.api_client = connect_service('organizations', self.org_creds, 'us-east-1') 20 | 21 | 22 | def test_get_children_organizational_units(self): 23 | try: 24 | get_children_organizational_units(self.badapi_client, [{'Id': 'r-6qnh'}]) 25 | except 
Exception as e: 26 | assert (e.response['Error']['Code'] == 'AccessDeniedException') 27 | ous = get_children_organizational_units(self.api_client, [{'Id': 'r-6qnh'}]) 28 | self.check_ous(ous) 29 | 30 | 31 | def test_get_organization_account_ids(self): 32 | try: 33 | get_organization_account_ids(self.badapi_client) 34 | except Exception as e: 35 | assert (e.response['Error']['Code'] == 'AccessDeniedException') 36 | assert ('990492604467' in get_organization_account_ids(self.api_client)) 37 | assert ('990492604467' not in get_organization_account_ids(self.api_client, exceptions = ['990492604467'])) 38 | get_organization_account_ids(self.api_client, quiet = False) 39 | 40 | 41 | def test_get_organization_accounts(self): 42 | try: 43 | get_organization_accounts(self.badapi_client) 44 | except Exception as e: 45 | assert (e.response['Error']['Code'] == 'AccessDeniedException') 46 | accounts = get_organization_accounts(self.api_client) 47 | self.check_accounts(accounts) 48 | 49 | 50 | def test_get_organizational_units(self): 51 | try: 52 | get_organizational_units(self.badapi_client) 53 | except Exception as e: 54 | assert (e.response['Error']['Code'] == 'AccessDeniedException') 55 | ous = get_organizational_units(self.api_client) 56 | self.check_ous(ous) 57 | 58 | 59 | def test_list_accounts_for_parent(self): 60 | try: 61 | list_accounts_for_parent(self.badapi_client, {'Id': 'r-6qnh'}) 62 | except Exception as e: 63 | assert (e.response['Error']['Code'] == 'AccessDeniedException') 64 | accounts = list_accounts_for_parent(self.api_client, {'Id': 'r-6qnh'}) 65 | self.check_accounts(accounts) 66 | 67 | 68 | def check_accounts(self, accounts): 69 | root_found = False 70 | for account in accounts: 71 | if account['Id'] == '990492604467': 72 | root_found = True 73 | break 74 | assert (root_found) 75 | 76 | def check_ous(self, ous): 77 | ou_found = False 78 | for ou in ous: 79 | if ou['Id'] == 'ou-6qnh-jqod4xev': 80 | ou_found = True 81 | break 82 | assert (ou_found) 83 | -------------------------------------------------------------------------------- /tests/test-services-s3.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from opinel.services.s3 import * 4 | from opinel.utils.aws import connect_service 5 | from opinel.utils.credentials import read_creds, read_creds_from_environment_variables 6 | 7 | 8 | class TestOpinelServicesS3: 9 | 10 | def setup(self): 11 | self.creds = read_creds_from_environment_variables() 12 | if self.creds['AccessKeyId'] == None: 13 | self.creds = read_creds('travislike') 14 | self.api_client = connect_service('s3', self.creds, 'us-east-1') 15 | 16 | 17 | def test_get_s3_bucket_location(self): 18 | location = get_s3_bucket_location(self.api_client, 'l01cd3v-scout2-region-sa-east-1') 19 | assert (location == 'sa-east-1') 20 | location = get_s3_bucket_location(self.api_client, 'l01cd3v-scout2-region-us-east-1') 21 | assert (location == 'us-east-1') 22 | location = get_s3_bucket_location(self.api_client, 'l01cd3v-scout2-region-eu-central-1') 23 | assert (location == 'eu-central-1') 24 | -------------------------------------------------------------------------------- /tests/test-tests.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import importlib 4 | import os 5 | import re 6 | import types 7 | 8 | from opinel.utils.console import printError 9 | 10 | class TestTests: 11 | 12 | def setup(self): 13 | self.submodules = [] 14 | for (root, 
dirnames, filenames) in os.walk('opinel'): 15 | for filename in filenames: 16 | if filename.endswith('.py') and filename != '__init__.py': 17 | self.submodules.append(os.path.join(root, filename)) 18 | 19 | 20 | def module_filename_to_parts(self, filename): 21 | filename = filename.replace('opinel/', '') 22 | part1 = os.path.dirname(filename) 23 | part2 = os.path.basename(filename).replace('.py', '') 24 | return (part1, part2) 25 | 26 | 27 | def test_one_testfile_per_submodule(self): 28 | for filename in self.submodules: 29 | part1, part2 = self.module_filename_to_parts(filename) 30 | test_filename = 'tests/test-%s-%s.py' % (part1, part2) 31 | try: 32 | assert (os.path.exists(test_filename)) 33 | assert (os.path.isfile(test_filename)) 34 | except: 35 | printError('Missing file: %s' % test_filename) 36 | assert (False) 37 | 38 | def test_call_each_testfile(self): 39 | with open('.travis.yml', 'rt') as f: 40 | contents = f.read() 41 | for filename in self.submodules: 42 | part1, part2 = self.module_filename_to_parts(filename) 43 | test_filename = 'tests/test-%s-%s.py' % (part1, part2) 44 | check = re.findall(r'%s' % test_filename, contents) 45 | if not check: 46 | printError('Missing call in Travis configuration: %s' % test_filename) 47 | assert (False) 48 | 49 | def test_one_testcase_per_function(self): 50 | missing_testcase = False 51 | for submodule_filename in self.submodules: 52 | submodule_name = submodule_filename.replace('/', '.').replace('.py', '') 53 | submodule = importlib.import_module(submodule_name) 54 | submodule_functions = [ f for f in dir(submodule) if type(getattr(submodule, f)) == types.FunctionType ] 55 | with open(submodule_filename, 'rt') as f: 56 | contents = f.read() 57 | submodule_definitions = re.findall(r'def (.*?)\(', contents) 58 | submodule_functions = [ f for f in submodule_functions if f in submodule_definitions ] 59 | part1, part2 = self.module_filename_to_parts(submodule_filename) 60 | testcase_filename = 'tests/test-%s-%s.py' % (part1, part2) 61 | with open(testcase_filename, 'rt') as f: 62 | contents = f.read() 63 | testclass_name = re.findall(r'class (.*?)(:|\()', contents)[0][0] 64 | testcase = importlib.import_module('tests.test-%s-%s' % (part1, part2)) 65 | testclass = getattr(testcase, testclass_name) 66 | testcase_functions = [ f for f in dir(testclass) if f.startswith('test_') and callable(getattr(testclass, f)) ] 67 | for function in submodule_functions: 68 | test_function = 'test_%s' % function 69 | if test_function not in testcase_functions: 70 | ordered_case_found = False 71 | regex = re.compile('test_\d+_%s' % function) 72 | for testcase_function in testcase_functions: 73 | if regex.match(testcase_function): 74 | ordered_case_found = True 75 | break 76 | if not ordered_case_found: 77 | printError('Missing test case in %s: %s' % (testcase_filename, test_function)) 78 | missing_testcase = True 79 | if missing_testcase: 80 | assert (False) 81 | -------------------------------------------------------------------------------- /tests/test-utils-aws.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from botocore.config import Config 4 | 5 | from opinel.utils.aws import * 6 | from opinel.utils.credentials import read_creds, read_creds_from_environment_variables 7 | 8 | class TestOpinelAWS: 9 | 10 | def setup(self): 11 | self.creds = read_creds_from_environment_variables() 12 | if self.creds['AccessKeyId'] == None: 13 | self.creds = read_creds('travislike') 14 | 15 | 16 | def 
test_build_region_list(self): 17 | assert type(build_region_list('ec2', [])) == list 18 | assert type(build_region_list('ec2', [], 'aws-us-gov')) == list 19 | assert 'cn-north-1' in build_region_list('ec2', [], 'aws-cn') 20 | assert 'cn-north-1' not in build_region_list('ec2') 21 | assert 'us-gov-west-1' in build_region_list('ec2', [], 'aws-us-gov') 22 | assert 'us-gov-west-1' not in build_region_list('ec2') 23 | assert ['us-east-1'] == build_region_list('ec2', ['us-east-1']) 24 | assert 'us-west-2' in build_region_list('ec2', ['us-east-1', 'us-east-2', 'us-west-2']) 25 | assert 'us-east-1' not in build_region_list('ec2', ['us-west-1']) 26 | assert 'us-east-1' not in build_region_list('ec2', ['us-east-1', 'us-east-2'], 'aws-cn') 27 | assert build_region_list('') == [] 28 | 29 | 30 | def test_connect_service(self): 31 | client = connect_service('iam', self.creds) 32 | client = connect_service('iam', self.creds, config={}) 33 | client = connect_service('iam', self.creds, silent=True) 34 | client = connect_service('ec2', self.creds, region_name = 'us-east-1') 35 | try: 36 | client = connect_service('opinelunittest', creds) 37 | assert(False) 38 | except: 39 | pass 40 | config = Config(region_name = 'us-east-1') 41 | client = connect_service('ec2', self.creds, config = config) 42 | try: 43 | client = connect_service('ec2', self.creds, region_name = config) 44 | assert(False) 45 | except: 46 | pass 47 | 48 | 49 | def test_get_aws_account_id(self): 50 | account_id = get_aws_account_id(self.creds) 51 | assert (account_id == '179374595322') 52 | 53 | 54 | def test_get_caller_identity(self): 55 | result = { 56 | "Account": "179374595322", 57 | "UserId": [ "AIDAISSRBZ2MQ4EEY25GM", "AIDAI6OKAM7LYCYPYQVFK" ], 58 | "Arn": [ "arn:aws:iam::179374595322:user/CI-local", "arn:aws:iam::179374595322:user/CI-travis-opinel" ] 59 | } 60 | identity = get_caller_identity(self.creds) 61 | assert (identity['Account'] == result['Account']) 62 | assert (identity['UserId'] in result['UserId']) 63 | assert (identity['Arn'] in result['Arn']) 64 | 65 | 66 | def test_get_name(self): 67 | src1 = {'Id': 'IdValue'} 68 | src2 = {'Tags': [{'Key': 'Foo', 'Value': 'Bar'}, {'Key': 'Name', 'Value': 'TaggedName'}, {'Key': 'Opinel', 'Value': 'UnitTest'}], 'Id': 'IdValue'} 69 | src3 = {'Tags': [{'Key': 'Foo', 'Value': 'Bar'}, {'Key': 'Fake', 'Value': 'TaggedName'}, {'Key': 'Opinel', 'Value': 'UnitTest'}], 'Id': 'IdValue'} 70 | name = get_name(src1, {}, 'Id') 71 | assert (name == 'IdValue') 72 | name = get_name(src2, {}, 'Id') 73 | assert (name == 'TaggedName') 74 | name = get_name(src3, {}, 'Id') 75 | assert (name == 'IdValue') 76 | 77 | 78 | def test_get_partition_name(self): 79 | partition_name = get_partition_name(self.creds) 80 | assert (partition_name == 'aws') 81 | 82 | 83 | def test_get_username(self): 84 | username = get_username(self.creds) 85 | assert (username == 'CI-local' or username == 'CI-travis-opinel') 86 | 87 | 88 | def test_handle_truncated_response(self): 89 | iam_client = connect_service('iam', self.creds) 90 | users = handle_truncated_response(iam_client.list_users, {'MaxItems': 5}, ['Users'])['Users'] 91 | assert (len(users) > 5) 92 | 93 | def test_is_throttled(self): 94 | pass 95 | -------------------------------------------------------------------------------- /tests/test-utils-cli_parser.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import copy 4 | import shutil 5 | 6 | from opinel.utils.cli_parser import * 7 | 8 | class 
TestOpinelUtilsCliParserClass: 9 | 10 | def cmp(self, d1, d2, root = True): 11 | """ 12 | Implement cmp() for Python3 tests 13 | 14 | :return 15 | -3 D1 and D2 have types that mismatch 16 | -2 D1 has more keys than D2 17 | -1 D1 has key larger than D2 18 | 0 D1 and D2 are identical 19 | 1 D2 has a key larger than D1 20 | 2 D2 has more keys than D1 21 | 3 D1 and D2 have keys with type mismatch 22 | """ 23 | tmp = copy.deepcopy(d2) 24 | if type(d1) in [dict, list] and type(d1) != type(tmp): 25 | return -3 if root else 3 26 | elif type(d1) == dict: 27 | for k1 in d1: 28 | if k1 not in tmp: 29 | return -2 30 | else: 31 | val = tmp.pop(k1) 32 | result = self.cmp(d1[k1], val, False) 33 | if result != 0: 34 | return result 35 | if len(tmp) > 0: 36 | return 2 37 | elif type(d1) == list: 38 | for (i, v) in enumerate(d1): 39 | if v != d2[i]: 40 | return (v > d2[i]) - (v < d2[i]) 41 | elif d1 != tmp: 42 | return (d1 > tmp) - (d1 < tmp) 43 | return 0 44 | 45 | def setup(self): 46 | if not os.path.isdir(opinel_arg_dir): 47 | os.makedirs(opinel_arg_dir) 48 | shutil.copyfile('tests/data/default_args.json', os.path.join(opinel_arg_dir, 'default.json')) 49 | 50 | def test_class(self): 51 | parser = OpinelArgumentParser() 52 | parser.add_argument('debug') 53 | parser.add_argument('dry-run') 54 | parser.add_argument('profile') 55 | parser.add_argument('regions') 56 | parser.add_argument('partition-name') 57 | parser.add_argument('vpc') 58 | parser.add_argument('force') 59 | parser.add_argument('ip-ranges') 60 | parser.add_argument('ip-ranges-name-key') 61 | parser.add_argument('mfa-serial') 62 | parser.add_argument('mfa-code') 63 | parser.add_argument('csv-credentials') 64 | parser.add_argument('bucket-name') 65 | parser.add_argument('group-name') 66 | parser.add_argument('user-name') 67 | parser.add_argument('foo1', help='I need somebody', nargs='+', default=[]) 68 | parser.add_argument('bar1', help='I need somebody', action='store_true', default=False) 69 | parser.add_argument('foo2', help='I need somebody', nargs='+', default=[]) 70 | parser.add_argument('bar2', help='I need somebody', action='store_true', default=False) 71 | 72 | # Check exception case 73 | try: 74 | parser.add_argument('opinelunittest') # Should throw an exception 75 | assert False 76 | except: 77 | pass 78 | 79 | # Invoke parse_args() 80 | parser.parser.add_argument('--with-coverage', dest='unittest', default=None, help='Unit test artefact') 81 | args = parser.parse_args() 82 | return 83 | 84 | def test_read_default_args(self): 85 | # 1 missed statement due to reading sys.argv[] 86 | expected_shared_args = { 87 | 'category_groups': [ 88 | 'AllHumanUsers', 89 | 'AllHeadlessUsers', 90 | 'AllMisconfiguredUsers' 91 | ], 92 | 'common_groups': [ 93 | 'AllUsers' 94 | ], 95 | 'category_regex': [ 96 | '', 97 | 'Headless-.*', 98 | 'MisconfiguredUser-.*' 99 | ] 100 | } 101 | # Test of cmp() 102 | tmp1 = copy.deepcopy(expected_shared_args) 103 | tmp1['category_groups'] = 'foobar' 104 | assert self.cmp(expected_shared_args, tmp1) == 3 105 | assert self.cmp(tmp1, 'expectedsharedargs') == -3 106 | tmp1 = copy.deepcopy(expected_shared_args) 107 | tmp1.pop('category_groups') 108 | assert self.cmp(tmp1, expected_shared_args) == 2 109 | assert self.cmp(expected_shared_args, tmp1) == -2 110 | tmp1 = copy.deepcopy(expected_shared_args) 111 | tmp1['common_groups'] = [ '0' ] 112 | assert self.cmp(expected_shared_args, tmp1) == 1 113 | assert self.cmp(tmp1, expected_shared_args) == -1 114 | # Test of read_default_args 115 | shared_args = 
read_default_args('shared') 116 | assert self.cmp(shared_args, expected_shared_args) == 0 117 | default_args = read_default_args('awsrecipes_foobar.py') 118 | assert self.cmp(default_args, expected_shared_args) == 0 119 | default_args = read_default_args('awsrecipes_create_iam_user.py') 120 | tmp1 = copy.deepcopy(expected_shared_args) 121 | tmp1['force_common_group'] = 'True' 122 | assert self.cmp(default_args, tmp1) == 0 123 | default_args = read_default_args('awsrecipes_sort_iam_users.py') 124 | tmp1 = copy.deepcopy(expected_shared_args) 125 | tmp1['common_groups'] = ['SomethingDifferent'] 126 | assert self.cmp(default_args, tmp1) == 0 127 | tmp_opinel_arg_dir = '%s.tmp' % opinel_arg_dir 128 | shutil.move(opinel_arg_dir, tmp_opinel_arg_dir) 129 | default_args = read_default_args('awsrecipes_sort_iam_users.py') 130 | shutil.rmtree(opinel_arg_dir) 131 | assert self.cmp(default_args, {}) == 0 132 | shutil.move(tmp_opinel_arg_dir, opinel_arg_dir) 133 | 134 | -------------------------------------------------------------------------------- /tests/test-utils-conditions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from opinel.utils.conditions import * 4 | 5 | class TestOpinelConditionClass: 6 | """ 7 | Test opinel.condition 8 | """ 9 | 10 | def test___prepare_age_test(self): 11 | pass 12 | 13 | def test_pass_condition(self): 14 | 15 | assert pass_condition('a', 'equal', 'a') == True 16 | assert pass_condition('a', 'equal', 'b') == False 17 | assert pass_condition(1, 'equal', 1) == True 18 | assert pass_condition(1, 'equal', 0) == False 19 | assert pass_condition(('a', 'b'), 'equal', ('a', 'b')) == True 20 | assert pass_condition(('a', 'b'), 'equal', ('b', 'a')) == False 21 | assert pass_condition('a', 'notEqual', 'a') == False 22 | assert pass_condition('a', 'notEqual', 'b') == True 23 | assert pass_condition(1, 'notEqual', 1) == False 24 | assert pass_condition(1, 'notEqual', 0) == True 25 | assert pass_condition(('a', 'b'), 'notEqual', ('a', 'b')) == False 26 | assert pass_condition(('a', 'b'), 'notEqual', ('b', 'a')) == True 27 | 28 | assert pass_condition(1, 'lessThan', 2) == True 29 | assert pass_condition(1, 'lessThan', 1) == False 30 | assert pass_condition(2, 'lessThan', 1) == False 31 | assert pass_condition(1, 'lessOrEqual', 2) == True 32 | assert pass_condition(1, 'lessOrEqual', 1) == True 33 | assert pass_condition(2, 'lessOrEqual', 1) == False 34 | assert pass_condition(1, 'moreThan', 2) == False 35 | assert pass_condition(1, 'moreThan', 1) == False 36 | assert pass_condition(2, 'moreThan', 1) == True 37 | assert pass_condition(1, 'moreOrEqual', 2) == False 38 | assert pass_condition(1, 'moreOrEqual', 1) == True 39 | assert pass_condition(2, 'moreOrEqual', 1) == True 40 | 41 | assert pass_condition({}, 'empty', '') == True 42 | assert pass_condition({'a': 'b'}, 'empty', '') == False 43 | assert pass_condition([], 'empty', '') == True 44 | assert pass_condition([None], 'empty', '') == True 45 | assert pass_condition(['a'], 'empty', '') == False 46 | assert pass_condition({}, 'notEmpty', '') == False 47 | assert pass_condition({'a': 'b'}, 'notEmpty', '') == True 48 | assert pass_condition([], 'notEmpty', '') == False 49 | assert pass_condition([None], 'notEmpty', '') == False 50 | assert pass_condition(['a'], 'notEmpty', '') == True 51 | assert pass_condition(None, 'null', '') == True 52 | assert pass_condition('None', 'null', '') == True 53 | assert pass_condition(None, 'notNull', '') == False 54 | assert 
pass_condition('None', 'notNull', '') == False 55 | 56 | assert pass_condition(True, 'true', '') == True 57 | assert pass_condition('TrUE', 'true', '') == True 58 | assert pass_condition(False, 'false', '') == True 59 | assert pass_condition('FaLSe', 'notTrue', '') == True 60 | 61 | test_list1 = [] 62 | test_dict1 = {} 63 | test_list2 = [ 64 | 'a'] 65 | test_dict2 = {'a': 'b'} 66 | test_list3 = ['a', 'b'] 67 | test_dict3 = {'a': 'b','c': 'd'} 68 | assert pass_condition(test_list1, 'lengthLessThan', 1) == True 69 | assert pass_condition(test_list1, 'lengthMoreThan', 1) == False 70 | assert pass_condition(test_list1, 'lengthEqual', 1) == False 71 | assert pass_condition(test_list2, 'lengthLessThan', 1) == False 72 | assert pass_condition(test_list2, 'lengthMoreThan', 1) == False 73 | assert pass_condition(test_list2, 'lengthEqual', 1) == True 74 | assert pass_condition(test_list3, 'lengthLessThan', 1) == False 75 | assert pass_condition(test_list3, 'lengthMoreThan', 1) == True 76 | assert pass_condition(test_list3, 'lengthEqual', 1) == False 77 | assert pass_condition(test_dict1, 'lengthLessThan', 1) == True 78 | assert pass_condition(test_dict1, 'lengthMoreThan', 1) == False 79 | assert pass_condition(test_dict1, 'lengthEqual', 1) == False 80 | assert pass_condition(test_dict2, 'lengthLessThan', 1) == False 81 | assert pass_condition(test_dict2, 'lengthMoreThan', 1) == False 82 | assert pass_condition(test_dict2, 'lengthEqual', 1) == True 83 | assert pass_condition(test_dict3, 'lengthLessThan', 1) == False 84 | assert pass_condition(test_dict3, 'lengthMoreThan', 1) == True 85 | assert pass_condition(test_dict3, 'lengthEqual', 1) == False 86 | 87 | assert pass_condition(test_dict1, 'withKey', 'a') == False 88 | assert pass_condition(test_dict2, 'withKey', 'a') == True 89 | assert pass_condition(test_dict1, 'withoutKey', 'a') == True 90 | assert pass_condition(test_dict2, 'withoutKey', 'a') == False 91 | 92 | assert pass_condition(test_list1, 'containAtLeastOneOf', test_list1) == False 93 | assert pass_condition(test_list1, 'containAtLeastOneOf', test_list2) == False 94 | assert pass_condition(test_list2, 'containAtLeastOneOf', test_list2) == True 95 | assert pass_condition(test_list2, 'containAtLeastOneOf', ['b']) == False 96 | assert pass_condition(test_list3, 'containAtLeastOneOf', ['c']) == False 97 | assert pass_condition(test_list3, 'containAtLeastOneOf', ['c', 'b']) == True 98 | assert pass_condition('', 'containAtLeastOneOf', test_list1) == False 99 | assert pass_condition('a', 'containAtLeastOneOf', test_list2) == True 100 | assert pass_condition(test_list2, 'containAtLeastOneOf', '') == False 101 | assert pass_condition(test_list2, 'containAtLeastOneOf', 'a') == True 102 | assert pass_condition(test_list1, 'containAtLeastOneDifferentFrom', test_list1) == False 103 | assert pass_condition(test_list1, 'containAtLeastOneDifferentFrom', test_list3) == False 104 | assert pass_condition(test_list2, 'containAtLeastOneDifferentFrom', test_list1) == True 105 | assert pass_condition(test_list2, 'containAtLeastOneDifferentFrom', test_list2) == False 106 | assert pass_condition(test_list2, 'containAtLeastOneDifferentFrom', test_list3) == False 107 | assert pass_condition(['c'], 'containAtLeastOneDifferentFrom', test_list3) == True 108 | assert pass_condition(test_list3, 'containAtLeastOneDifferentFrom', test_list3) == False 109 | assert pass_condition(test_list3, 'containAtLeastOneDifferentFrom', test_list2) == True 110 | assert pass_condition(test_list3, 'containAtLeastOneDifferentFrom', 
test_list1) == True 111 | assert pass_condition('', 'containAtLeastOneDifferentFrom', test_list1) == False 112 | assert pass_condition('a', 'containAtLeastOneDifferentFrom', test_list3) == False 113 | assert pass_condition('d', 'containAtLeastOneDifferentFrom', test_list3) == True 114 | assert pass_condition(test_list1, 'containAtLeastOneDifferentFrom', 'a') == False 115 | assert pass_condition(test_list2, 'containAtLeastOneDifferentFrom', 'd') == True 116 | assert pass_condition(test_list1, 'containNoneOf', test_list1) == True 117 | assert pass_condition(test_list2, 'containNoneOf', test_list2) == False 118 | assert pass_condition(test_list1, 'containNoneOf', 'a') == True 119 | assert pass_condition('a', 'containNoneOf', test_list1) == True 120 | 121 | assert pass_condition('abcdefg', 'match', '.*cde.*') == True 122 | assert pass_condition('abcdefg', 'notMatch', '.*cde.*') == False 123 | assert pass_condition('abcdefg', 'match', '.*345.*') == False 124 | assert pass_condition('abcdefg', 'notMatch', '.*345.*') == True 125 | assert pass_condition('abcdefg', 'notMatch', '.*345.*') == True 126 | assert pass_condition('abcdefg', 'match', ['.*xyx.*', '.*pqr.*']) == False 127 | assert pass_condition('abcdefg', 'match', ['.*xyx.*', '.*345.*', '.*cde.*']) == True 128 | 129 | date1 = '2016-04-11 12:20:26.996000+00:00' 130 | date2 = '2017-04-11 12:20:26.996000+00:00' 131 | date3 = datetime.datetime.now() - datetime.timedelta(days=1) 132 | date4 = datetime.datetime.now() - datetime.timedelta(days=100) 133 | date5 = datetime.datetime.now() - datetime.timedelta(hours=5) 134 | assert pass_condition(date1, 'priorToDate', date2) == True 135 | assert pass_condition(date2, 'priorToDate', date1) == False 136 | assert pass_condition(date3, 'olderThan', [90, 'days']) == False 137 | assert pass_condition(date4, 'olderThan', [90, 'days']) == True 138 | assert pass_condition(date5, 'olderThan', [90, 'minutes']) == True 139 | assert pass_condition(date5, 'olderThan', [360, 'minutes']) == False 140 | assert pass_condition(date5, 'olderThan', [1, 'hours']) == True 141 | assert pass_condition(date5, 'olderThan', [6, 'hours']) == False 142 | try: 143 | assert pass_condition(date5, 'olderThan', [6, 'unittest']) == False 144 | except: 145 | pass 146 | 147 | try: 148 | assert pass_condition(date5, 'olderThan', '90') == False 149 | except: 150 | pass 151 | 152 | assert pass_condition(date3, 'newerThan', [90, 'days']) == True 153 | assert pass_condition(date4, 'newerThan', [90, 'days']) == False 154 | assert pass_condition(date4, 'newerThan', ['90', 'days']) == False 155 | 156 | assert pass_condition('192.168.0.1', 'inSubnets', '192.168.0.0/24') == True 157 | assert pass_condition('192.168.0.1', 'inSubnets', ['192.168.0.0/24']) == True 158 | assert pass_condition('192.168.1.1', 'inSubnets', ['192.168.0.0/24']) == False 159 | assert pass_condition('192.168.1.1', 'notInSubnets', ['192.168.0.0/24']) == True 160 | 161 | with open('tests/data/policy1.json', 'rt') as f: 162 | testpolicy = json.load(f) 163 | assert pass_condition(testpolicy['Statement'][0], 'containAction', 'iam:GetUser') == True 164 | assert pass_condition(testpolicy['Statement'][0], 'containAction', 'iam:CreateUser') == False 165 | assert pass_condition(testpolicy['Statement'][1], 'containAction', 'iam:CreateUser') == True 166 | assert pass_condition(testpolicy['Statement'][2], 'containAction', 'iam:CreateUser') == True 167 | assert pass_condition(testpolicy['Statement'][0], 'notContainAction', 'iam:CreateUser') == True 168 | assert 
pass_condition(testpolicy['Statement'][0], 'notContainAction', 'iam:GetUser') == False 169 | assert pass_condition(testpolicy['Statement'][0], 'containAtLeastOneAction', '') == False 170 | assert pass_condition(testpolicy['Statement'][0], 'containAtLeastOneAction', 'iam:GetUser') == True 171 | assert pass_condition(testpolicy['Statement'][0], 'containAtLeastOneAction', ['iam:CreateUser', 'iam:GetUser']) == True 172 | with open('tests/data/statement1.json', 'rt') as f: 173 | stringstatement = f.read() 174 | assert pass_condition(stringstatement, 'containAction', 'iam:GetUser') == True 175 | assert pass_condition(stringstatement, 'containAtLeastOneAction', 'iam:GetUser') == True 176 | 177 | assert pass_condition('123456789012', 'isSameAccount', '123456789012') == True 178 | assert pass_condition(['123456789013', '123456789012'], 'isSameAccount', '123456789012') == True 179 | assert pass_condition('arn:aws:iam::123456789012:root', 'isSameAccount', '123456789012') == True 180 | assert pass_condition('arn:aws:iam::123456789012:user/name', 'isSameAccount', '123456789012') == True 181 | assert pass_condition('arn:aws:iam::123456789012:role/name', 'isSameAccount', '123456789012') == True 182 | assert pass_condition('123456789012', 'isSameAccount', '123456789013') == False 183 | assert pass_condition('arn:aws:iam::123456789012:root', 'isSameAccount', '123456789013') == False 184 | assert pass_condition('123456789012', 'isCrossAccount', '123456789013') == True 185 | assert pass_condition(['123456789013', '123456789012'], 'isCrossAccount', '123456789013') == True 186 | assert pass_condition('arn:aws:iam::123456789012:root', 'isCrossAccount', '123456789013') == True 187 | 188 | try: 189 | pass_condition('foo', 'bar', 'baz') 190 | except: 191 | pass 192 | 193 | return 194 | -------------------------------------------------------------------------------- /tests/test-utils-console.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from opinel.utils.console import * 4 | 5 | class TestOpinelUtilsConsoleClass: 6 | 7 | def test_configPrintException(self): 8 | configPrintException(False) 9 | configPrintException(True) 10 | 11 | 12 | def test_printDebug(self): 13 | printDebug('hello') 14 | 15 | 16 | def test_printError(self): 17 | printError('hello', False) 18 | printError('hello') 19 | 20 | 21 | def test_printException(self): 22 | configPrintException(True) 23 | try: 24 | raise Exception('opinelunittest') 25 | except Exception as e: 26 | printException(e) 27 | configPrintException(False) 28 | try: 29 | raise Exception('opinelunittest') 30 | except Exception as e: 31 | printException(e) 32 | try: 33 | raise Exception('opinelunittest') 34 | except Exception as e: 35 | printException(e, True) 36 | 37 | 38 | def test_printInfo(msg, newLine=True): 39 | printInfo('hello', False) 40 | printInfo('hello') 41 | 42 | 43 | def test_printGeneric(self): 44 | printGeneric(sys.stdout, 'hello', False) 45 | printGeneric(sys.stderr, 'hello') 46 | 47 | 48 | def test_prompt(self): 49 | assert prompt('a') == 'a' 50 | assert prompt('') == '' 51 | test = ['a', 'b'] 52 | assert prompt(test) == 'a' 53 | assert prompt(test) == 'b' 54 | assert prompt(test) == '' 55 | 56 | 57 | def test_prompt_4_mfa_code(self): 58 | prompt_4_mfa_code(input='q') 59 | prompt_4_mfa_code(input='012345') 60 | prompt_4_mfa_code(input='0123456789') 61 | prompt_4_mfa_code(input=['helloworld', '0123456']) 62 | prompt_4_mfa_code(activate=True, input='012345') 63 | 
prompt_4_mfa_code(activate=True, input='q') 64 | 65 | 66 | def test_prompt_4_mfa_serial(self): 67 | prompt_4_mfa_serial(['a', 'n', 'arn:aws:iam::123456789012:mfa/username', 'y']) 68 | 69 | 70 | def test_prompt_4_overwrite(self): 71 | assert prompt_4_overwrite(os.path.realpath(__file__), True) == True 72 | assert prompt_4_overwrite(os.path.realpath(__file__), False, input='y') == True 73 | assert prompt_4_overwrite(os.path.realpath(__file__), False, input='n') == False 74 | 75 | 76 | def test_prompt_4_value(self): 77 | assert prompt_4_value('prompt_4_value', no_confirm=True, input='inputvalue') == 'inputvalue' 78 | assert prompt_4_value('prompt_4_value', no_confirm=True, is_question=True, input='inputvalue') == 'inputvalue' 79 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], no_confirm=True, input='b') == 'b' 80 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], display_choices=False, no_confirm=True, input='b') == 'b' 81 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], display_indices=True, no_confirm=True, input='1') == 'b' 82 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], default='b', no_confirm=True, input='') == 'b' 83 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], no_confirm=True, authorize_list=True, input='a,b') == 'a,b' 84 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], required=True, no_confirm=True, input=['', 'b']) == 'b' 85 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], required=True, no_confirm=True, input=['invalid', 'b']) == 'b' 86 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], no_confirm=True, input='a,c') == None 87 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], no_confirm=True, input='a,b', authorize_list = True) == 'a,b' 88 | assert prompt_4_value('prompt_4_value', choices=['a', 'b', 'c'], no_confirm=True, input='a,e', authorize_list = True) == None 89 | assert prompt_4_value('prompt_4_value', regex=re_mfa_serial_format, regex_format=mfa_serial_format, required=True, input=['inputvalue', 'arn:aws:iam::123456789012:mfa/username', 'y']) == 'arn:aws:iam::123456789012:mfa/username' 90 | assert prompt_4_value('prompt_4_value', regex=re_mfa_serial_format, regex_format=mfa_serial_format, required=False, input=['inputvalue', '', 'y']) == '' 91 | 92 | 93 | 94 | 95 | def test_prompt_4_yes_no(self): 96 | assert prompt_4_yes_no('hello', input='N') == False 97 | assert prompt_4_yes_no('hello', input='no') == False 98 | assert prompt_4_yes_no('hello', input='Y') == True 99 | assert prompt_4_yes_no('hello', input='yes') == True 100 | assert prompt_4_yes_no('hello', input=['foo', 'bar', 'no']) == False 101 | assert prompt_4_yes_no('hello', input='Ye') == None 102 | assert prompt_4_yes_no('hello', input='Non') == None 103 | return 104 | -------------------------------------------------------------------------------- /tests/test-utils-credentials.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import copy 4 | import shutil 5 | from opinel.utils.credentials import * 6 | 7 | class TestOpinelCredentialsClass: 8 | 9 | def setup(self): 10 | self.creds = read_creds_from_environment_variables() 11 | self.write_creds = False 12 | if self.creds['AccessKeyId'] == None: 13 | self.creds = read_creds('travislike') 14 | self.write_creds = True 15 | # Backup current config 16 | self.tmpaws_config_dir = aws_config_dir + 'tmp' 17 | if os.path.exists(aws_config_dir): 
18 | shutil.move(aws_config_dir, self.tmpaws_config_dir) 19 | # Import configs 20 | os.mkdir(aws_config_dir) 21 | shutil.copy('tests/data/credentials', aws_credentials_file) 22 | shutil.copy('tests/data/config', aws_config_file) 23 | # Write the test credentials 24 | if self.write_creds: 25 | write_creds_to_aws_credentials_file('travislike', self.creds) 26 | write_creds_to_aws_credentials_file('default', self.creds) 27 | 28 | 29 | def teardown(self): 30 | # Reset original config 31 | shutil.rmtree(aws_config_dir) 32 | if os.path.exists(self.tmpaws_config_dir): 33 | shutil.move(self.tmpaws_config_dir, aws_config_dir) 34 | 35 | 36 | def cmp(self, a, b): 37 | """ 38 | Implement cmp() for Python3 tests 39 | """ 40 | return (a > b) - (a < b) 41 | 42 | 43 | def check_credentials_dict(self, creds): 44 | assert 'AccessKeyId' in creds 45 | assert 'SecretAccessKey' in creds 46 | assert 'SessionToken' in creds 47 | assert 'Expiration' in creds 48 | #assert 'SerialNumber' in creds 49 | #assert 'TokenCode' in creds 50 | 51 | 52 | def check_credentials_not_empty(self, creds): 53 | assert (creds['AccessKeyId'].startswith('AKIA') or creds['AccessKeyId'].startswith('ASIA')) 54 | assert (creds['SecretAccessKey'] != None) 55 | 56 | 57 | def test_assume_role(self): 58 | creds = assume_role('Scout2', self.creds, 'arn:aws:iam::179374595322:role/Scout2', 'opinelunittesting') 59 | self.check_credentials_dict(creds) 60 | self.check_credentials_not_empty(creds) 61 | assert (creds['SessionToken'] != None) 62 | fake_creds = copy.deepcopy(self.creds) 63 | fake_creds['mfa_serial'] = 'arn:aws:iam::179374595322:mfa/fake' 64 | fake_creds['mfa_code'] = '123456' 65 | fake_creds['ExternalId'] = 'opinelunittesting' 66 | try: 67 | creds = assume_role('Scout2', fake_creds, 'arn:aws:iam::179374595322:role/Scout2', 'opinelunittesting') 68 | except Exception as e: 69 | pass 70 | 71 | 72 | def test_get_cached_credentials_filename(self): 73 | filename = get_cached_credentials_filename('Scout2', 'arn:aws:iam::179374595322:role/Scout2') 74 | assert(filename.endswith('.aws/cli/cache/Scout2--arn_aws_iam__179374595322_role-Scout2.json')) 75 | 76 | 77 | def test_generate_password(self): 78 | password = generate_password(16) 79 | assert len(password) == 16 80 | 81 | 82 | def test_init_creds(self): 83 | creds = init_creds() 84 | self.check_credentials_dict(creds) 85 | 86 | 87 | def test_init_sts_session(self): 88 | creds = init_sts_session('travislike-sts', self.creds, 900, 'opinelunittesting', True) 89 | self.check_credentials_dict(creds) 90 | fake_creds = copy.deepcopy(self.creds) 91 | fake_creds['SerialNumber'] = 'arn:aws:iam::179374595322:mfa/fake' 92 | fake_creds['TokenCode'] = '123456' 93 | try: 94 | creds = init_sts_session('travislike-sts', fake_creds, 900, 'opinelunittesting', False) 95 | except: 96 | pass 97 | 98 | 99 | def test_read_creds_from_aws_credentials_file(self): 100 | test_cases = [{'profile_name': 'l01cd3v-1','credentials_file': 'tests/data/credentials'}, 101 | {'profile_name': 'l01cd3v-2','credentials_file': 'tests/data/credentials'}, 102 | {'profile_name': 'l01cd3v-3','credentials_file': 'tests/data/credentials'}, 103 | {'profile_name': 'l01cd3v-4','credentials_file': 'tests/data/credentials'}] 104 | results = [ 105 | ('AKIAXXXXXXXXXXXXXXX1', 'deadbeefdeadbeefdeadbeefdeadbeef11111111', 'arn:aws:iam::123456789111:mfa/l01cd3v', 106 | None), 107 | ('AKIAXXXXXXXXXXXXXXX2', 'deadbeefdeadbeefdeadbeefdeadbeef22222222', None, None), 108 | ('ASIAXXXXXXXXXXXXXXX3', 'deadbeefdeadbeefdeadbeefdeadbeef33333333', None, 109 | 
'deadbeef333//////////ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWF' 110 | 'kYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==/ZGVhZGJlZWZkZWFkYmVlZg==+ZGVhZGJlZWZkZWFkYmVlZg==Z' 111 | 'GVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==/ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg=='), 112 | ('ASIAXXXXXXXXXXXXXXX4', 'deadbeefdeadbeefdeadbeefdeadbeef44444444', None, 113 | 'deadbeef444//////////ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWF' 114 | 'kYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==/ZGVhZGJlZWZkZWFkYmVlZg==+ZGVhZGJlZWZkZWFkYmVlZg==Z' 115 | 'GVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==/ZGVhZGJlZWZkZWFkYmVlZg==ZGVhZGJlZWZkZWFkYmVlZg==')] 116 | for test_case, result in zip(test_cases, results): 117 | credentials = {} 118 | credentials = read_creds_from_aws_credentials_file(**test_case) 119 | assert credentials['AccessKeyId'] == result[0] 120 | assert credentials['SecretAccessKey'] == result[1] 121 | assert credentials['SerialNumber'] == result[2] 122 | assert credentials['SessionToken'] == result[3] 123 | os.remove(aws_credentials_file) 124 | creds = read_creds_from_aws_credentials_file('test') 125 | shutil.rmtree(aws_config_dir) 126 | creds = read_creds_from_aws_credentials_file('test') 127 | 128 | 129 | def test_read_creds_from_csv(self): 130 | creds = read_creds_from_csv('tests/data/accessKeys1.csv') 131 | assert creds != None 132 | assert type(creds) == tuple 133 | assert creds[0] == 'AKIAJJ5TE81PVO72WPTQ' 134 | assert creds[1] == '67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123' 135 | assert creds[2] == None 136 | creds = read_creds_from_csv('tests/data/accessKeys2.csv') 137 | assert creds[0] == 'AKIAJJ5TE81PVO72WPTQ' 138 | assert creds[1] == '67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123' 139 | assert creds[2] == None 140 | creds = read_creds_from_csv('tests/data/accessKeys3.csv') 141 | assert creds[0] == 'AKIAJJ5TE81PVO72WPTQ' 142 | assert creds[1] == '67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123' 143 | assert creds[2] == 'arn:aws:iam::123456789111:mfa/l01cd3v' 144 | creds = read_creds_from_csv('tests/data/accessKeys4.csv') 145 | assert creds[0] == 'AKIAJJ5TE81PVO72WPTQ' 146 | assert creds[1] == '67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123' 147 | assert creds[2] == 'arn:aws:iam::123456789111:mfa/l01cd3v' 148 | return 149 | 150 | 151 | def test_read_creds_from_ec2_instance_metadata(self): 152 | creds = read_creds_from_ec2_instance_metadata() 153 | 154 | 155 | def test_read_creds_from_ecs_container_metadata(self): 156 | creds = read_creds_from_ecs_container_metadata() 157 | 158 | 159 | def test_read_creds_from_environment_variables(self): 160 | os.environ['AWS_ACCESS_KEY_ID'] = 'environment-AKIAJJ5TE81PVO72WPTQ' 161 | os.environ['AWS_SECRET_ACCESS_KEY'] = 'environment-67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123' 162 | os.environ['AWS_SESSION_TOKEN'] = 'environment-session/////token' 163 | creds = read_creds_from_environment_variables() 164 | assert creds != None 165 | assert type(creds) == dict 166 | assert creds['AccessKeyId'] == 'environment-AKIAJJ5TE81PVO72WPTQ' 167 | assert creds['SecretAccessKey'] == 'environment-67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123' 168 | assert creds['SessionToken'] == 'environment-session/////token' 169 | 170 | 171 | def test_read_profile_from_environment_variables(self): 172 | os.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789012:role/cross_role_test' 173 | os.environ['AWS_EXTERNAL_ID'] = '42' 174 | role_arn, external_id = read_profile_from_environment_variables() 
175 | assert role_arn == 'arn:aws:iam::123456789012:role/cross_role_test' 176 | assert external_id == '42' 177 | 178 | 179 | def test_read_profile_from_aws_config_file(self): 180 | role_arn, source_profile, mfa_serial, external_id = \ 181 | read_profile_from_aws_config_file('l01cd3v-role1', config_file='tests/data/config') 182 | assert role_arn == 'arn:aws:iam::123456789012:role/Role1' 183 | assert source_profile == 'l01cd3v-1' 184 | assert mfa_serial == None 185 | role_arn, source_profile, mfa_serial, external_id = \ 186 | read_profile_from_aws_config_file('l01cd3v-role2', config_file='tests/data/config') 187 | assert role_arn == 'arn:aws:iam::123456789012:role/Role2' 188 | assert source_profile == 'l01cd3v-2' 189 | assert mfa_serial == 'arn:aws:iam::123456789222:mfa/l01cd3v' 190 | role_arn, source_profile, mfa_serial, external_id = \ 191 | read_profile_from_aws_config_file('l01cd3v-role3', config_file='tests/data/config') 192 | assert role_arn == 'arn:aws:iam::123456789012:role/Role3' 193 | assert source_profile == 'l01cd3v-2' 194 | assert mfa_serial == 'arn:aws:iam::123456789333:mfa/l01cd3v' 195 | assert external_id == 'external-id-for-role3' 196 | role_arn, source_profile, mfa_serial, external_id = \ 197 | read_profile_from_aws_config_file('l01cd3v-role4', config_file='tests/data/config') 198 | assert role_arn == 'arn:aws:iam::123456789012:role/Role4' 199 | assert source_profile == 'default' 200 | assert mfa_serial == None 201 | assert external_id == None 202 | role_arn, source_profile, mfa_serial, external_id = \ 203 | read_profile_from_aws_config_file('scout2fortraviswithexternalid', config_file='tests/data/credentials') 204 | assert role_arn == 'arn:aws:iam::179374595322:role/Scout2WithExternalId' 205 | assert source_profile == 'default' 206 | assert mfa_serial == None 207 | assert external_id == 'external-id-for-scout2' 208 | os.remove(aws_config_file) 209 | role_arn, source_profile, mfa_serial, external_id = read_profile_from_aws_config_file('l01cd3v-role1') 210 | 211 | 212 | def test_get_profiles_from_aws_credentials_file(self): 213 | profiles1 = get_profiles_from_aws_credentials_file(credentials_files=['tests/data/credentials']) 214 | profiles2 = sorted(['l01cd3v-1', 'l01cd3v-2', 'l01cd3v-3', 'l01cd3v-4', 215 | 'scout2fortraviswithexternalid', 'testprofile']) 216 | assert profiles1 == profiles2 217 | 218 | 219 | def test_show_profiles_from_aws_credentials_file(self): 220 | show_profiles_from_aws_credentials_file(credentials_files=['tests/data/credentials']) 221 | 222 | 223 | def test_write_creds_to_aws_credentials_file(self): 224 | creds = init_creds() 225 | creds['AccessKeyId'] = 'AKIAJJ5TE81PVO72WPTQ' 226 | creds['SecretAccessKey'] = '67YkvxJ8Qx0EI97NvlIyM9kVz/uKddd0z0uGj123' 227 | write_creds_to_aws_credentials_file('testprofile', creds) 228 | creds['SessionToken'] = 'opineltestsessiontoken' 229 | creds['SerialNumber'] = 'arn:aws:iam::123456789111:mfa/l01cd3v' 230 | creds['Expiration'] = '2017-04-19 02:23:16+00:00' 231 | write_creds_to_aws_credentials_file('testprofile', creds) 232 | write_creds_to_aws_credentials_file('testprofile', creds) 233 | shutil.rmtree(aws_config_dir) 234 | write_creds_to_aws_credentials_file('testprofile', creds) 235 | 236 | 237 | def test_complete_profile(self): 238 | pass 239 | 240 | 241 | def test_read_creds(self): 242 | creds = read_creds('travislike') 243 | creds = read_creds('', csv_file='tests/data/accessKeys1.csv') 244 | # Assume role within read_creds() 245 | creds = read_creds('scout2fortravis') 246 | # Read from CLI cache 247 | creds = 
read_creds('scout2fortravis') 248 | # Pretend cached creds have expired 249 | filename = get_cached_credentials_filename('scout2fortravis', 'arn:aws:iam::179374595322:role/Scout2') 250 | printError(str(filename)) 251 | with open(filename, 'rt') as f: 252 | creds = json.load(f) 253 | creds['Credentials']['Expiration'] = '2016-11-21 22:32:18+00:00' 254 | with open(filename, 'wt') as f: 255 | f.write(json.dumps(creds)) 256 | creds = read_creds('scout2fortravis') 257 | creds = read_creds('default') 258 | creds = read_creds('scout2fortraviswithexternalid') 259 | -------------------------------------------------------------------------------- /tests/test-utils-fs.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import datetime 4 | 5 | from opinel.utils.fs import * 6 | from opinel.utils.console import configPrintException, printError 7 | 8 | class TestOpinelFsClass: 9 | """ 10 | Test opinel.fs 11 | """ 12 | 13 | def cmp(self, a, b): 14 | """ 15 | Implement cmp() for Python3 tests 16 | """ 17 | return (a > b) - (a < b) 18 | 19 | def setup(self): 20 | configPrintException(True) 21 | 22 | def test_CustomJSONEncoder(self): 23 | date = datetime.datetime(2017, 6, 12) 24 | blob1 = {'foo': 'bar', 'date': date} 25 | print('%s' % json.dumps(blob1, cls=CustomJSONEncoder)) 26 | blob2 = {'foo': 'bar', 'baz': {'foo': 'bar'}} 27 | print('%s' % json.dumps(blob2, cls=CustomJSONEncoder)) 28 | 29 | def test_load_data(self): 30 | test = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/protocols.json') 31 | load_data(test, local_file=True) 32 | load_data(test, 'protocols', local_file=True) 33 | load_data('protocols.json', 'protocols') 34 | load_data('ip-ranges/aws.json', 'prefixes') 35 | load_data('tests/data/protocols.json', 'protocols', local_file=True) 36 | test = load_data('protocols.json', 'protocols') 37 | assert type(test) == dict 38 | assert test['1'] == 'ICMP' 39 | test = load_data('tests/data/protocols.json', 'protocols', True) 40 | assert type(test) == dict 41 | assert test['-2'] == 'TEST' 42 | test = load_data('tests/data/protocols.yaml', 'protocols', True, format = 'yaml') 43 | assert type(test) == dict 44 | assert test['-2'] == 'TEST' 45 | test = load_data('tests/data/protocols.txt', local_file = True, format = 'txt') 46 | assert (test.rstrip() == 'some text here') 47 | test = load_data('tests/data/protocols.json', 'protocols', True, format = 'unknown') 48 | assert test == None 49 | 50 | 51 | def test_read_default_args(self): 52 | pass 53 | 54 | 55 | def test_read_ip_ranges(self): 56 | read_ip_ranges('ip-ranges/aws.json', local_file=False) 57 | read_ip_ranges('tests/data/ip-ranges-1.json', local_file=True) 58 | read_ip_ranges('tests/data/ip-ranges-1.json', local_file=True, ip_only=True) 59 | successful_read_ip_ranges_runs = True 60 | test_cases = [ 61 | { 62 | 'filename': 'tests/data/ip-ranges-1.json', 63 | 'local_file': True, 64 | 'conditions': [],'ip_only': False, 65 | 'results': 'tests/results/read_ip_ranges/ip-ranges-1a.json' 66 | }, 67 | { 68 | 'filename': 'tests/data/ip-ranges-1.json', 69 | 'local_file': True, 70 | 'conditions': [],'ip_only': True, 71 | 'results': 'tests/results/read_ip_ranges/ip-ranges-1b.json' 72 | }, 73 | { 74 | 'filename': 'tests/data/ip-ranges-1.json', 75 | 'local_file': True, 76 | 'conditions': [ 77 | [ 78 | 'field_a', 'equal', 'a1']], 79 | 'ip_only': True, 80 | 'results': 'tests/results/read_ip_ranges/ip-ranges-1c.json' 81 | }, 82 | { 83 | 'filename': 'ip-ranges/aws.json', 84 | 
'local_file': False, 85 | 'conditions': [ 86 | [ 'ip_prefix', 'equal', '23.20.0.0/14' ] 87 | ], 88 | 'ip_only': False, 89 | 'results': 'tests/results/read_ip_ranges/ip-ranges-a.json' 90 | }, 91 | { 92 | "filename": 'tests/data/ip-ranges-3.json', 93 | "local_file": True, 94 | 'results': None, 95 | "ip_only": True, 96 | "results": "tests/results/read_ip_ranges/ip-ranges-3.json" 97 | }, 98 | { 99 | "filename": 'tests/data/ip-ranges-3.json', 100 | "local_file": True, 101 | 'results': None, 102 | "ip_only": True, 103 | "results": "tests/results/read_ip_ranges/ip-ranges-3.json" 104 | } 105 | ] 106 | for test_case in test_cases: 107 | results = test_case.pop('results') 108 | test_results = read_ip_ranges(**test_case) 109 | known_results = load_data(results, local_file=True) 110 | if self.cmp(test_results, known_results) != 0: 111 | successful_read_ip_ranges_runs = False 112 | 113 | assert successful_read_ip_ranges_runs 114 | 115 | def test_save_blob_as_json(self): 116 | date = datetime.datetime.now() 117 | save_blob_as_json('tmp1.json', {'foo': 'bar','date': date}, True, False) 118 | save_blob_as_json('tmp1.json', {'foo': 'bar'}, True, True) 119 | save_blob_as_json('/root/tmp1.json', {'foo': 'bar'}, True, True) 120 | 121 | def test_save_ip_ranges(self): 122 | if os.path.isfile('ip-ranges-default.json'): 123 | os.remove('ip-ranges-default.json') 124 | save_ip_ranges('default', ['1.2.3.4'], False, False) 125 | save_ip_ranges('default', [{'ip_prefix': '5.6.7.8'}], True, True) 126 | 127 | def test_read_file(self): 128 | test = read_file('tests/data/protocols.txt') 129 | assert (test.rstrip() == 'some text here') 130 | -------------------------------------------------------------------------------- /tests/test-utils-globals.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from opinel.utils.globals import * 4 | 5 | class TestOpinelUtils: 6 | 7 | def callback(self, object): 8 | object['foo'] = 'bar' 9 | 10 | 11 | def test_manage_dictionary(self): 12 | test = {} 13 | manage_dictionary(test, 'a', []) 14 | assert 'a' in test 15 | assert type(test['a']) == list 16 | manage_dictionary(test, 'a', {}) 17 | assert test['a'] == [] 18 | manage_dictionary(test, 'b', {}, self.callback) 19 | assert type(test['b']) == dict 20 | assert ('foo' in test['b']) 21 | assert test['b']['foo'] == 'bar' 22 | 23 | 24 | def test_check_requirements(self): # script_path): 25 | assert (check_requirements(os.path.realpath(__file__)) == True) 26 | assert (check_requirements(os.path.realpath(__file__), 'tests/data/requirements1.txt') == True) 27 | assert (check_requirements(os.path.realpath(__file__), 'tests/data/requirements2.txt') == False) 28 | assert (check_requirements(os.path.realpath(__file__), 'tests/data/requirements3.txt') == False) 29 | 30 | 31 | def test_check_versions(self): 32 | assert (check_versions('1.0.0', '1.4.2', '2.0.0', 'opinelunittest') == True) 33 | assert (check_versions('1.0.0', '2.4.2', '2.0.0', 'opinelunittest') == True) 34 | assert (check_versions('1.0.0', '2.4.2', '2.0.0', 'opinelunittest', True) == False) 35 | assert (check_versions('1.0.0', '0.4.2', '2.0.0', 'opinelunittest') == False) 36 | assert (check_versions(None, None, None, None) == True) 37 | 38 | def test_snake_to_camel(self): 39 | assert (snake_to_camel('one_two_three') == 'OneTwoThree') 40 | 41 | def test_snake_to_words(self): 42 | assert (snake_to_words('one_two_three') == 'one two three') 43 | assert (snake_to_words('one_two_three', True) == 'One Two Three') 44 | 45 |
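The test_manage_dictionary case above pins down the get-or-initialize contract of manage_dictionary: a key is created only when missing, the supplied default becomes its value, and an optional callback is applied to that freshly created value. A minimal, self-contained sketch of that contract follows; the names manage_dictionary_sketch and config are illustrative only and are not part of the repository, and the real implementation in opinel/utils/globals.py may differ in details.

def manage_dictionary_sketch(dictionary, key, default, callback=None):
    # Only initialize the key when it is missing; an existing value is never overwritten.
    if key not in dictionary:
        dictionary[key] = default
        if callback:
            # The callback receives the freshly created value, so a mutable
            # default such as {} or [] can be populated in place.
            callback(dictionary[key])
    return dictionary

config = {}
manage_dictionary_sketch(config, 'a', [])   # creates config['a'] == []
manage_dictionary_sketch(config, 'a', {})   # 'a' already exists, stays []
manage_dictionary_sketch(config, 'b', {}, lambda d: d.update(foo='bar'))
assert config == {'a': [], 'b': {'foo': 'bar'}}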
-------------------------------------------------------------------------------- /tests/test-utils-profiles.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import os 4 | import shutil 5 | 6 | from opinel.utils.profiles import * 7 | from opinel.utils.console import configPrintException, printDebug 8 | 9 | class TestOpinelUtilsAWSProfiles(object): 10 | 11 | def setup(self): 12 | configPrintException(True) 13 | self.tmp_aws_dir = '%s-opineltest' % aws_dir 14 | if os.path.isdir(aws_dir): 15 | os.rename(aws_dir, self.tmp_aws_dir) 16 | os.mkdir(aws_dir) 17 | shutil.copyfile('tests/data/config', os.path.join(aws_dir, 'config')) 18 | shutil.copyfile('tests/data/credentials', os.path.join(aws_dir, 'credentials')) 19 | 20 | 21 | def teardown(self): 22 | shutil.rmtree(aws_dir) 23 | if os.path.isdir(self.tmp_aws_dir): 24 | os.rename(self.tmp_aws_dir, aws_dir) 25 | 26 | 27 | def test_list(self): 28 | profiles = sorted(set(AWSProfiles.list())) 29 | printDebug(str(profiles)) 30 | testprofiles = sorted(set(['l01cd3v-1', 'l01cd3v-2', 'l01cd3v-role1', 'l01cd3v-role2', 'l01cd3v-role3', 'l01cd3v-role4', 'l01cd3v-3', 'l01cd3v-4', 'testprofile', 'scout2fortravis', 'scout2fortraviswithexternalid'])) 31 | printDebug(str(testprofiles)) 32 | assert (testprofiles == profiles) 33 | profiles = AWSProfiles.list(names = 'l01cd3v-role.*') 34 | printDebug(str(profiles)) 35 | assert(set(['l01cd3v-role1', 'l01cd3v-role2', 'l01cd3v-role3', 'l01cd3v-role4']) == set(profiles)) 36 | profiles = AWSProfiles.list(names = '.*1') 37 | assert(set(['l01cd3v-1', 'l01cd3v-role1']) == set(profiles)) 38 | 39 | 40 | def test_get(self): 41 | profile = AWSProfiles.get('l01cd3v-role1')[0] 42 | assert('role_arn' in profile.attributes) 43 | assert('source_profile' in profile.attributes) 44 | assert(profile.attributes['role_arn'] == 'arn:aws:iam::123456789012:role/Role1') 45 | assert(profile.attributes['source_profile'] == 'l01cd3v-1') 46 | 47 | 48 | def test_get_credentials(self): 49 | profile = AWSProfiles.get('l01cd3v-1')[0] 50 | credentials = profile.get_credentials() 51 | assert(credentials['SerialNumber'] == 'arn:aws:iam::123456789111:mfa/l01cd3v') 52 | assert(credentials['SecretAccessKey'] == 'deadbeefdeadbeefdeadbeefdeadbeef11111111') 53 | assert(credentials['AccessKeyId'] == 'AKIAXXXXXXXXXXXXXXX1') 54 | 55 | def test_write(self): 56 | profile = AWSProfile(name = 'l01cd3v-role3') 57 | profile.set_attribute('role_arn', 'arn:aws:iam::123456789012:role/Role3') 58 | profile.set_attribute('source_profile', 'l01cd3v-3') 59 | profile.write() 60 | profile = AWSProfile(name = 'l01cd3v-role4') 61 | profile.set_attribute('role_arn', 'arn:aws:iam::123456789012:role/Role4') 62 | profile.set_attribute('source_profile', 'l01cd3v-4') 63 | profile.write() 64 | profile = AWSProfile(name = 'l01cd3v-5') 65 | profile.set_attribute('aws_access_key_id', 'AKIAXXXXXXXXXXXXXXX5') 66 | profile.set_attribute('aws_secret_access_key', 'deadbeefdeadbeefdeadbeefdeadbeef55555555') 67 | profile.write() 68 | profile = AWSProfile(name = 'l01cd3v-2') 69 | profile.set_attribute('aws_mfa_serial', 'arn:aws:iam::123456789222:mfa/l01cd3v-2') 70 | profile.write() 71 | -------------------------------------------------------------------------------- /tests/test-utils-threads.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from opinel.utils.console import configPrintException 4 | from opinel.utils.threads import * 5 | 6 | class 
TestOpinelUtilsThreads: 7 | 8 | def setup(self): 9 | configPrintException(True) 10 | 11 | def callback(self, q, params): 12 | while True: 13 | try: 14 | i = q.get() 15 | except: 16 | pass 17 | finally: 18 | q.task_done() 19 | 20 | 21 | def test_thread_work(self): 22 | targets = [] 23 | for i in range(50): 24 | targets.append(i) 25 | thread_work(targets, self.callback, {}, 10) 26 | for i in range(5): 27 | targets.append(i) 28 | thread_work(targets, self.callback) 29 | 30 | 31 | def test_threaded_per_region(self): 32 | regions = [ 'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2' ] 33 | thread_work(regions, threaded_per_region, params = {'method': self.callback}) 34 | --------------------------------------------------------------------------------
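The callback used in tests/test-utils-threads.py is a standard queue consumer: it blocks on q.get(), handles the item, and acknowledges it with q.task_done() so the dispatcher can join the queue. The sketch below shows how such dispatcher/worker wiring can be assembled with the Python 3 standard library alone; thread_work_sketch and worker are hypothetical names for illustration and are not opinel.utils.threads.thread_work itself, whose signature the tests exercise as thread_work(targets, callback, params, num_threads).

from queue import Queue
from threading import Thread

def worker(q, params):
    # Mirrors the test callback: consume items indefinitely, acknowledging
    # each one so that q.join() can return once the queue is drained.
    while True:
        try:
            item = q.get()
            # ... per-item work would happen here ...
        finally:
            q.task_done()

def thread_work_sketch(targets, function, params=None, num_threads=4):
    q = Queue()
    for _ in range(num_threads):
        t = Thread(target=function, args=(q, params or {}))
        t.daemon = True  # workers die with the main thread once work is done
        t.start()
    for target in targets:
        q.put(target)
    q.join()  # blocks until every queued item has been acknowledged

if __name__ == '__main__':
    thread_work_sketch(list(range(50)), worker, num_threads=10)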