├── .circleci └── config.yml ├── .gitignore ├── .mypy.ini ├── .pylintrc ├── LICENSE ├── README-agent.md ├── README.md ├── build.sh ├── check.sh ├── ec2-session ├── ec2-ssh ├── ecs-session ├── requirements.txt ├── sample-templates ├── README.md ├── template-ecs-task.yml └── terraform │ ├── .gitignore │ ├── ecs.tf │ ├── infra.tf │ ├── main.tf │ └── variables.tf ├── setup-agent.py ├── setup.py ├── ssm-tunnel ├── ssm-tunnel-agent ├── ssm-tunnel-updown.dns-example ├── ssm_tools ├── __init__.py ├── common.py ├── ec2_instance_connect.py ├── ecs_session_cli.py ├── resolver.py ├── ssm_session_cli.py ├── ssm_ssh_cli.py ├── ssm_tunnel_cli.py └── talker.py └── upload.sh /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | workflows: 4 | build_and_publish: 5 | jobs: 6 | - build: 7 | filters: 8 | tags: 9 | only: /.*/ 10 | - publish: 11 | requires: 12 | - build 13 | filters: 14 | tags: 15 | only: /^v[0-9]+\.[0-9]+.*/ 16 | branches: 17 | ignore: /.*/ 18 | 19 | jobs: 20 | build: 21 | docker: 22 | - image: cimg/python:3.6 23 | 24 | steps: 25 | - checkout 26 | 27 | - run: 28 | name: Install prerequisities 29 | command: | 30 | set -x 31 | python3 -m pip install pylint mypy black 32 | python3 -m pip install -r requirements.txt 33 | pyenv rehash 34 | 35 | - run: 36 | name: Check syntax and style 37 | command: ./check.sh 38 | 39 | publish: 40 | docker: 41 | - image: cimg/python:3.6 42 | 43 | steps: 44 | - checkout 45 | - run: 46 | name: Verify git tag vs. version 47 | command: | 48 | python3 setup.py verify 49 | 50 | - run: 51 | name: Init .pypirc 52 | command: | 53 | echo -e "[pypi]" >> ~/.pypirc 54 | echo -e "username = $PYPI_USERNAME" >> ~/.pypirc 55 | echo -e "password = $PYPI_PASSWORD" >> ~/.pypirc 56 | 57 | - run: 58 | name: Build packages 59 | command: ./build.sh 60 | 61 | - run: 62 | name: Install twine 63 | command: | 64 | python3 -m pip install --upgrade pip 65 | python3 -m pip install twine 66 | pyenv rehash 67 | 68 | - run: 69 | name: Upload to PyPI 70 | command: | 71 | twine upload dist/* 72 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.swp 3 | *.egg-info/ 4 | __pycache__/ 5 | build/ 6 | dist/ 7 | .idea/ 8 | .mypy_cache/ 9 | -------------------------------------------------------------------------------- /.mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | warn_return_any = True 3 | warn_unused_configs = True 4 | 5 | disallow_untyped_calls = True 6 | disallow_untyped_defs = True 7 | disallow_incomplete_defs = True 8 | 9 | scripts_are_modules = True 10 | ignore_missing_imports = True 11 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # A comma-separated list of package or module names from where C extensions may 4 | # be loaded. Extensions are loading into the active Python interpreter and may 5 | # run arbitrary code. 6 | #extension-pkg-whitelist= 7 | 8 | # Add files or directories to the blacklist. They should be base names, not 9 | # paths. 10 | #ignore=CVS 11 | 12 | # Add files or directories matching the regex patterns to the blacklist. The 13 | # regex matches against base names, not paths. 
14 | #ignore-patterns= 15 | 16 | # Python code to execute, usually for sys.path manipulation such as 17 | # pygtk.require(). 18 | #init-hook= 19 | 20 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the 21 | # number of processors available to use. 22 | #jobs=1 23 | 24 | # Control the amount of potential inferred values when inferring a single 25 | # object. This can help the performance when dealing with large functions or 26 | # complex, nested conditions. 27 | #limit-inference-results=100 28 | 29 | # List of plugins (as comma separated values of python modules names) to load, 30 | # usually to register additional checkers. 31 | #load-plugins= 32 | 33 | # Pickle collected data for later comparisons. 34 | #persistent=yes 35 | 36 | # Specify a configuration file. 37 | #rcfile= 38 | 39 | # When enabled, pylint would attempt to guess common misconfiguration and emit 40 | # user-friendly hints instead of false-positive error messages. 41 | #suggestion-mode=yes 42 | 43 | # Allow loading of arbitrary C extensions. Extensions are imported into the 44 | # active Python interpreter and may run arbitrary code. 45 | #unsafe-load-any-extension=no 46 | 47 | 48 | [MESSAGES CONTROL] 49 | 50 | # Only show warnings with the listed confidence levels. Leave empty to show 51 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. 52 | #confidence= 53 | 54 | # Disable the message, report, category or checker with the given id(s). You 55 | # can either give multiple identifiers separated by comma (,) or put this 56 | # option multiple times (only on the command line, not in the configuration 57 | # file where it should appear only once). You can also use "--disable=all" to 58 | # disable everything first and then reenable specific checks. For example, if 59 | # you want to run only the similarities checker, you can use "--disable=all 60 | # --enable=similarities". If you want to run only the classes checker, but have 61 | # no Warning level messages displayed, use "--disable=all --enable=classes 62 | # --disable=W". 
63 | #disable=print-statement, 64 | # parameter-unpacking, 65 | # unpacking-in-except, 66 | # old-raise-syntax, 67 | # backtick, 68 | # long-suffix, 69 | # old-ne-operator, 70 | # old-octal-literal, 71 | # import-star-module-level, 72 | # non-ascii-bytes-literal, 73 | # raw-checker-failed, 74 | # bad-inline-option, 75 | # locally-disabled, 76 | # file-ignored, 77 | # suppressed-message, 78 | # useless-suppression, 79 | # deprecated-pragma, 80 | # use-symbolic-message-instead, 81 | # apply-builtin, 82 | # basestring-builtin, 83 | # buffer-builtin, 84 | # cmp-builtin, 85 | # coerce-builtin, 86 | # execfile-builtin, 87 | # file-builtin, 88 | # long-builtin, 89 | # raw_input-builtin, 90 | # reduce-builtin, 91 | # standarderror-builtin, 92 | # unicode-builtin, 93 | # xrange-builtin, 94 | # coerce-method, 95 | # delslice-method, 96 | # getslice-method, 97 | # setslice-method, 98 | # no-absolute-import, 99 | # old-division, 100 | # dict-iter-method, 101 | # dict-view-method, 102 | # next-method-called, 103 | # metaclass-assignment, 104 | # indexing-exception, 105 | # raising-string, 106 | # reload-builtin, 107 | # oct-method, 108 | # hex-method, 109 | # nonzero-method, 110 | # cmp-method, 111 | # input-builtin, 112 | # round-builtin, 113 | # intern-builtin, 114 | # unichr-builtin, 115 | # map-builtin-not-iterating, 116 | # zip-builtin-not-iterating, 117 | # range-builtin-not-iterating, 118 | # filter-builtin-not-iterating, 119 | # using-cmp-argument, 120 | # eq-without-hash, 121 | # div-method, 122 | # idiv-method, 123 | # rdiv-method, 124 | # exception-message-attribute, 125 | # invalid-str-codec, 126 | # sys-max-int, 127 | # bad-python3-import, 128 | # deprecated-string-function, 129 | # deprecated-str-translate-call, 130 | # deprecated-itertools-function, 131 | # deprecated-types-field, 132 | # next-method-defined, 133 | # dict-items-not-iterating, 134 | # dict-keys-not-iterating, 135 | # dict-values-not-iterating, 136 | # deprecated-operator-function, 137 | # deprecated-urllib-function, 138 | # xreadlines-attribute, 139 | # deprecated-sys-function, 140 | # exception-escape, 141 | # comprehension-escape 142 | 143 | # Enable the message, report, category or checker with the given id(s). You can 144 | # either give multiple identifier separated by comma (,) or put this option 145 | # multiple time (only on the command line, not in the configuration file where 146 | # it should appear only once). See also the "--disable" option for examples. 147 | #enable=c-extension-no-member 148 | 149 | 150 | [REPORTS] 151 | 152 | # Python expression which should return a note less than 10 (10 is the highest 153 | # note). You have access to the variables errors warning, statement which 154 | # respectively contain the number of errors / warnings messages and the total 155 | # number of statements analyzed. This is used by the global evaluation report 156 | # (RP0004). 157 | #evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 158 | 159 | # Template used to display messages. This is a python new-style format string 160 | # used to format the message information. See doc for all details. 161 | #msg-template= 162 | 163 | # Set the output format. Available formats are text, parseable, colorized, json 164 | # and msvs (visual studio). You can also give a reporter class, e.g. 165 | # mypackage.mymodule.MyReporterClass. 166 | output-format=colorized 167 | 168 | # Tells whether to display a full report or only the messages. 
169 | #reports=no 170 | 171 | # Activate the evaluation score. 172 | #score=yes 173 | 174 | 175 | [REFACTORING] 176 | 177 | # Maximum number of nested blocks for function / method body 178 | #max-nested-blocks=5 179 | 180 | # Complete name of functions that never returns. When checking for 181 | # inconsistent-return-statements if a never returning function is called then 182 | # it will be considered as an explicit return statement and no message will be 183 | # printed. 184 | #never-returning-functions=sys.exit 185 | 186 | 187 | [TYPECHECK] 188 | 189 | # List of decorators that produce context managers, such as 190 | # contextlib.contextmanager. Add to this list to register other decorators that 191 | # produce valid context managers. 192 | #contextmanager-decorators=contextlib.contextmanager 193 | 194 | # List of members which are set dynamically and missed by pylint inference 195 | # system, and so shouldn't trigger E1101 when accessed. Python regular 196 | # expressions are accepted. 197 | #generated-members= 198 | 199 | # Tells whether missing members accessed in mixin class should be ignored. A 200 | # mixin class is detected if its name ends with "mixin" (case insensitive). 201 | #ignore-mixin-members=yes 202 | 203 | # Tells whether to warn about missing members when the owner of the attribute 204 | # is inferred to be None. 205 | #ignore-none=yes 206 | 207 | # This flag controls whether pylint should warn about no-member and similar 208 | # checks whenever an opaque object is returned when inferring. The inference 209 | # can return multiple potential results while evaluating a Python object, but 210 | # some branches might not be evaluated, which results in partial inference. In 211 | # that case, it might be useful to still emit no-member and other checks for 212 | # the rest of the inferred objects. 213 | #ignore-on-opaque-inference=yes 214 | 215 | # List of class names for which member attributes should not be checked (useful 216 | # for classes with dynamically set attributes). This supports the use of 217 | # qualified names. 218 | #ignored-classes=optparse.Values,thread._local,_thread._local 219 | 220 | # List of module names for which member attributes should not be checked 221 | # (useful for modules/projects where namespaces are manipulated during runtime 222 | # and thus existing member attributes cannot be deduced by static analysis. It 223 | # supports qualified module names, as well as Unix pattern matching. 224 | #ignored-modules= 225 | 226 | # Show a hint with possible names when a member name was not found. The aspect 227 | # of finding the hint is based on edit distance. 228 | #missing-member-hint=yes 229 | 230 | # The minimum edit distance a name should have in order to be considered a 231 | # similar match for a missing member name. 232 | #missing-member-hint-distance=1 233 | 234 | # The total number of similar names that should be taken in consideration when 235 | # showing a hint for a missing member. 236 | #missing-member-max-choices=1 237 | 238 | 239 | [STRING] 240 | 241 | # This flag controls whether the implicit-str-concat-in-sequence should 242 | # generate a warning on implicit string concatenation in sequences defined over 243 | # several lines. 244 | #check-str-concat-over-line-jumps=no 245 | 246 | 247 | [BASIC] 248 | 249 | # Naming style matching correct argument names. 250 | #argument-naming-style=snake_case 251 | 252 | # Regular expression matching correct argument names. Overrides argument- 253 | # naming-style. 
254 | #argument-rgx= 255 | 256 | # Naming style matching correct attribute names. 257 | #attr-naming-style=snake_case 258 | 259 | # Regular expression matching correct attribute names. Overrides attr-naming- 260 | # style. 261 | #attr-rgx= 262 | 263 | # Bad variable names which should always be refused, separated by a comma. 264 | #bad-names=foo, 265 | # bar, 266 | # baz, 267 | # toto, 268 | # tutu, 269 | # tata 270 | 271 | # Naming style matching correct class attribute names. 272 | #class-attribute-naming-style=any 273 | 274 | # Regular expression matching correct class attribute names. Overrides class- 275 | # attribute-naming-style. 276 | #class-attribute-rgx= 277 | 278 | # Naming style matching correct class names. 279 | #class-naming-style=PascalCase 280 | 281 | # Regular expression matching correct class names. Overrides class-naming- 282 | # style. 283 | #class-rgx= 284 | 285 | # Naming style matching correct constant names. 286 | #const-naming-style=UPPER_CASE 287 | 288 | # Regular expression matching correct constant names. Overrides const-naming- 289 | # style. 290 | #const-rgx= 291 | 292 | # Minimum line length for functions/classes that require docstrings, shorter 293 | # ones are exempt. 294 | #docstring-min-length=-1 295 | 296 | # Naming style matching correct function names. 297 | #function-naming-style=snake_case 298 | 299 | # Regular expression matching correct function names. Overrides function- 300 | # naming-style. 301 | #function-rgx= 302 | 303 | # Good variable names which should always be accepted, separated by a comma. 304 | good-names=i, 305 | j, 306 | k, 307 | e, 308 | ex, 309 | Run, 310 | _ 311 | 312 | # Include a hint for the correct naming format with invalid-name. 313 | #include-naming-hint=no 314 | 315 | # Naming style matching correct inline iteration names. 316 | #inlinevar-naming-style=any 317 | 318 | # Regular expression matching correct inline iteration names. Overrides 319 | # inlinevar-naming-style. 320 | #inlinevar-rgx= 321 | 322 | # Naming style matching correct method names. 323 | #method-naming-style=snake_case 324 | 325 | # Regular expression matching correct method names. Overrides method-naming- 326 | # style. 327 | #method-rgx= 328 | 329 | # Naming style matching correct module names. 330 | #module-naming-style=snake_case 331 | 332 | # Regular expression matching correct module names. Overrides module-naming- 333 | # style. 334 | #module-rgx= 335 | 336 | # Colon-delimited sets of names that determine each other's naming style when 337 | # the name regexes allow several styles. 338 | #name-group= 339 | 340 | # Regular expression which should only match function or class names that do 341 | # not require a docstring. 342 | #no-docstring-rgx=^_ 343 | 344 | # List of decorators that produce properties, such as abc.abstractproperty. Add 345 | # to this list to register other decorators that produce valid properties. 346 | # These decorators are taken in consideration only for invalid-name. 347 | #property-classes=abc.abstractproperty 348 | 349 | # Naming style matching correct variable names. 350 | #variable-naming-style=snake_case 351 | 352 | # Regular expression matching correct variable names. Overrides variable- 353 | # naming-style. 354 | #variable-rgx= 355 | 356 | 357 | [MISCELLANEOUS] 358 | 359 | # List of note tags to take in consideration, separated by a comma. 360 | #notes=FIXME, 361 | # XXX, 362 | # TODO 363 | 364 | 365 | [SPELLING] 366 | 367 | # Limits count of emitted suggestions for spelling mistakes. 
368 | #max-spelling-suggestions=4 369 | 370 | # Spelling dictionary name. Available dictionaries: none. To make it working 371 | # install python-enchant package.. 372 | #spelling-dict= 373 | 374 | # List of comma separated words that should not be checked. 375 | #spelling-ignore-words= 376 | 377 | # A path to a file that contains private dictionary; one word per line. 378 | #spelling-private-dict-file= 379 | 380 | # Tells whether to store unknown words to indicated private dictionary in 381 | # --spelling-private-dict-file option instead of raising a message. 382 | #spelling-store-unknown-words=no 383 | 384 | 385 | [LOGGING] 386 | 387 | # Format style used to check logging format string. `old` means using % 388 | # formatting, while `new` is for `{}` formatting. 389 | #logging-format-style=old 390 | 391 | # Logging modules to check that the string format arguments are in logging 392 | # function parameter format. 393 | #logging-modules=logging 394 | 395 | 396 | [VARIABLES] 397 | 398 | # List of additional names supposed to be defined in builtins. Remember that 399 | # you should avoid defining new builtins when possible. 400 | #additional-builtins= 401 | 402 | # Tells whether unused global variables should be treated as a violation. 403 | #allow-global-unused-variables=yes 404 | 405 | # List of strings which can identify a callback function by name. A callback 406 | # name must start or end with one of those strings. 407 | #callbacks=cb_, 408 | # _cb 409 | 410 | # A regular expression matching the name of dummy variables (i.e. expected to 411 | # not be used). 412 | #dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 413 | 414 | # Argument names that match this expression will be ignored. Default to name 415 | # with leading underscore. 416 | #ignored-argument-names=_.*|^ignored_|^unused_ 417 | 418 | # Tells whether we should check for unused import in __init__ files. 419 | #init-import=no 420 | 421 | # List of qualified module names which can have objects that can redefine 422 | # builtins. 423 | #redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io 424 | 425 | 426 | [SIMILARITIES] 427 | 428 | # Ignore comments when computing similarities. 429 | #ignore-comments=yes 430 | 431 | # Ignore docstrings when computing similarities. 432 | #ignore-docstrings=yes 433 | 434 | # Ignore imports when computing similarities. 435 | #ignore-imports=no 436 | 437 | # Minimum lines number of a similarity. 438 | #min-similarity-lines=4 439 | 440 | 441 | [FORMAT] 442 | 443 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 444 | expected-line-ending-format=LF 445 | 446 | # Regexp for a line that is allowed to be longer than the limit. 447 | #ignore-long-lines=^\s*(# )??$ 448 | 449 | # Number of spaces of indent required inside a hanging or continued line. 450 | #indent-after-paren=4 451 | 452 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 453 | # tab). 454 | #indent-string=' ' 455 | 456 | # Maximum number of characters on a single line. 457 | max-line-length=120 458 | 459 | # Maximum number of lines in a module. 460 | #max-module-lines=1000 461 | 462 | # List of optional constructs for which whitespace checking is disabled. `dict- 463 | # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. 464 | # `trailing-comma` allows a space between comma and closing bracket: (a, ). 465 | # `empty-line` allows space-only lines. 
466 | #no-space-check=trailing-comma, 467 | # dict-separator 468 | 469 | # Allow the body of a class to be on the same line as the declaration if body 470 | # contains single statement. 471 | #single-line-class-stmt=no 472 | 473 | # Allow the body of an if to be on the same line as the test if there is no 474 | # else. 475 | #single-line-if-stmt=no 476 | 477 | 478 | [IMPORTS] 479 | 480 | # Allow wildcard imports from modules that define __all__. 481 | allow-wildcard-with-all=yes 482 | 483 | # Analyse import fallback blocks. This can be used to support both Python 2 and 484 | # 3 compatible code, which means that the block might have code that exists 485 | # only in one or another interpreter, leading to false positives when analysed. 486 | #analyse-fallback-blocks=no 487 | 488 | # Deprecated modules which should not be used, separated by a comma. 489 | #deprecated-modules=optparse,tkinter.tix 490 | 491 | # Create a graph of external dependencies in the given file (report RP0402 must 492 | # not be disabled). 493 | #ext-import-graph= 494 | 495 | # Create a graph of every (i.e. internal and external) dependencies in the 496 | # given file (report RP0402 must not be disabled). 497 | #import-graph= 498 | 499 | # Create a graph of internal dependencies in the given file (report RP0402 must 500 | # not be disabled). 501 | #int-import-graph= 502 | 503 | # Force import order to recognize a module as part of the standard 504 | # compatibility libraries. 505 | #known-standard-library= 506 | 507 | # Force import order to recognize a module as part of a third party library. 508 | #known-third-party=enchant 509 | 510 | 511 | [CLASSES] 512 | 513 | # List of method names used to declare (i.e. assign) instance attributes. 514 | #defining-attr-methods=__init__, 515 | # __new__, 516 | # setUp 517 | 518 | # List of member names, which should be excluded from the protected access 519 | # warning. 520 | #exclude-protected=_asdict, 521 | # _fields, 522 | # _replace, 523 | # _source, 524 | # _make 525 | 526 | # List of valid names for the first argument in a class method. 527 | #valid-classmethod-first-arg=cls 528 | 529 | # List of valid names for the first argument in a metaclass class method. 530 | #valid-metaclass-classmethod-first-arg=cls 531 | 532 | 533 | [DESIGN] 534 | 535 | # Maximum number of arguments for function / method. 536 | #max-args=5 537 | 538 | # Maximum number of attributes for a class (see R0902). 539 | #max-attributes=7 540 | 541 | # Maximum number of boolean expressions in an if statement. 542 | #max-bool-expr=5 543 | 544 | # Maximum number of branch for function / method body. 545 | #max-branches=12 546 | 547 | # Maximum number of locals for function / method body. 548 | #max-locals=15 549 | 550 | # Maximum number of parents for a class (see R0901). 551 | #max-parents=7 552 | 553 | # Maximum number of public methods for a class (see R0904). 554 | #max-public-methods=20 555 | 556 | # Maximum number of return / yield for function / method body. 557 | #max-returns=6 558 | 559 | # Maximum number of statements in function / method body. 560 | #max-statements=50 561 | 562 | # Minimum number of public methods for a class (see R0903). 563 | #min-public-methods=2 564 | 565 | 566 | [EXCEPTIONS] 567 | 568 | # Exceptions that will emit a warning when being caught. Defaults to 569 | # "BaseException, Exception". 
570 | #overgeneral-exceptions=BaseException, 571 | # Exception 572 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2019-2100 Michael Ludvig 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /README-agent.md: -------------------------------------------------------------------------------- 1 | # ssm-tunnel-agent 2 | 3 | This file / package should be installed on the EC2 instance 4 | through which you want to tunnel traffic to its VPC. 5 | 6 | It is useless on its own, it should be used with **[ssm-tunnel]** from 7 | **[aws-ssm-tools](https://github.com/mludvig/aws-ssm-tools)**. 8 | 9 | ## Installation 10 | 11 | The agent requires Python 2.7 or newer and has no external dependencies. It should 12 | work out of the box on any recently installed *Amazon Linux 2* instance. 13 | 14 | The easiest way to install the agent is from *[PyPI](https://pypi.org/)* repository: 15 | 16 | ``` 17 | sudo pip install aws-ssm-tunnel-agent 18 | ``` 19 | 20 | If `pip` command is not available you can download it straight from GitHub: 21 | 22 | ``` 23 | sudo curl -o /usr/local/bin/ssm-tunnel-agent https://raw.githubusercontent.com/mludvig/aws-ssm-tools/master/ssm-tunnel-agent 24 | sudo chmod +x /usr/local/bin/ssm-tunnel-agent 25 | ``` 26 | 27 | Hint: these commands can be *copy & pasted* to an **[ec2-session](https://raw.githubusercontent.com/mludvig/aws-ssm-tools)** command prompt :) 28 | 29 | ## Author and License 30 | 31 | This script was written by [Michael Ludvig](https://aws.nz/) 32 | and is released under [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0). 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # aws-ssm-tools - AWS System Manager Tools 2 | 3 | [![CircleCI](https://circleci.com/gh/mludvig/aws-ssm-tools.svg?style=shield)](https://circleci.com/gh/mludvig/aws-ssm-tools) 4 | [![PyPI](https://img.shields.io/pypi/v/aws-ssm-tools.svg)](https://pypi.org/project/aws-ssm-tools/) 5 | [![Python Versions](https://img.shields.io/pypi/pyversions/aws-ssm-tools.svg)](https://pypi.org/project/aws-ssm-tools/) 6 | 7 | Helper tools for AWS Systems Manager: `ec2-session`, `ec2-ssh` and `ssm-tunnel`, 8 | and for ECS Docker Exec: `ecs-session` 9 | 10 | ## Scripts included 11 | 12 | * **ec2-session** (formerly _ssm-session_) 13 | 14 | Wrapper around `aws ssm start-session` that can open 15 |  SSM Session to an instance specified by *Name* or *IP Address*. 16 | 17 | It doesn't need user credentials or even `sshd` running on the instance. 18 | 19 | Check out *[SSM Sessions the easy 20 | way](https://aws.nz/projects/ssm-session/)* for an example use. 21 | 22 | Works with any Linux or Windows EC2 instance registered in SSM. 
23 | 
24 | * **ecs-session**
25 | 
26 |   Wrapper around `aws ecs execute-command` that can run a command
27 |   or open an interactive session to an Exec-enabled ECS container
28 |   specified by the service, name, IP address, etc.
29 | 
30 |   It doesn't need user credentials or `sshd` running on the container;
31 |   however, the containers must be configured to allow this access.
32 | 
33 |   Check out *[Interactive shell in ECS Containers](https://aws.nz/projects/ecs-session/)*
34 |   for an example use.
35 | 
36 | * **ec2-ssh** (formerly _ssm-ssh_)
37 | 
38 |   Open an SSH connection to the remote server through *Systems Manager*
39 |   without the need for an open firewall or direct internet access. SSH can
40 |   then be used to forward ports, copy files, etc.
41 | 
42 |   Unlike `ssm-tunnel` it doesn't create a full VPN link, however it's in
43 |   some aspects more versatile as it can be used with `rsync`, `scp`,
44 |   `sftp`, etc.
45 | 
46 |   It works with any client that can run SSH (including Mac OS-X) and
47 |   doesn't require a special agent on the instance, other than the standard
48 |   AWS SSM agent.
49 | 
50 |   Also supports pushing your SSH key to the instance with `--send-key` (aka
51 |   *EC2 Instance Connect*, although that's an odd name for this function).
52 | 
53 | * **ssm-tunnel**
54 | 
55 |   Open an *IP tunnel* to the SSM instance to enable *network access*
56 |   to the instance VPC. This requires [ssm-tunnel-agent](README-agent.md)
57 |   installed on the instance.
58 | 
59 |   Works with *Amazon Linux 2* instances and probably other recent Linux
60 |   EC2 instances. Requires *Linux* on the client side - if you are on Mac
61 |   or Windows you can install a Linux VM in [VirtualBox](https://virtualbox.org).
62 | 
63 |   Requires `ssm-tunnel-agent` installed on the instance - see below for
64 |   instructions.
65 | 
66 | ## Usage
67 | 
68 | 1. **List instances** available for connection
69 | 
70 |     ```
71 |     ~ $ ec2-session --list
72 |     i-07c189021bc56e042 test1.aws.nz test1 192.168.45.158
73 |     i-094df06d3633f3267 tunnel-test.aws.nz tunnel-test 192.168.44.95
74 |     i-02689d593e17f2b75 winbox.aws.nz winbox 192.168.45.5 13.11.22.33
75 |     ```
76 | 
77 |     If you're like me and have access to many different AWS accounts you
78 |     can select the right one with `--profile` and / or change the `--region`:
79 | 
80 |     ```
81 |     ~ $ ec2-session --profile aws-sandpit --region us-west-2 --list
82 |     i-0beb42b1e6b60ac10 uswest2.aws.nz uswest2 172.31.0.92
83 |     ```
84 | 
85 |     Alternatively use the standard AWS *environment variables*:
86 | 
87 |     ```
88 |     ~ $ export AWS_DEFAULT_PROFILE=aws-sandpit
89 |     ~ $ export AWS_DEFAULT_REGION=us-west-2
90 |     ~ $ ec2-session --list
91 |     i-0beb42b1e6b60ac10 uswest2.aws.nz uswest2 172.31.0.92
92 |     ```
93 | 
94 | 2. **Open SSM session** to an instance:
95 | 
96 |     This opens an interactive shell session over SSM without the need for
97 |     a password or SSH key. Note that by default the login user is `ssm-user`.
98 |     You can specify a different user with e.g. `--user ec2-user` or
99 |     even `--user root`.
100 | 
101 |     ```
102 |     ~ $ ec2-session -v test1 --user ec2-user
103 |     Starting session with SessionId: botocore-session-0d381a3ef740153ac
104 |     [ec2-user@ip-192-168-45-158] ~ $ hostname
105 |     test1.aws.nz
106 | 
107 |     [ec2-user@ip-192-168-45-158] ~ $ id
108 |     uid=1000(ec2-user) gid=1000(ec2-user) groups=1000(ec2-user),...
109 | 
110 |     [ec2-user@ip-192-168-45-158] ~ $ ^D
111 |     Exiting session with sessionId: botocore-session-0d381a3ef740153ac.
112 | ~ $ 113 | ``` 114 | 115 | You can specify other SSM documents to run with `--document-name AWS-...` 116 | to customise your session. Refer to AWS docs for details. 117 | 118 | 3. **Open SSH session** over SSM with *port forwarding*. 119 | 120 | The `ec2-ssh` tool provides a connection and authentication mechanism 121 | for running SSH over Systems Manager. 122 | 123 | The target instance *does not need* a public IP address, it also does 124 | *not* need an open SSH port in the Security Group. All it needs is to be 125 | registered in the Systems Manager. 126 | 127 | All `ssh` options are supported, go wild. In this example we will 128 | forward port 3306 to our MySQL RDS database using the standard 129 | `-L 3306:mysql-rds.aws.nz:3306` SSH port forwarding method. 130 | 131 | ``` 132 | ~ $ ec2-ssh ec2-user@test1 -L 3306:mysql-rds.aws.nz:3306 -i ~/.ssh/aws-nz.pem 133 | [ec2-ssh] INFO: Resolved instance name 'test1' to 'i-07c189021bc56e042' 134 | [ec2-ssh] INFO: Running: ssh -o ProxyCommand='aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p' i-07c189021bc56e042 -l ec2-user -L 3306:mysql-rds.aws.nz:3306 -i ~/.ssh/aws-nz.pem 135 | OpenSSH_7.6p1 Ubuntu-4ubuntu0.3, OpenSSL 1.0.2n 7 Dec 2017 136 | ... 137 | Last login: Sun Apr 12 20:05:09 2020 from localhost 138 | 139 | __| __|_ ) 140 | _| ( / Amazon Linux 2 AMI 141 | ___|\___|___| 142 | 143 | [ec2-user@ip-192-168-45-158] ~ $ 144 | ``` 145 | 146 | From another terminal we can now connect to the MySQL RDS. Since the 147 | port 3306 is forwarded from *localhost* through the tunnel we will 148 | instruct `mysql` client to connect to `127.0.0.1` (localhost). 149 | 150 | ``` 151 | ~ $ mysql -h 127.0.0.1 -u {RdsMasterUser} -p 152 | Enter password: {RdsMasterPassword} 153 | Welcome to the MariaDB monitor. Commands end with ; or \g. 154 | Server version: 5.6.10 MySQL Community Server (GPL) 155 | 156 | MySQL [(none)]> show processlist; 157 | +-----+------------+-----------------------+ 158 | | Id | User | Host | 159 | +-----+------------+-----------------------+ 160 | | 52 | rdsadmin | localhost | 161 | | 289 | masteruser | 192.168.45.158:52182 | <<< Connection from test1 IP 162 | +-----+------------+-----------------------+ 163 | 2 rows in set (0.04 sec) 164 | ``` 165 | 166 | 4. **Use `rsync` with `ec2-ssh`** to copy files to/from EC2 instance. 167 | 168 | Since in the end we run a standard `ssh` client we can use it with 169 | [rsync](https://en.wikipedia.org/wiki/Rsync) to copy files to/from the 170 | EC2 instance. 171 | 172 | ``` 173 | ~ $ rsync -e ec2-ssh -Prv ec2-user@test1:some-file.tar.gz . 174 | some-file.tar.gz 175 | 31,337,841 100% 889.58kB/s 0:00:34 (xfr#1, to-chk=0/1) 176 | sent 43 bytes received 31,345,607 bytes 814,172.73 bytes/sec 177 | total size is 31,337,841 speedup is 1.00 178 | ``` 179 | 180 | We can also select a different AWS profile and/or region: 181 | 182 | ``` 183 | ~ $ rsync -e "ec2-ssh --profile aws-sandpit --region us-west-2" -Prv ... 184 | ``` 185 | 186 | Alternatively set the profile and region through standard AWS 187 | *environment variables* `AWS_DEFAULT_PROFILE` and 188 | `AWS_DEFAULT_REGION`.` 189 | 190 | 5. **Create IP tunnel** and SSH to another instance in the VPC through it. 191 | 192 | We will use `--route 192.168.44.0/23` that gives us access to the VPC CIDR. 
193 | 194 | ``` 195 | ~ $ ssm-tunnel -v tunnel-test --route 192.168.44.0/23 196 | [ssm-tunnel] INFO: Local IP: 100.64.160.100 / Remote IP: 100.64.160.101 197 | 00:00:15 | In: 156.0 B @ 5.2 B/s | Out: 509.0 B @ 40.4 B/s 198 | ``` 199 | 200 | Leave it running and from another shell `ssh` to one of the instances listed 201 | with `--list` above. For example to `test1` that's got VPC IP `192.168.45.158`: 202 | 203 | ``` 204 | ~ $ ssh ec2-user@192.168.45.158 205 | Last login: Tue Jun 18 20:50:59 2019 from 100.64.142.232 206 | ... 207 | [ec2-user@test1 ~]$ w -i 208 | 21:20:43 up 1:43, 1 user, load average: 0.00, 0.00, 0.00 209 | USER TTY FROM LOGIN@ IDLE JCPU PCPU WHAT 210 | ec2-user pts/0 192.168.44.95 21:20 3.00s 0.02s 0.00s w -i 211 | ^^^^^^^^^^^^^ 212 | [ec2-user@test1 ~]$ exit 213 | Connection to 192.168.45.158 closed. 214 | ~ $ 215 | ``` 216 | 217 | Note the source IP `192.168.44.95` that belongs to the `tunnel-test` 218 | instance - our connections will *appear* as if they come from this instance. 219 | Obviously the **Security Groups** of your other instances must allow SSH 220 | access from the IP or SG of your tunnelling instance. 221 | 222 | All these tools support `--help` and a set of common parameters: 223 | 224 | --profile PROFILE, -p PROFILE 225 | Configuration profile from ~/.aws/{credentials,config} 226 | --region REGION, -g REGION 227 | Set / override AWS region. 228 | --verbose, -v Increase log level. 229 | --debug, -d Increase log level even more. 230 | 231 | `ec2-ssh` only supports the long options to prevent conflict with `ssh`'s 232 | own short options that are being passed through. 233 | 234 | Standard AWS environment variables like `AWS_DEFAULT_PROFILE`, 235 | `AWS_DEFAULT_REGION`, etc, are also supported. 236 | 237 | ## Installation 238 | 239 | All the tools use **AWS CLI** to open **SSM Session** and then use that 240 | session to run commands on the target instance. The target instances **must be 241 | registered in SSM**, which means they need: 242 | 243 | - **connectivity to SSM endpoint**, e.g. through public IP, NAT Gateway, or 244 | SSM VPC endpoint. 245 | - **EC2 instance IAM Role** with permissions to connect to Systems Manager. 246 | 247 | Follow the detailed instructions at [**Using SSM Session Manager for 248 | interactive instance access**](https://aws.nz/best-practice/ssm-session-manager/) 249 | for more informations. 250 | 251 | ### Install *AWS CLI* and `session-manager-plugin` 252 | 253 | Make sure you've got `aws` and `session-manager-plugin` installed locally 254 | on your laptop. 255 | 256 | ``` 257 | ~ $ aws --version 258 | aws-cli/1.18.31 Python/3.6.9 Linux/5.3.0-42-generic botocore/1.15.31 259 | 260 | ~ $ session-manager-plugin --version 261 | 1.1.56.0 262 | ``` 263 | 264 | Follow [AWS CLI installation 265 | guide](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) 266 | and [session-manager-plugin 267 | installation guide](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) to install them if needed. 268 | 269 | Note that `ec2-ssh` needs `session-manager-plugin` version *1.1.23* or 270 | newer. Upgrade if your version is older. 271 | 272 | ### Register your instances with Systems Manager 273 | 274 | *Amazon Linux 2* instances already have the `amazon-ssm-agent` installed and 275 | running. 
All they need to register with *Systems Manager* is 276 | **AmazonEC2RoleforSSM** managed role in their *IAM Instance Role* and network 277 | access to `ssm.{region}.amazonaws.com` either directly or through a *https proxy*. 278 | 279 | Check out the [detailed instructions](https://aws.nz/best-practice/ssm-session-manager/) for more info. 280 | 281 | ### Install SSM-Tools *(finally! :)* 282 | 283 | The easiest way is to install the ssm-tools from *[PyPI](https://pypi.org/)* repository: 284 | 285 | ``` 286 | sudo pip3 install aws-ssm-tools 287 | ``` 288 | 289 | **NOTE:** SSM Tools require **Python 3.6 or newer**. Only the `ssm-tunnel-agent` 290 | requires **Python 2.7 or newer** as that's what's available by default 291 | on *Amazon Linux 2* instances. 292 | 293 | ### Standalone *ssm-tunnel-agent* installation 294 | 295 | Refer to *[README-agent.md](README-agent.md)* for `ssm-tunnel-agent` 296 | installation details. 297 | 298 | Alternatively it's also bundled with this package, you can take it from here and 299 | copy to `/usr/local/bin/ssm-tunnel-agent` on the instance. Make it executable 300 | and it should just work. 301 | 302 | ## Other AWS Utilities 303 | 304 | Check out **[AWS Utils](https://github.com/mludvig/aws-utils)** 305 | repository for more useful AWS tools. 306 | 307 | ## Author and License 308 | 309 | All these scripts were written by [Michael Ludvig](https://aws.nz/) 310 | and are released under [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0). 311 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | 3 | python3 setup.py clean --all 4 | python3 setup.py sdist bdist_wheel 5 | 6 | # Always run 'clean' before building agent to prevent 7 | # inclusion of unneeded files! 8 | python3 setup.py clean --all 9 | python3 setup-agent.py sdist bdist_wheel --universal 10 | -------------------------------------------------------------------------------- /check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Linter and formatter scripts. Used by .circleci/config.yml 4 | 5 | # This can also be called with {black, pylint, mypy} as a parameter 6 | # to run only the specific checks. 
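#
# Examples:
#   ./check.sh            # run all checks
#   ./check.sh pylint     # run only the pylint checks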
7 | 8 | set -x 9 | 10 | PYTHON_SCRIPTS="$(grep -l '#!/usr/bin/env python3' ssm-* ecs-* ec2-*) setup.py" 11 | 12 | test -z "$1" -o "$1" == "black" && black --line-length 250 --check --diff ${PYTHON_SCRIPTS} ssm_tools/*.py 13 | 14 | test -z "$1" -o "$1" == "pylint" && pylint --exit-zero --disable=invalid-name,missing-docstring,line-too-long ${PYTHON_SCRIPTS} ssm_tools/*.py ssm-tunnel-agent 15 | test -z "$1" -o "$1" == "pylint" && pylint --errors-only ${PYTHON_SCRIPTS} ssm_tools/*.py ssm-tunnel-agent 16 | 17 | test -z "$1" -o "$1" == "mypy" && mypy --scripts-are-modules --ignore-missing-imports ${PYTHON_SCRIPTS} 18 | -------------------------------------------------------------------------------- /ec2-session: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | from ssm_tools.ssm_session_cli import main 5 | 6 | if __name__ == "__main__": 7 | sys.exit(main()) 8 | -------------------------------------------------------------------------------- /ec2-ssh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | from ssm_tools.ssm_ssh_cli import main 5 | 6 | if __name__ == "__main__": 7 | sys.exit(main()) 8 | -------------------------------------------------------------------------------- /ecs-session: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | from ssm_tools.ecs_session_cli import main 5 | 6 | if __name__ == "__main__": 7 | sys.exit(main()) 8 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pexpect 2 | packaging 3 | 4 | # AWS Stuff 5 | botocore 6 | boto3 >= 1.22.0 7 | -------------------------------------------------------------------------------- /sample-templates/README.md: -------------------------------------------------------------------------------- 1 | # Sample CloudFormation and Terraform templates 2 | 3 | This directory contains sample CloudFormation and Terraform templates 4 | configured for use with `ec2-session` and `ecs-session`. 5 | 6 | They include the required IAM roles, ECS configurations, etc. 7 | 8 | ## [template-ecs-task.yml](template-ecs-task.yml) 9 | 10 | CloudFormation template that spins up an ECS cluster with a sample 11 | ECS service (nginx container) to which you can login with `ecs-session`. 12 | 13 | ## [terraform/ecs.tf](terraform/ecs.tf) 14 | 15 | Terraform configuration that spins up an ECS cluster with a sample 16 | ECS service (Apache httpd container) to which you can login with `ecs-session`. 
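
To try them out you need working AWS credentials and a default region configured.
The commands below are only a rough sketch - the stack name and the parameter
values (VPC and subnet IDs) are placeholders that you need to adjust. The
Terraform sample uses your account's default VPC and subnets, so it takes no
parameters.

```
# CloudFormation sample - it creates IAM roles, hence --capabilities
aws cloudformation deploy \
    --template-file template-ecs-task.yml \
    --stack-name ecs-exec-demo \
    --capabilities CAPABILITY_IAM \
    --parameter-overrides \
        VpcId=vpc-0123456789abcdef0 \
        "SubnetIds=subnet-0aaa1111,subnet-0bbb2222" \
        PublicIp=ENABLED

# Terraform sample
cd terraform
terraform init
terraform apply
```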
17 | 
--------------------------------------------------------------------------------
/sample-templates/template-ecs-task.yml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: '2010-09-09'
3 | 
4 | Description: ECS Execute configuration demo
5 | 
6 | Metadata:
7 |   Author: Michael Ludvig (https://aws.nz)
8 |   RepoUrl: https://github.com/mludvig/aws-ssm-tools
9 |   AWS::CloudFormation::Interface:
10 |     ParameterGroups:
11 |       - Label:
12 |           default: Task Configuration
13 |         Parameters:
14 |           - ContainerName
15 |           - ContainerImage
16 |           - NumTasks
17 | 
18 |       - Label:
19 |           default: Network Configuration
20 |         Parameters:
21 |           - VpcId
22 |           - SubnetIds
23 |           - PublicIp
24 | 
25 | Parameters:
26 |   VpcId:
27 |     Description: VPC where you want the containers deployed
28 |     Type: AWS::EC2::VPC::Id
29 | 
30 |   SubnetIds:
31 |     Description: All subnets must be public or all must be private. Don't mix.
32 |     Type: List<AWS::EC2::Subnet::Id>
33 | 
34 |   PublicIp:
35 |     Description: Select ENABLED when deploying to public subnets, or DISABLED when using private subnets.
36 |     Type: String
37 |     AllowedValues:
38 |       - ENABLED
39 |       - DISABLED
40 | 
41 |   NumTasks:
42 |     Type: Number
43 |     Default: 2
44 | 
45 |   ContainerName:
46 |     Type: String
47 |     Default: nginx
48 | 
49 |   ContainerImage:
50 |     Type: String
51 |     Default: docker.io/nginx:latest
52 | 
53 | Resources:
54 |   Cluster:
55 |     Type: AWS::ECS::Cluster
56 | 
57 |   TaskDefinition:
58 |     Type: AWS::ECS::TaskDefinition
59 |     Properties:
60 |       NetworkMode: awsvpc # Needed for NLB (because NLB doesn't support SG)
61 |       TaskRoleArn: !Ref TaskRole
62 |       ExecutionRoleArn: !Ref TaskExecutionRole
63 |       RequiresCompatibilities:
64 |         - FARGATE
65 |       Cpu: 256
66 |       Memory: 512
67 |       ContainerDefinitions:
68 |         - Name: !Ref ContainerName
69 |           Essential: true
70 |           Image: !Ref ContainerImage
71 |           LogConfiguration:
72 |             LogDriver: awslogs
73 |             Options:
74 |               awslogs-region: !Ref AWS::Region
75 |               awslogs-group: !Ref TaskLogsGroup
76 |               awslogs-stream-prefix: !Ref ContainerName
77 |           PortMappings:
78 |             - ContainerPort: 80
79 |               HostPort: 80
80 | 
81 |   Service:
82 |     Type: AWS::ECS::Service
83 |     Properties:
84 |       Cluster: !Ref Cluster
85 |       DesiredCount: !Ref NumTasks
86 |       DeploymentConfiguration:
87 |         MinimumHealthyPercent: 100
88 |         MaximumPercent: 200
89 |       EnableExecuteCommand: true # ECS Execute Command enabled
90 |       TaskDefinition: !Ref TaskDefinition
91 |       LaunchType: FARGATE
92 |       PlatformVersion: LATEST # LATEST is 1.4 as of now, that's compatible with ECS Exec
93 |       NetworkConfiguration:
94 |         AwsvpcConfiguration:
95 |           AssignPublicIp: !Ref PublicIp
96 |           Subnets: !Ref SubnetIds
97 |           SecurityGroups:
98 |             - !Ref TaskSecurityGroup
99 |       Tags:
100 |         - Key: Name
101 |           Value: !Sub "${AWS::StackName}-service"
102 | 
103 |   TaskSecurityGroup:
104 |     Type: AWS::EC2::SecurityGroup
105 |     Properties:
106 |       GroupDescription: !Sub "${AWS::StackName}-task"
107 |       SecurityGroupEgress:
108 |         - IpProtocol: tcp
109 |           FromPort: 443
110 |           ToPort: 443
111 |           CidrIp: 0.0.0.0/0 # Required for SSM service access
112 |       SecurityGroupIngress:
113 |         - IpProtocol: tcp
114 |           FromPort: 80
115 |           ToPort: 80
116 |           CidrIp: 0.0.0.0/0 # Only for demo purposes, not needed for SSM
117 |       VpcId: !Ref VpcId
118 |       Tags:
119 |         - Key: Name
120 |           Value: !Sub "${AWS::StackName}-task"
121 | 
122 |   TaskRole:
123 |     Type: AWS::IAM::Role
124 |     Properties:
125 |       AssumeRolePolicyDocument:
126 |         Statement:
127 |           - Effect: Allow
128 |             Principal:
129 |               Service: [ ecs-tasks.amazonaws.com ]
130 |             Action:
131 |               - sts:AssumeRole
132 |       Path: /
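      # ECS Exec sessions run over SSM messaging channels - the task role needs
      # the ssmmessages:* permissions, which are included in the managed policy below.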
133 | ManagedPolicyArns: 134 | - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore # Required to use SSM 135 | 136 | TaskExecutionRole: 137 | Type: AWS::IAM::Role 138 | Properties: 139 | AssumeRolePolicyDocument: 140 | Statement: 141 | - Effect: Allow 142 | Principal: 143 | Service: [ ecs-tasks.amazonaws.com ] 144 | Action: 145 | - sts:AssumeRole 146 | Path: / 147 | ManagedPolicyArns: 148 | - arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy 149 | - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role 150 | 151 | TaskLogsGroup: 152 | Type: AWS::Logs::LogGroup 153 | Properties: 154 | LogGroupName: !Ref AWS::StackName 155 | RetentionInDays: 400 156 | -------------------------------------------------------------------------------- /sample-templates/terraform/.gitignore: -------------------------------------------------------------------------------- 1 | *.tfstate 2 | *.tfstate.backup 3 | .terraform* 4 | -------------------------------------------------------------------------------- /sample-templates/terraform/ecs.tf: -------------------------------------------------------------------------------- 1 | ## Terraform configuration for "ECS Exec"-enabled containers. 2 | 3 | resource "aws_ecs_cluster" "ecs_cluster" { 4 | name = "${var.project_name}-cluster" 5 | } 6 | 7 | resource "aws_ecs_task_definition" "task_def" { 8 | family = "${var.project_name}-task-def" 9 | network_mode = "awsvpc" 10 | task_role_arn = aws_iam_role.ecs_task_role.arn 11 | execution_role_arn = aws_iam_role.ecs_task_execution_role.arn 12 | requires_compatibilities = ["FARGATE"] 13 | cpu = 256 14 | memory = 512 15 | container_definitions = jsonencode([{ 16 | Name = var.container_name 17 | Essential = true 18 | Image = var.container_image 19 | LogConfiguration = { 20 | LogDriver = "awslogs" 21 | Options = { 22 | awslogs-region = data.aws_region.current.name 23 | awslogs-group = aws_cloudwatch_log_group.ecs_logs.id 24 | awslogs-stream-prefix = "${var.project_name}-logs" 25 | } 26 | } 27 | }]) 28 | } 29 | 30 | resource "aws_ecs_service" "service" { 31 | name = "${var.project_name}-service" 32 | cluster = aws_ecs_cluster.ecs_cluster.id 33 | task_definition = aws_ecs_task_definition.task_def.arn 34 | desired_count = var.task_count 35 | #deployment_maximum_percent = 200 36 | #deployment_minimum_healthy_percent = 100 37 | wait_for_steady_state = true 38 | launch_type = "FARGATE" 39 | platform_version = "LATEST" # LATEST is 1.4.0 -> ok 40 | enable_execute_command = true # Enable ECS Exec 41 | enable_ecs_managed_tags = true 42 | network_configuration { 43 | assign_public_ip = true 44 | subnets = local.default_subnet_ids 45 | security_groups = [aws_security_group.ecs_task_sg.id] 46 | } 47 | } 48 | 49 | resource "aws_iam_role" "ecs_task_role" { 50 | name = "${var.project_name}-task-role" 51 | assume_role_policy = jsonencode({ 52 | Version = "2012-10-17" 53 | Statement = [ 54 | { 55 | Action = "sts:AssumeRole" 56 | Effect = "Allow" 57 | Sid = "" 58 | Principal = { 59 | Service = "ecs-tasks.amazonaws.com" 60 | } 61 | }, 62 | ] 63 | }) 64 | managed_policy_arns = [ 65 | "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" 66 | ] 67 | } 68 | 69 | resource "aws_iam_role" "ecs_task_execution_role" { 70 | name = "${var.project_name}-task-execution-role" 71 | assume_role_policy = jsonencode({ 72 | Version = "2012-10-17" 73 | Statement = [ 74 | { 75 | Action = "sts:AssumeRole" 76 | Effect = "Allow" 77 | Sid = "" 78 | Principal = { 79 | Service = "ecs-tasks.amazonaws.com" 80 | } 81 | }, 82 | ] 83 | }) 84 
| managed_policy_arns = [ 85 | "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" 86 | ] 87 | } 88 | 89 | resource "aws_security_group" "ecs_task_sg" { 90 | name = "${var.project_name}-task-sg" 91 | description = "Task Security Group" 92 | vpc_id = aws_default_vpc.default.id 93 | 94 | # Configure 'ingress' rules as required by your containers 95 | #ingress { 96 | # description = "Access to ECS tasks" 97 | # protocol = "tcp" 98 | # from_port = 80 99 | # to_port = 80 100 | # cidr_blocks = ["0.0.0.0/0"] 101 | #} 102 | 103 | egress { 104 | description = "Outbound access from ECS tasks to SSM service" 105 | protocol = "tcp" 106 | from_port = 443 107 | to_port = 443 108 | cidr_blocks = ["0.0.0.0/0"] 109 | } 110 | } 111 | 112 | resource "aws_cloudwatch_log_group" "ecs_logs" { 113 | name = "${var.project_name}-logs" 114 | retention_in_days = 400 115 | } 116 | -------------------------------------------------------------------------------- /sample-templates/terraform/infra.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | 3 | data "aws_availability_zones" "available" { 4 | state = "available" 5 | } 6 | 7 | resource "aws_default_vpc" "default" { 8 | } 9 | 10 | resource "aws_default_subnet" "default" { 11 | for_each = toset(data.aws_availability_zones.available.names) 12 | availability_zone = each.key 13 | } 14 | 15 | locals { 16 | default_subnet_ids = [ for entry in aws_default_subnet.default : entry.id ] 17 | } 18 | -------------------------------------------------------------------------------- /sample-templates/terraform/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.25" 8 | } 9 | } 10 | } 11 | 12 | provider "aws" { } 13 | -------------------------------------------------------------------------------- /sample-templates/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_name" { 2 | default = "tf-demo" 3 | } 4 | 5 | variable "container_image" { 6 | default = "docker.io/httpd:latest" 7 | } 8 | 9 | variable "container_name" { 10 | default = "apache" 11 | } 12 | 13 | variable "task_count" { 14 | default = 2 15 | } 16 | -------------------------------------------------------------------------------- /setup-agent.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | 3 | from setuptools import setup, find_packages 4 | 5 | import ssm_tools 6 | 7 | HERE = pathlib.Path(__file__).parent 8 | README = (HERE / "README-agent.md").read_text() 9 | 10 | setup( 11 | name="aws-ssm-tunnel-agent", 12 | version=ssm_tools.__version__, 13 | scripts=[ 14 | 'ssm-tunnel-agent' 15 | ], 16 | 17 | python_requires='>=2.7', 18 | 19 | install_requires=[ ], 20 | 21 | author="Michael Ludvig", 22 | author_email="mludvig@logix.net.nz", 23 | description="ssm-tunnel-agent for ssm-tunnel script from aws-ssm-tools package", 24 | long_description=README, 25 | long_description_content_type="text/markdown", 26 | license="Apache License 2.0", 27 | keywords="aws ssm ssm-tunnel ssm-tunnel-agent", 28 | url="https://github.com/mludvig/aws-ssm-tools", 29 | project_urls={ 30 | "Bug Tracker": "https://github.com/mludvig/aws-ssm-tools/issues", 31 | "Documentation": "https://github.com/mludvig/aws-ssm-tools/blob/master/README.md", 32 | "Source Code": 
"https://github.com/mludvig/aws-ssm-tools", 33 | } 34 | ) 35 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # aws-ssm-tools packaging 2 | 3 | import os 4 | import sys 5 | import pathlib 6 | 7 | from setuptools import setup, find_packages 8 | from setuptools.command.install import install 9 | 10 | import ssm_tools 11 | 12 | HERE = pathlib.Path(__file__).parent 13 | README = (HERE / "README.md").read_text() 14 | 15 | SCRIPTS = [ 16 | "ec2-session", 17 | "ecs-session", 18 | "ec2-ssh", 19 | "ssm-tunnel", 20 | # Renamed, deprecated and soon to be removed... 21 | "ssm-session", 22 | "ssm-ssh", 23 | ] 24 | VERSION = ssm_tools.__version__ 25 | 26 | requirements = HERE / "requirements.txt" 27 | with requirements.open() as f: 28 | reqs = [req.strip() for req in f.readlines() if req.strip() and not req.startswith("#")] 29 | 30 | 31 | class VerifyVersionCommand(install): 32 | """Custom command to verify that the git tag matches our version""" 33 | 34 | description = "verify that the git tag matches our version" 35 | 36 | def run(self) -> None: 37 | tag = os.getenv("CIRCLE_TAG") 38 | if not tag: 39 | sys.exit("Env var $CIRCLE_TAG is not defined - are we running a CircleCI build?") 40 | 41 | if tag.startswith("v"): # If tag is v1.2.3 make it 1.2.3 42 | tag = tag[1:] 43 | 44 | if tag != VERSION: 45 | info = f"Git tag: {tag} does not match the version of this app: {VERSION}" 46 | sys.exit(info) 47 | 48 | 49 | def console_scripts() -> list: 50 | scripts = [] 51 | for script in SCRIPTS: 52 | # All script entries must be in this format: 53 | # "ec2-session = ssm_tools.ec2_session_cli:main" 54 | if script in ["ec2-session", "ec2-ssh"]: 55 | scripts.append(f"{script} = ssm_tools.{script.replace('-','_').replace('ec2','ssm')}_cli:main") 56 | else: 57 | scripts.append(f"{script} = ssm_tools.{script.replace('-','_')}_cli:main") 58 | return scripts 59 | 60 | 61 | setup( 62 | name="aws-ssm-tools", 63 | version=VERSION, 64 | packages=find_packages(), 65 | entry_points={ 66 | "console_scripts": console_scripts(), 67 | }, 68 | python_requires=">=3.6", 69 | install_requires=reqs, 70 | package_data={ 71 | "": ["*.txt", "*.md", "ssm-tunnel-updown.dns-example", "LICENSE"], 72 | }, 73 | author="Michael Ludvig", 74 | author_email="mludvig@logix.net.nz", 75 | description="Tools for AWS Systems Manager: " + " ".join(SCRIPTS), 76 | long_description=README, 77 | long_description_content_type="text/markdown", 78 | license="Apache License 2.0", 79 | keywords="aws ssm " + " ".join(SCRIPTS), 80 | url="https://github.com/mludvig/aws-ssm-tools", 81 | project_urls={ 82 | "Bug Tracker": "https://github.com/mludvig/aws-ssm-tools/issues", 83 | "Documentation": "https://github.com/mludvig/aws-ssm-tools/blob/master/README.md", 84 | "Source Code": "https://github.com/mludvig/aws-ssm-tools", 85 | }, 86 | classifiers=[ 87 | "Environment :: Console", 88 | "Intended Audience :: System Administrators", 89 | "License :: OSI Approved :: Apache Software License", 90 | "Development Status :: 5 - Production/Stable", 91 | "Operating System :: POSIX :: Linux", 92 | "Programming Language :: Python :: 3 :: Only", 93 | "Programming Language :: Python :: 3.6", 94 | "Programming Language :: Python :: 3.7", 95 | "Programming Language :: Python :: 3.8", 96 | "Programming Language :: Python :: 3.9", 97 | "Programming Language :: Python :: 3.10", 98 | "Programming Language :: Python :: 3.11", 99 | "Topic :: System :: Systems 
Administration", 100 | "Topic :: System :: Networking", 101 | ], 102 | cmdclass={ 103 | "verify": VerifyVersionCommand, 104 | }, 105 | ) 106 | -------------------------------------------------------------------------------- /ssm-tunnel: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | from ssm_tools.ssm_tunnel_cli import main 5 | 6 | if __name__ == "__main__": 7 | sys.exit(main()) 8 | -------------------------------------------------------------------------------- /ssm-tunnel-agent: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # ssm-tunnel-agent - remote agent for ssm-tunnel 4 | # this should be installed on the EC2 instance 5 | # and available in the $PATH (e.g. in /usr/local/bin) 6 | # 7 | # Author: Michael Ludvig 8 | 9 | # Intentionally Python 2 with no external dependencies as that's 10 | # what's available by default on Amazon Linux 2 as of now. 11 | 12 | from __future__ import print_function 13 | 14 | import os 15 | import sys 16 | import time 17 | import errno 18 | import struct 19 | import select 20 | import fcntl 21 | import threading 22 | from base64 import b64encode, b64decode 23 | 24 | timeout_sec = 60 # Exit and cleanup if we don't get any input 25 | keepalive_sec = 10 # Send a dummy message this often 26 | 27 | def run_command(command, assert_0=True): 28 | print("# {}".format(command)) 29 | ret = os.system(command) 30 | if assert_0: 31 | assert ret == 0 32 | 33 | def create_tun(tun_name, local_ip, remote_ip): 34 | params = { 35 | "tun_name": tun_name, 36 | "local_ip": local_ip, 37 | "remote_ip": remote_ip, 38 | "user_id": os.getuid(), 39 | } 40 | try: 41 | run_command("sudo ip tuntap add {tun_name} mode tun user {user_id}".format(**params)) 42 | run_command("sudo ip addr add {local_ip} peer {remote_ip} dev {tun_name}".format(**params)) 43 | run_command("sudo ip link set {tun_name} up".format(**params)) 44 | # Enable forwarding 45 | run_command("sudo sysctl -q -w net.ipv4.ip_forward=1".format(**params), assert_0=False) 46 | run_command("sudo iptables -t nat -I POSTROUTING -m comment --comment \"{tun_name}\" -s {remote_ip} -j MASQUERADE".format(**params), assert_0=False) 47 | except AssertionError: 48 | delete_tun(tun_name, local_ip, remote_ip) 49 | quit(1) 50 | except: 51 | delete_tun(tun_name, local_ip, remote_ip) 52 | raise 53 | 54 | def delete_tun(tun_name, local_ip, remote_ip): 55 | params = { 56 | "tun_name": tun_name, 57 | "local_ip": local_ip, 58 | "remote_ip": remote_ip, 59 | } 60 | # We don't check return code here - best effort to delete the devices 61 | run_command("sudo ip link set {tun_name} down".format(**params), assert_0=False) 62 | run_command("sudo ip tuntap del {tun_name} mode tun".format(**params), assert_0=False) 63 | run_command("sudo iptables -t nat -D POSTROUTING -m comment --comment \"{tun_name}\" -s {remote_ip} -j MASQUERADE".format(**params), assert_0=False) 64 | 65 | def setup_tun(tun_name): 66 | TUNSETIFF = 0x400454ca 67 | IFF_TUN = 0x0001 68 | 69 | tun_fd = os.open("/dev/net/tun", os.O_RDWR) 70 | 71 | flags = IFF_TUN 72 | ifr = struct.pack('16sH22s', tun_name.encode(), flags, b'\x00'*22) 73 | fcntl.ioctl(tun_fd, TUNSETIFF, ifr) 74 | 75 | return tun_fd 76 | 77 | def tun_reader(tun_fd): 78 | while True: 79 | try: 80 | r, w, x = select.select([tun_fd], [], [], keepalive_sec) 81 | if not tun_fd in r: 82 | # Keepalive timeout - send '#' 83 | sys.stdout.write("#\n") 84 | sys.stdout.flush() 85 | continue 
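            # Line framing used over the SSM session: each IP packet read from the
            # tun device is written to stdout as '%' + base64(packet) + '\n', and a
            # lone '#' line is sent as a keepalive (see above). main() below expects
            # the same framing on stdin and writes decoded '%' payloads back into the
            # tun device. The ssm-tunnel client on the laptop side is expected to
            # speak the same line protocol.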
86 |             buf = os.read(tun_fd, 1504)   # Virtual GRE header adds 4 bytes
87 |             sys.stdout.write("%{}\n".format(b64encode(buf).decode('ascii')))
88 |             sys.stdout.flush()
89 |         except OSError as e:
90 |             if e.errno == errno.EBADF:
91 |                 # Closed FD during exit
92 |                 break
93 | 
94 | def main():
95 |     local_ip = sys.argv[1]
96 |     remote_ip = sys.argv[2]
97 | 
98 |     tun_suffix = ".".join(local_ip.split(".")[2:])
99 |     tun_name = "tunSSM.{}".format(tun_suffix)
100 | 
101 |     create_tun(tun_name, local_ip, remote_ip)
102 | 
103 |     tun_fd = setup_tun(tun_name)
104 |     print("# Agent device {} is ready [{}]".format(tun_name, sys.argv[1]))
105 | 
106 |     t = threading.Thread(target=tun_reader, args=(tun_fd,))
107 |     t.daemon = True
108 |     t.start()
109 | 
110 |     try:
111 |         last_ts = time.time()
112 |         stdin_fd = sys.stdin.fileno()   # Should be '0', but still...
113 |         while True:
114 |             r, w, x = select.select([stdin_fd], [], [], 1)   # Wait 1 sec for input
115 |             if not stdin_fd in r:
116 |                 if last_ts + timeout_sec < time.time():
117 |                     print("# ERROR: {} sec timeout, exiting...".format(timeout_sec))
118 |                     break
119 |                 continue
120 |             line = sys.stdin.readline()
121 |             last_ts = time.time()
122 |             if line[0] == '%':
123 |                 buf = b64decode(line[1:].strip('\n\r'))
124 |                 os.write(tun_fd, buf)
125 | 
126 |     except KeyboardInterrupt:
127 |         pass
128 | 
129 |     finally:
130 |         os.close(tun_fd)
131 |         delete_tun(tun_name, local_ip, remote_ip)
132 | 
133 | if __name__ == "__main__":
134 |     main()
135 | 
--------------------------------------------------------------------------------
/ssm-tunnel-updown.dns-example:
--------------------------------------------------------------------------------
 1 | #!/bin/bash -e
 2 | 
 3 | # Simple up-down script that can set DNS servers for domains
 4 | # resolvable over the SSM Tunnel link.
 5 | # For example, if you have a Private VPC Route53 zone you can
 6 | # make it resolvable by your laptop with this script.
 7 | 
 8 | # Requires 'update-systemd-resolved' in $PATH. Download it from
 9 | # https://github.com/jonathanio/update-systemd-resolved
10 | 
11 | # It is called with these parameters:
12 | # {status} {device-name} {local-ip} {remote-ip} [{route} ...]
13 | # Where {status} will be: 'up' or 'down'.
14 | 
15 | ## Update these two variables to your needs
16 | 
17 | DNS_SERVERS="172.31.0.2"
18 | DNS_DOMAINS="example.com vpc-only.example.com"
19 | 
20 | ## Nothing to configure below here
21 | 
22 | if [ "$1" != "up" ]; then
23 |     # Only handle 'up' events. The config will be auto-cleared
24 |     # when the interface goes down.
25 | exit 0 26 | fi 27 | 28 | # Set OpenVPN-style variables 29 | export script_type="up" 30 | export dev="$2" 31 | 32 | # Build the foreign_option_X variables 33 | IDX=1 34 | for DNS_SERVER in ${DNS_SERVERS}; do 35 | export foreign_option_${IDX}="dhcp-option DNS ${DNS_SERVER}" 36 | IDX=$((IDX + 1)) 37 | done 38 | 39 | for DNS_DOMAIN in ${DNS_DOMAINS}; do 40 | export foreign_option_${IDX}="dhcp-option DOMAIN-ROUTE ${DNS_DOMAIN}" 41 | IDX=$((IDX + 1)) 42 | done 43 | 44 | exec update-systemd-resolved 45 | -------------------------------------------------------------------------------- /ssm_tools/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.6.0" 2 | -------------------------------------------------------------------------------- /ssm_tools/common.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import pathlib 3 | import logging 4 | import argparse 5 | import subprocess 6 | 7 | from typing import List, Tuple 8 | 9 | import boto3 10 | import botocore.credentials 11 | 12 | import packaging.version 13 | from . import __version__ as ssm_tools_version 14 | 15 | __all__ = [] 16 | 17 | # --------------------------------------------------------- 18 | 19 | __all__.append("configure_logging") 20 | 21 | 22 | def configure_logging(level: int) -> None: 23 | """ 24 | Configure logging format and level. 25 | """ 26 | if level == logging.DEBUG: 27 | logging_format = "[%(name)s] %(levelname)s: %(message)s" 28 | else: 29 | logging_format = "%(levelname)s: %(message)s" 30 | 31 | # Default log level is set to WARNING 32 | logging.basicConfig(level=logging.WARNING, format=logging_format) 33 | # Except for our modules 34 | logging.getLogger("ssm-tools").setLevel(level) 35 | 36 | 37 | # --------------------------------------------------------- 38 | 39 | __all__.append("add_general_parameters") 40 | 41 | 42 | def add_general_parameters(parser: argparse.ArgumentParser, long_only: bool = False) -> argparse._ArgumentGroup: 43 | """ 44 | Add General Options used by all ssm-* tools. 45 | """ 46 | 47 | # Remove short options if long_only==True 48 | def _get_opts(opt_long: str, opt_short: str) -> List[str]: 49 | opts = [opt_long] 50 | if not long_only: 51 | opts.append(opt_short) 52 | return opts 53 | 54 | group_general = parser.add_argument_group("General Options") 55 | group_general.add_argument(*_get_opts("--profile", "-p"), dest="profile", type=str, help="Configuration profile from ~/.aws/{credentials,config}") 56 | group_general.add_argument(*_get_opts("--region", "-g"), dest="region", type=str, help="Set / override AWS region.") 57 | group_general.add_argument(*_get_opts("--verbose", "-v"), action="store_const", dest="log_level", const=logging.INFO, default=logging.INFO, help="Default log level. Show informational messages only.") 58 | group_general.add_argument(*_get_opts("--debug", "-d"), action="store_const", dest="log_level", const=logging.DEBUG, help="Increase log level.") 59 | group_general.add_argument(*_get_opts("--quiet", "-q"), action="store_const", dest="log_level", const=logging.WARNING, help="Decrease log level. Only show warnings and errors.") 60 | group_general.add_argument(*_get_opts("--version", "-V"), action="store_true", dest="show_version", help=f"Show package version and exit. 
Version is {ssm_tools_version}") 61 | group_general.add_argument(*_get_opts("--help", "-h"), action="help", help="Print this help and exit") 62 | 63 | return group_general 64 | 65 | 66 | # --------------------------------------------------------- 67 | 68 | __all__.append("show_version") 69 | 70 | 71 | def show_version(args: argparse.Namespace) -> None: 72 | """ 73 | Show package version and exit. 74 | """ 75 | version_string = f"ssm-tools/{ssm_tools_version}" 76 | if args.log_level <= logging.INFO: 77 | version_string += f" python/{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}" 78 | version_string += f" boto3/{boto3.__version__}" 79 | print(version_string) 80 | sys.exit(0) 81 | 82 | 83 | # --------------------------------------------------------- 84 | 85 | __all__.append("bytes_to_human") 86 | 87 | 88 | def bytes_to_human(size: float) -> Tuple[float, str]: 89 | """ 90 | Convert Bytes to more readable units 91 | """ 92 | units = ["B", "kB", "MB", "GB", "TB"] 93 | unit_idx = 0 # Start with Bytes 94 | while unit_idx < len(units) - 1: 95 | if size < 2048: 96 | break 97 | size /= 1024.0 98 | unit_idx += 1 99 | return size, units[unit_idx] 100 | 101 | 102 | # --------------------------------------------------------- 103 | 104 | __all__.append("seconds_to_human") 105 | 106 | 107 | def seconds_to_human(seconds: float, decimal: int = 3) -> str: 108 | """ 109 | Convert seconds to HH:MM:SS[.SSS] 110 | 111 | If decimal==0 only full seconds are used. 112 | """ 113 | secs = int(seconds) 114 | fraction = seconds - secs 115 | 116 | mins = int(secs / 60) 117 | secs = secs % 60 118 | 119 | hours = int(mins / 60) 120 | mins = mins % 60 121 | 122 | ret = f"{hours:02d}:{mins:02d}:{secs:02d}" 123 | if decimal: 124 | fraction = int(fraction * (10**decimal)) 125 | ret += f".{fraction:0{decimal}d}" 126 | 127 | return ret 128 | 129 | 130 | # --------------------------------------------------------- 131 | 132 | __all__.append("verify_plugin_version") 133 | 134 | 135 | def verify_plugin_version(version_required: str, logger: logging.Logger) -> bool: 136 | """ 137 | Verify that a session-manager-plugin is installed 138 | and is of a required version or newer. 139 | """ 140 | session_manager_plugin = "session-manager-plugin" 141 | 142 | try: 143 | result = subprocess.run([session_manager_plugin, "--version"], stdout=subprocess.PIPE, check=False) 144 | plugin_version = result.stdout.decode("ascii").strip() 145 | logger.debug(f"{session_manager_plugin} version {plugin_version}") 146 | 147 | if packaging.version.parse(plugin_version) >= packaging.version.parse(version_required): 148 | return True 149 | 150 | logger.error(f"session-manager-plugin version {plugin_version} is installed, {version_required} is required") 151 | except FileNotFoundError: 152 | logger.error(f"{session_manager_plugin} not installed") 153 | 154 | logger.error("Check out https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html for instructions") 155 | 156 | return False 157 | 158 | 159 | # --------------------------------------------------------- 160 | 161 | 162 | __all__.append("verify_awscli_version") 163 | 164 | 165 | def verify_awscli_version(version_required: str, logger: logging.Logger) -> bool: 166 | """ 167 | Verify that the aws-cli is installed and is of a required version or newer. 
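    Example (as used by ec2-ssh --use-endpoint): verify_awscli_version("2.12.0", logger)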
168 | """ 169 | aws_cli = "aws" 170 | 171 | try: 172 | result = subprocess.run([aws_cli, "--version"], stdout=subprocess.PIPE, check=False) 173 | plugin_version = result.stdout.decode("ascii").strip().split(" ")[0].split("/")[1] 174 | logger.debug(f"AWS-CLI version {plugin_version}") 175 | 176 | if packaging.version.parse(plugin_version) >= packaging.version.parse(version_required): 177 | return True 178 | 179 | logger.error(f"AWS-CLI version {plugin_version} is installed, {version_required} is required") 180 | except FileNotFoundError: 181 | logger.error("AWS-CLI is not installed") 182 | 183 | return False 184 | 185 | 186 | # --------------------------------------------------------- 187 | 188 | __all__.append("AWSSessionBase") 189 | 190 | 191 | class AWSSessionBase: 192 | def __init__(self, args: argparse.Namespace) -> None: 193 | # aws-cli compatible MFA cache 194 | cli_cache = pathlib.Path("~/.aws/cli/cache").expanduser() 195 | 196 | # Construct boto3 session with MFA cache 197 | self.session = boto3.session.Session(profile_name=args.profile, region_name=args.region) 198 | self.session._session.get_component("credential_provider").get_provider("assume-role").cache = botocore.credentials.JSONFileCache(cli_cache) 199 | -------------------------------------------------------------------------------- /ssm_tools/ec2_instance_connect.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import logging 3 | import pathlib 4 | import subprocess 5 | 6 | # Needed for type hints 7 | import argparse 8 | from typing import List, Tuple, Optional 9 | 10 | from .common import AWSSessionBase 11 | 12 | logger = logging.getLogger("ssm-tools.ec2-instance-connect") 13 | 14 | 15 | class EC2InstanceConnectHelper(AWSSessionBase): 16 | SSH_AGENT_LABEL = "{SSH-AGENT}" 17 | 18 | def __init__(self, args: argparse.Namespace) -> None: 19 | super().__init__(args) 20 | 21 | # Create boto3 client from session 22 | self.ec2ic_client = self.session.client("ec2-instance-connect") 23 | 24 | def obtain_ssh_key(self, key_file_name: str) -> Tuple[str, Optional[str]]: 25 | def _read_ssh_agent_keys() -> List[str]: 26 | cp = subprocess.run(["ssh-add", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) 27 | if cp.returncode != 0: 28 | logger.debug("Failed to run: ssh-add -L: %s", cp.stderr.decode("utf-8").strip().replace("\n", " ")) 29 | return [] 30 | return cp.stdout.decode("utf-8").split("\n") 31 | 32 | def _read_ssh_public_key(key_file_name_pub: str) -> str: 33 | try: 34 | key_path = pathlib.Path(key_file_name_pub).expanduser() 35 | logger.debug("Going to read key from: %s", key_path) 36 | pub_key_raw = key_path.read_text().split("\n") 37 | for line in pub_key_raw: 38 | if line.startswith("ecdsa-sha2-"): 39 | logger.error("ECDSA keys are not yet supported by EC2 Instance Connect: %s", key_file_name_pub) 40 | sys.exit(1) 41 | if line.startswith("ssh-"): 42 | return line 43 | except (FileNotFoundError, PermissionError) as ex: 44 | logger.debug("Could not read the public key: %s", ex) 45 | return "" 46 | 47 | if not key_file_name: 48 | # No key_file_name was specified, try ... 
49 | # - SSH Agent keys (doesn't matter which one - we'll immediately connect to it) 50 | ssh_keys = _read_ssh_agent_keys() 51 | if ssh_keys: 52 | logger.info("Using SSH key from SSH Agent, should be as good as any.") 53 | return ssh_keys[0], self.SSH_AGENT_LABEL 54 | 55 | # - ~/.ssh/id_rsa.pub 56 | ssh_key = _read_ssh_public_key("~/.ssh/id_rsa.pub") 57 | if ssh_key: 58 | logger.info("Using SSH key from ~/.ssh/id_rsa.pub - should work in most cases") 59 | return ssh_key, "~/.ssh/id_rsa" 60 | 61 | # - ~/.ssh/id_dsa.pub 62 | ssh_key = _read_ssh_public_key("~/.ssh/id_dsa.pub") 63 | if ssh_key: 64 | logger.info("Using SSH key from ~/.ssh/id_dsa.pub - should work in most cases") 65 | return ssh_key, "~/.ssh/id_dsa" 66 | 67 | else: # i.e. key_file_name is set 68 | logger.info("Looking for a public key matching: %s", key_file_name) 69 | 70 | # Try reading the public key file (key_file_name + ".pub" suffix) 71 | key_file_name_pub = key_file_name + ".pub" 72 | ssh_key = _read_ssh_public_key(key_file_name_pub) 73 | if ssh_key: 74 | logger.info("Found a matching SSH Public Key in %s", key_file_name_pub) 75 | return ssh_key, key_file_name 76 | 77 | # Try reading the public key from SSH Agent 78 | for line in _read_ssh_agent_keys(): 79 | if line.endswith(key_file_name): 80 | logger.info("Found a matching SSH Public Key through SSH Agent") 81 | return line, self.SSH_AGENT_LABEL 82 | logger.debug("Could not find the public key for %s in SSH Agent", key_file_name) 83 | 84 | # Try extracting the public key from the provided private key 85 | logger.warning("Trying to extract the public key from %s - you may be asked for a passphrase!", key_file_name) 86 | cp = subprocess.run(["ssh-keygen", "-y", "-f", key_file_name], stdout=subprocess.PIPE, check=False) 87 | if cp.returncode == 0: 88 | logger.info("Extracted the public key from: %s", key_file_name) 89 | return cp.stdout.decode("utf-8").split("\n")[0], key_file_name 90 | logger.debug("Could not extract the public key from %s", key_file_name) 91 | 92 | logger.warning("Unable to find SSH public key from any available source.") 93 | logger.warning("Use --debug for more details on what we tried.") 94 | sys.exit(1) 95 | 96 | def send_ssh_key(self, instance_id: str, login_name: str, key_file_name: str) -> None: 97 | if not login_name: 98 | logger.error('Unable to figure out the EC2 login name. Use "-l {user}" or {user}@{instance}.') 99 | sys.exit(1) 100 | 101 | public_key, private_key_file = self.obtain_ssh_key(key_file_name) 102 | logger.debug("SSH Key from %s: %s", private_key_file, public_key) 103 | 104 | result = self.ec2ic_client.send_ssh_public_key( 105 | InstanceId=instance_id, 106 | InstanceOSUser=login_name, 107 | SSHPublicKey=public_key, 108 | ) 109 | 110 | if not result["Success"]: 111 | logger.error("Failed to send SSH Key to %s", instance_id) 112 | sys.exit(1) 113 | -------------------------------------------------------------------------------- /ssm_tools/ecs_session_cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Convenience wrapper around 'aws ecs execute-command' 4 | # 5 | # See https://aws.nz/aws-utils/ecs-session for more info. 6 | # 7 | # Author: Michael Ludvig (https://aws.nz) 8 | 9 | # The script can list the available containers across all your ECS clusters. 10 | # In the end it executes 'aws ecs execute-command' with the appropriate parameters. 11 | # Supports both EC2 and Fargate ECS tasks. 
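#
# Illustrative usage (the container name and IP below are made up):
#   ecs-session --list
#   ecs-session my-app-container 10.0.1.23 --command /bin/bash
# which ultimately ends up exec'ing something like:
#   aws ecs execute-command --cluster <cluster-arn> --task <task-arn> \
#       --container my-app-container --command /bin/bash --interactive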
12 | 
13 | import os
14 | import sys
15 | import logging
16 | import argparse
17 | 
18 | from typing import Tuple, List, Dict, Any
19 | 
20 | import botocore.exceptions
21 | 
22 | from .common import add_general_parameters, configure_logging, show_version
23 | from .resolver import ContainerResolver
24 | 
25 | logger = logging.getLogger("ssm-tools.ecs-session")
26 | 
27 | 
28 | def parse_args(argv: list) -> Tuple[argparse.Namespace, List[str]]:
29 |     """
30 |     Parse command line arguments.
31 |     """
32 | 
33 |     parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)
34 | 
35 |     add_general_parameters(parser)
36 | 
37 |     group_container = parser.add_argument_group("Container Selection")
38 |     group_container.add_argument("CONTAINER", nargs=argparse.ZERO_OR_MORE, help="Task ID, Container Name or IP address. Use multiple keywords (e.g. Task Name and IP) to narrow down ambiguous selections.")
39 |     group_container.add_argument("--list", "-l", dest="list", action="store_true", help="List available containers with ECS Exec (execute-command) enabled")
40 |     group_container.add_argument("--cluster", dest="cluster", metavar="CLUSTER", help="Specify an ECS cluster. (optional)")
41 | 
42 |     group_session = parser.add_argument_group("Session Parameters")
43 |     group_session.add_argument("--command", dest="command", metavar="COMMAND", default="/bin/sh", help="Command to run inside the container. Default: /bin/sh")
44 | 
45 |     parser.description = "Execute a command in a given ECS container (via 'aws ecs execute-command')"
46 |     parser.epilog = f"""
47 | IMPORTANT: containers must have the "execute-command" setting enabled or they
48 | will not be recognised by {parser.prog} nor show up in --list output.
49 | 
50 | Visit https://aws.nz/aws-utils/ecs-session for more info and usage examples. 
51 | 52 | Author: Michael Ludvig 53 | """ 54 | 55 | # Parse supplied arguments 56 | args, extras = parser.parse_known_args(argv) 57 | 58 | # If --version do it now and exit 59 | if args.show_version: 60 | show_version(args) 61 | 62 | # Require exactly one of CONTAINER or --list 63 | if bool(args.CONTAINER) + bool(args.list) != 1: 64 | parser.error("Specify either CONTAINER or --list") 65 | 66 | return args, extras 67 | 68 | 69 | def start_session(container: Dict[str, Any], args: argparse.Namespace, command: str) -> None: 70 | exec_args = ["aws", "ecs", "execute-command"] 71 | if args.profile: 72 | exec_args += ["--profile", args.profile] 73 | if args.region: 74 | exec_args += ["--region", args.region] 75 | 76 | # fmt: off 77 | exec_args += [ 78 | "--cluster", container["cluster_arn"], 79 | "--task", container["task_arn"], 80 | "--container", container["container_name"], 81 | "--command", command, 82 | "--interactive", 83 | ] 84 | # fmt: on 85 | 86 | logger.debug("Running: %s", exec_args) 87 | os.execvp(exec_args[0], exec_args) 88 | 89 | 90 | def main() -> int: 91 | ## Split command line to main args and optional command to run 92 | args, _ = parse_args(sys.argv[1:]) 93 | 94 | configure_logging(args.log_level) 95 | 96 | try: 97 | if args.list: 98 | ContainerResolver(args).print_list() 99 | sys.exit(0) 100 | 101 | container = ContainerResolver(args).resolve_container(keywords=args.CONTAINER) 102 | 103 | if not container: 104 | logger.warning("Could not find any container matching: %s", " AND ".join(args.CONTAINER)) 105 | sys.exit(1) 106 | 107 | start_session(container, args, args.command) 108 | 109 | except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: 110 | logger.error(e) 111 | sys.exit(1) 112 | 113 | return 0 114 | 115 | 116 | if __name__ == "__main__": 117 | main() 118 | -------------------------------------------------------------------------------- /ssm_tools/resolver.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | import re 5 | import logging 6 | import argparse 7 | 8 | from typing import Dict, List, Any, Tuple 9 | 10 | import botocore.session 11 | 12 | from .common import AWSSessionBase 13 | 14 | logger = logging.getLogger("ssm-tools.resolver") 15 | 16 | 17 | class InstanceResolver(AWSSessionBase): 18 | def __init__(self, args: argparse.Namespace) -> None: 19 | super().__init__(args) 20 | 21 | # Create boto3 clients from session 22 | self.ssm_client = self.session.client("ssm") 23 | self.ec2_client = self.session.client("ec2") 24 | 25 | def get_list(self) -> Dict[str, Dict[str, Any]]: 26 | def _try_append(_list: list, _dict: dict, _key: str) -> None: 27 | if _key in _dict: 28 | _list.append(_dict[_key]) 29 | 30 | items = {} 31 | 32 | # List instances from SSM 33 | logger.debug("Fetching SSM inventory") 34 | paginator = self.ssm_client.get_paginator("get_inventory") 35 | response_iterator = paginator.paginate( 36 | Filters=[ 37 | {"Key": "AWS:InstanceInformation.ResourceType", "Values": ["EC2Instance", "ManagedInstance"], "Type": "Equal"}, 38 | {"Key": "AWS:InstanceInformation.InstanceStatus", "Values": ["Terminated", "Stopped", "ConnectionLost"], "Type": "NotEqual"}, 39 | ] 40 | ) 41 | 42 | for inventory in response_iterator: 43 | for entity in inventory["Entities"]: 44 | logger.debug(entity) 45 | content = entity["Data"]["AWS:InstanceInformation"]["Content"][0] 46 | instance_id = content["InstanceId"] 47 | items[instance_id] = { 48 | "InstanceId": instance_id, 49 | 
"InstanceName": "", 50 | "HostName": content.get("ComputerName", ""), 51 | "Addresses": [content.get("IpAddress")], 52 | } 53 | logger.debug("Added instance: %s: %r", instance_id, items[instance_id]) 54 | 55 | # Add attributes from EC2 56 | paginator = self.ec2_client.get_paginator("describe_instances") 57 | ec2_instance_ids = list(filter(lambda x: x.startswith("i-"), items)) 58 | 59 | tries = 5 60 | while tries: 61 | # The SSM inventory sometimes returns instances that have been terminated 62 | # a short while ago which makes the following call fail 63 | # with InvalidInstanceID.NotFound exception. We'll try and remove the invalid 64 | # instance ids a {tries} times or until we succeed. If unsuccessful we'll remove 65 | # the list obtained from SSM without extra details (host name, public IPs, etc). 66 | # This mostly / only affects accounts with high churn of starting / stopping 67 | # instances - most users will pass this loop only once. 68 | try: 69 | response_iterator = paginator.paginate(InstanceIds=ec2_instance_ids) 70 | for reservations in response_iterator: 71 | for reservation in reservations["Reservations"]: 72 | for instance in reservation["Instances"]: 73 | instance_id = instance["InstanceId"] 74 | if not instance_id in items: 75 | continue 76 | 77 | # Find instance IPs 78 | items[instance_id]["Addresses"] = [] 79 | _try_append(items[instance_id]["Addresses"], instance, "PrivateIpAddress") 80 | _try_append(items[instance_id]["Addresses"], instance, "PublicIpAddress") 81 | 82 | # Store instance AZ - useful for EC2 Instance Connect 83 | items[instance_id]["AvailabilityZone"] = instance["Placement"]["AvailabilityZone"] 84 | 85 | # Find instance name from tag Name 86 | for tag in instance.get("Tags", []): 87 | if tag["Key"] == "Name" and tag["Value"]: 88 | items[instance_id]["InstanceName"] = tag["Value"] 89 | 90 | logger.debug("Updated instance: %s: %r", instance_id, items[instance_id]) 91 | return items 92 | 93 | except botocore.exceptions.ClientError as ex: 94 | if ex.response.get("Error", {}).get("Code", "") != "InvalidInstanceID.NotFound": 95 | raise 96 | message = ex.response.get("Error", {}).get("Message", "") 97 | if not message.startswith("The instance ID") or not message.endswith("not exist"): 98 | logger.warning("Unexpected InvalidInstanceID.NotFound message: %s", message) 99 | # Try to extract instace ids ... 100 | remove_instance_ids = re.findall("i-[0-9a-f]+", message) 101 | logger.debug("Removing non-existent InstanceIds: %s", remove_instance_ids) 102 | # Remove the failed ids from the list and try again 103 | ec2_instance_ids = list(set(ec2_instance_ids) - set(remove_instance_ids)) 104 | tries -= 1 105 | 106 | if not tries: 107 | logger.warning("Unable to list instance details. 
Some instance names and IPs may be missing.") 108 | 109 | return items 110 | 111 | def print_list(self) -> None: 112 | hostname_len = 1 # Minimum of 1 char, otherwise f-string below fails for empty hostnames 113 | instname_len = 1 114 | 115 | items = self.get_list().values() 116 | 117 | if not items: 118 | logger.warning("No instances registered in SSM!") 119 | return 120 | 121 | items_list = list(items) 122 | del items 123 | items_list.sort(key=lambda x: x.get("InstanceName") or x.get("HostName")) # type: ignore 124 | 125 | for item in items_list: 126 | hostname_len = max(hostname_len, len(item["HostName"])) 127 | instname_len = max(instname_len, len(item["InstanceName"])) 128 | 129 | for item in items_list: 130 | print(f"{item['InstanceId']:20} {item['HostName']:{hostname_len}} {item['InstanceName']:{instname_len}} {' '.join(item['Addresses'])}") 131 | 132 | def resolve_instance(self, instance: str) -> Tuple[str, Dict[str, Any]]: 133 | # Is it a valid Instance ID? 134 | if re.match("^m?i-[a-f0-9]+$", instance): 135 | return instance, {} 136 | 137 | # It is not - find it in the list 138 | instances = [] 139 | 140 | items = self.get_list() 141 | for instance_id in items: 142 | item = items[instance_id] 143 | if instance.lower() in [item["HostName"].lower(), item["InstanceName"].lower()] + item["Addresses"]: 144 | instances.append(instance_id) 145 | 146 | if not instances: 147 | return "", {} 148 | 149 | if len(instances) > 1: 150 | logger.warning("Found %d instances for '%s': %s", len(instances), instance, " ".join(instances)) 151 | logger.warning("Use INSTANCE_ID to connect to a specific one") 152 | sys.exit(1) 153 | 154 | # Found only one instance - return it 155 | return instances[0], items[instances[0]] 156 | 157 | 158 | class ContainerResolver(AWSSessionBase): 159 | def __init__(self, args: argparse.Namespace) -> None: 160 | super().__init__(args) 161 | 162 | # Create boto3 clients from session 163 | self.ecs_client = self.session.client("ecs") 164 | 165 | self.args = args 166 | self.containers: List[Dict[str, Any]] = [] 167 | self._tasks: Dict[str, Any] = {} 168 | 169 | def add_container(self, container: Dict[str, Any]) -> None: 170 | _task_parsed = container["taskArn"].split(":")[-1].split("/") 171 | self.containers.append( 172 | { 173 | "cluster_name": _task_parsed[1], 174 | "task_id": _task_parsed[2], 175 | "cluster_arn": self._tasks[container["taskArn"]]["clusterArn"], 176 | "task_arn": container["taskArn"], 177 | "group_name": self._tasks[container["taskArn"]]["group"], 178 | "container_name": container["name"], 179 | "container_ip": container["networkInterfaces"][0]["privateIpv4Address"], 180 | } 181 | ) 182 | 183 | def get_list(self) -> List[Dict[str, Any]]: 184 | def _try_append(_list: list, _dict: dict, _key: str) -> None: 185 | if _key in _dict: 186 | _list.append(_dict[_key]) 187 | 188 | # List ECS Clusters 189 | clusters = [] 190 | logger.debug("Listing ECS Clusters") 191 | paginator = self.ecs_client.get_paginator("list_clusters") 192 | for page in paginator.paginate(): 193 | clusters.extend(page["clusterArns"]) 194 | 195 | if self.args.cluster: 196 | filtered_clusters = [] 197 | for cluster in clusters: 198 | if (self.args.cluster.startswith("arn:") and cluster == self.args.cluster) or cluster.endswith(f"/{self.args.cluster}"): 199 | filtered_clusters.append(cluster) 200 | break 201 | clusters = filtered_clusters 202 | 203 | if not clusters: 204 | logger.warning("No ECS Clusters found.") 205 | return [] 206 | 207 | # List tasks in each cluster 208 | paginator = 
self.ecs_client.get_paginator("list_tasks")
209 |         for cluster in clusters:
210 |             logger.debug("Listing tasks in cluster: %s", cluster)
211 | 
212 |             # maxResults must be <= 100 because describe_tasks() doesn't accept more than that
213 |             for page in paginator.paginate(cluster=cluster, maxResults=100):
214 |                 if "taskArns" not in page or not page["taskArns"]:
215 |                     logger.debug(f"No tasks found in cluster {cluster}")
216 |                     break
217 |                 response = self.ecs_client.describe_tasks(cluster=cluster, tasks=page["taskArns"])
218 | 
219 |                 # Filter containers that have a running ExecuteCommandAgent
220 |                 for task in response["tasks"]:
221 |                     logger.debug(task)
222 |                     self._tasks[task["taskArn"]] = task
223 |                     for container in task["containers"]:
224 |                         if not "managedAgents" in container:
225 |                             continue
226 |                         for agent in container["managedAgents"]:
227 |                             if agent["name"] == "ExecuteCommandAgent" and agent["lastStatus"] == "RUNNING":
228 |                                 self.add_container(container)
229 | 
230 |         return self.containers
231 | 
232 |     def print_containers(self, containers: List[Dict[str, Any]]) -> None:
233 |         max_len = {}
234 |         for container in containers:
235 |             for key in container.keys():
236 |                 if not key in max_len:
237 |                     max_len[key] = len(container[key])
238 |                 else:
239 |                     max_len[key] = max(max_len[key], len(container[key]))
240 |         containers.sort(key=lambda x: [x["cluster_name"], x["container_name"]])
241 |         for container in containers:
242 |             print(
243 |                 f"{container['cluster_name']:{max_len['cluster_name']}} {container['group_name']:{max_len['group_name']}} {container['task_id']:{max_len['task_id']}} {container['container_name']:{max_len['container_name']}} {container['container_ip']:{max_len['container_ip']}}"
244 |             )
245 | 
246 |     def print_list(self) -> None:
247 |         containers = self.get_list()
248 | 
249 |         if not containers:
250 |             logger.warning("No Execute-Command capable containers found!")
251 |             sys.exit(1)
252 | 
253 |         self.print_containers(containers)
254 | 
255 |     def resolve_container(self, keywords: List[str]) -> Dict[str, Any]:
256 |         containers = self.get_list()
257 | 
258 |         if not containers:
259 |             logger.warning("No Execute-Command capable containers found!")
260 |             sys.exit(1)
261 | 
262 |         logger.debug("Searching for containers matching all keywords: %s", " ".join(keywords))
263 | 
264 |         candidates: List[Dict[str, Any]] = []
265 |         for container in containers:
266 |             for keyword in keywords:
267 |                 if keyword not in (container["group_name"], container["task_id"], container["container_name"], container["container_ip"]):
268 |                     logger.debug("IGNORED: Container %s/%s doesn't match keyword: %s", container["task_id"], container["container_name"], keyword)
269 |                     container = {}
270 |                     break
271 |             if container:
272 |                 logger.debug("ADDED: Container %s/%s matches all keywords: %s", container["task_id"], container["container_name"], " ".join(keywords))
273 |                 candidates.append(container)
274 |         if not candidates:
275 |             logger.warning("No container matches: %s", " AND ".join(keywords))
276 |             sys.exit(1)
277 |         elif len(candidates) == 1:
278 |             self.print_containers(candidates)
279 |             return candidates[0]
280 |         else:
281 |             logger.warning("Found %d containers for: %s", len(candidates), " AND ".join(keywords))
282 |             logger.warning("Use Container IP or Task ID to connect to a specific one")
283 |             self.print_containers(candidates)
284 |             sys.exit(1)
285 | 
--------------------------------------------------------------------------------
/ssm_tools/ssm_session_cli.py:
--------------------------------------------------------------------------------
1 | 
#!/usr/bin/env python3 2 | 3 | # Convenience wrapper around 'aws ssm start-session' 4 | # can resolve instance id from Name tag, hostname, IP address, etc. 5 | # 6 | # See https://aws.nz/aws-utils/ssm-session for more info. 7 | # 8 | # Author: Michael Ludvig (https://aws.nz) 9 | 10 | # The script can list available instances, resolve instance names, 11 | # and host names, etc. In the end it executes 'aws' to actually 12 | # start the session. 13 | 14 | import os 15 | import sys 16 | import time 17 | import logging 18 | import signal 19 | import argparse 20 | 21 | from typing import Tuple, List 22 | 23 | import botocore.exceptions 24 | 25 | from .common import add_general_parameters, show_version, configure_logging 26 | from .resolver import InstanceResolver 27 | 28 | logger = logging.getLogger("ssm-tools.ec2-session") 29 | 30 | # Only on Linux / Unix / Mac because Windows don't interpret Ctrl-Z 31 | if os.name == "posix": 32 | # Ignore Ctrl-Z - pass it to the shell 33 | signal.signal(signal.SIGTSTP, signal.SIG_IGN) 34 | 35 | 36 | def parse_args(argv: list) -> argparse.Namespace: 37 | """ 38 | Parse command line arguments. 39 | """ 40 | 41 | parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False) 42 | 43 | add_general_parameters(parser) 44 | 45 | # fmt: off 46 | group_instance = parser.add_argument_group("Instance Selection") 47 | group_instance.add_argument("INSTANCE", nargs="?", help="Instance ID, Name, Host name or IP address") 48 | group_instance.add_argument("--list", "-l", dest="list", action="store_true", help="List instances available for SSM Session") 49 | 50 | group_session = parser.add_argument_group("Session Parameters") 51 | group_session.add_argument("--user", "-u", "--sudo", dest="user", metavar="USER", help="SUDO to USER after opening the session. Can't be used together with --document-name / --parameters. (optional)") 52 | group_session.add_argument("--command", "-c", dest="command", metavar="COMMAND", help="Command to run in the SSM Session. Can't be used together with --user. " 53 | "If you need to run the COMMAND as a different USER prepend the command with the appropriate 'sudo -u USER ...'. (optional)") 54 | group_session.add_argument("--document-name", dest="document_name", help="Document to execute, e.g. AWS-StartInteractiveCommand (optional)") 55 | group_session.add_argument("--parameters", dest="parameters", help="Parameters for the --document-name, e.g. 'command=[\"sudo -i -u ec2-user\"]' (optional)") 56 | # fmt: on 57 | 58 | parser.description = "Start SSM Shell Session to an EC2 instance" 59 | parser.epilog = f""" 60 | IMPORTANT: instances must be registered in AWS Systems Manager (SSM) 61 | before you can start a shell session! Instances not registered in SSM 62 | will not be recognised by {parser.prog} nor show up in --list output. 63 | 64 | Visit https://aws.nz/aws-utils/ssm-session for more info and usage examples. 
65 | 66 | Author: Michael Ludvig 67 | """ 68 | 69 | # Parse supplied arguments 70 | args = parser.parse_args(argv) 71 | 72 | # If --version do it now and exit 73 | if args.show_version: 74 | show_version(args) 75 | 76 | # Require exactly one of INSTANCE or --list 77 | if bool(args.INSTANCE) + bool(args.list) != 1: 78 | parser.error("Specify either INSTANCE or --list") 79 | 80 | if args.parameters and not args.document_name: 81 | parser.error("--parameters can only be used together with --document-name") 82 | 83 | if bool(args.user) + bool(args.command) + bool(args.document_name) > 1: 84 | parser.error( 85 | """ 86 | Use only one of --user / --command / --document-name 87 | If you need to run the COMMAND as a specific USER then prepend 88 | the command with the appropriate: sudo -i -u USER COMMAND 89 | """ 90 | ) 91 | 92 | return args 93 | 94 | 95 | def start_session(instance_id: str, args: argparse.Namespace) -> None: 96 | exec_args = ["aws", "ssm", "start-session"] 97 | if args.profile: 98 | exec_args += ["--profile", args.profile] 99 | if args.region: 100 | exec_args += ["--region", args.region] 101 | 102 | if args.user: 103 | # Fake --document-name / --parameters for --user 104 | exec_args += ["--document-name", "AWS-StartInteractiveCommand", "--parameters", f'command=["sudo -i -u {args.user}"]'] 105 | if args.command: 106 | # Fake --document-name / --parameters for --command 107 | exec_args += ["--document-name", "AWS-StartInteractiveCommand", "--parameters", f"command={args.command}"] 108 | else: 109 | # Or use the provided values 110 | if args.document_name: 111 | exec_args += ["--document-name", args.document_name] 112 | if args.parameters: 113 | exec_args += ["--parameters", args.parameters] 114 | 115 | exec_args += ["--target", instance_id] 116 | logger.debug("Running: %s", exec_args) 117 | os.execvp(exec_args[0], exec_args) 118 | 119 | 120 | def main() -> int: 121 | ## Deprecate old script name 122 | if sys.argv[0].endswith("/ssm-session"): 123 | print('\033[31;1mWARNING:\033[33;1m "ssm-session" has been renamed to "ec2-session" - please update your scripts.\033[0m', file=sys.stderr) 124 | time.sleep(3) 125 | print(file=sys.stderr) 126 | 127 | ## Split command line to main args and optional command to run 128 | args = parse_args(sys.argv[1:]) 129 | 130 | configure_logging(args.log_level) 131 | 132 | try: 133 | if args.list: 134 | InstanceResolver(args).print_list() 135 | sys.exit(0) 136 | 137 | instance_id, _ = InstanceResolver(args).resolve_instance(args.INSTANCE) 138 | 139 | if not instance_id: 140 | logger.warning("Could not resolve Instance ID for '%s'", args.INSTANCE) 141 | logger.warning("Perhaps the '%s' is not registered in SSM?", args.INSTANCE) 142 | sys.exit(1) 143 | 144 | start_session(instance_id, args) 145 | 146 | except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: 147 | logger.error(e) 148 | sys.exit(1) 149 | 150 | return 0 151 | 152 | 153 | if __name__ == "__main__": 154 | main() 155 | -------------------------------------------------------------------------------- /ssm_tools/ssm_ssh_cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Open SSH connections through AWS Session Manager 4 | # 5 | # See https://aws.nz/aws-utils/ec2-ssh for more info. 6 | # 7 | # Author: Michael Ludvig (https://aws.nz) 8 | 9 | # The script can list available instances, resolve instance names, 10 | # and host names, etc. 
In the end it executes 'ssh' with the correct
11 | # parameters to actually start the SSH session.
12 | 
13 | import os
14 | import sys
15 | import time
16 | import logging
17 | import argparse
18 | 
19 | from typing import Tuple, List
20 | 
21 | import botocore.exceptions
22 | 
23 | from .common import add_general_parameters, show_version, configure_logging, verify_plugin_version, verify_awscli_version
24 | from .resolver import InstanceResolver
25 | from .ec2_instance_connect import EC2InstanceConnectHelper
26 | 
27 | logger = logging.getLogger("ssm-tools.ec2-ssh")
28 | 
29 | 
30 | def parse_args(argv: list) -> Tuple[argparse.Namespace, List[str]]:
31 |     """
32 |     Parse command line arguments.
33 |     """
34 | 
35 |     parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)
36 | 
37 |     add_general_parameters(parser, long_only=True)
38 | 
39 |     group_instance = parser.add_argument_group("Instance Selection")
40 |     group_instance.add_argument("--list", dest="list", action="store_true", help="List instances available for SSM Session")
41 | 
42 |     group_ec2ic = parser.add_argument_group("EC2 Instance Connect")
43 |     group_ec2ic.add_argument("--send-key", dest="send_key", action="store_true", default=True, help="Send the SSH key to instance metadata using EC2 Instance Connect (this is the default; the flag is deprecated - use --no-send-key to disable)")
44 |     group_ec2ic.add_argument("--no-send-key", dest="send_key", action="store_false", help="Do not send the SSH key to instance metadata using EC2 Instance Connect")
45 |     group_ec2ic.add_argument("--use-endpoint", dest="use_endpoint", action="store_true", default=False, help="Connect using 'EC2 Instance Connect Endpoint'")
46 | 
47 |     parser.description = "Open SSH connection through Session Manager"
48 |     parser.epilog = f"""
49 | IMPORTANT: instances must be registered in AWS Systems Manager (SSM)
50 | before you can start a shell session! Instances not registered in SSM
51 | will not be recognised by {parser.prog} nor show up in --list output.
52 | 
53 | Visit https://aws.nz/aws-utils/ec2-ssh for more info and usage examples. 
54 | 55 | Author: Michael Ludvig 56 | """ 57 | 58 | # Parse supplied arguments 59 | args, extra_args = parser.parse_known_args(argv) 60 | 61 | # If --version do it now and exit 62 | if args.show_version: 63 | show_version(args) 64 | 65 | # Require exactly one of INSTANCE or --list 66 | if bool(extra_args) + bool(args.list) != 1: 67 | parser.error("Specify either --list or SSH Options including instance name") 68 | 69 | return args, extra_args 70 | 71 | 72 | def start_ssh_session(ssh_args: list, profile: str, region: str, use_endpoint: bool) -> None: 73 | aws_args = "" 74 | if profile: 75 | aws_args += f"--profile {profile} " 76 | if region: 77 | aws_args += f"--region {region} " 78 | if use_endpoint: 79 | min_awscli_version = "2.12.0" 80 | if not verify_awscli_version(min_awscli_version, logger): 81 | logger.error(f"AWS CLI v{min_awscli_version} or newer is required for --use-endpoint, falling back to SSM Session Manager") 82 | use_endpoint = False 83 | if use_endpoint: 84 | proxy_option = ["-o", f"ProxyCommand=aws {aws_args} ec2-instance-connect open-tunnel --instance-id %h"] 85 | else: 86 | proxy_option = ["-o", f"ProxyCommand=aws {aws_args} ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p"] 87 | command = ["ssh"] + proxy_option + ssh_args 88 | logger.debug("Running: %s", command) 89 | os.execvp(command[0], command) 90 | 91 | 92 | def main() -> int: 93 | ## Deprecate old script name 94 | if sys.argv[0].endswith("/ssm-ssh"): 95 | print('\033[31;1mWARNING:\033[33;1m "ssm-session" has been renamed to "ec2-session" - please update your scripts.\033[0m', file=sys.stderr) 96 | time.sleep(3) 97 | print(file=sys.stderr) 98 | 99 | ## Split command line to main args and optional command to run 100 | args, extra_args = parse_args(sys.argv[1:]) 101 | 102 | if args.log_level == logging.DEBUG: 103 | extra_args.append("-v") 104 | 105 | configure_logging(args.log_level) 106 | 107 | if not verify_plugin_version("1.1.23", logger): 108 | sys.exit(1) 109 | 110 | try: 111 | instance_resolver = InstanceResolver(args) 112 | 113 | if args.list: 114 | instance_resolver.print_list() 115 | sys.exit(0) 116 | 117 | # Loop through all SSH args to find: 118 | # - instance name 119 | # - user name (for use with --send-key) 120 | # - key name (for use with --send-key) 121 | ssh_args = [] 122 | instance_id = "" 123 | login_name = "" 124 | key_file_name = "" 125 | 126 | extra_args_iter = iter(extra_args) 127 | for arg in extra_args_iter: 128 | # User name argument 129 | if arg.startswith("-l"): 130 | ssh_args.append(arg) 131 | if len(arg) > 2: 132 | login_name = arg[2:] 133 | else: 134 | login_name = next(extra_args_iter) 135 | ssh_args.append(login_name) 136 | continue 137 | 138 | # SSH key argument 139 | if arg.startswith("-i"): 140 | ssh_args.append(arg) 141 | if len(arg) > 2: 142 | key_file_name = arg[2:] 143 | else: 144 | key_file_name = next(extra_args_iter) 145 | ssh_args.append(key_file_name) 146 | continue 147 | 148 | # If we already have instance id just copy the args 149 | if instance_id: 150 | ssh_args.append(arg) 151 | continue 152 | 153 | # Some args that can't be an instance name 154 | if arg.startswith("-") or arg.find(":") > -1 or arg.find(os.path.sep) > -1: 155 | ssh_args.append(arg) 156 | continue 157 | 158 | # This may be an instance name - try to resolve it 159 | maybe_login_name = None 160 | if arg.find("@") > -1: # username@hostname format 161 | maybe_login_name, instance = arg.split("@", 1) 162 | else: 163 | instance = arg 164 | 165 | instance_id, _ = 
instance_resolver.resolve_instance(instance) 166 | if not instance_id: 167 | # Not resolved as an instance name - put back to args 168 | ssh_args.append(arg) 169 | maybe_login_name = None 170 | continue 171 | 172 | # We got a login name from 'login_name@instance' 173 | if maybe_login_name: 174 | login_name = maybe_login_name 175 | 176 | # Woohoo we've got an instance id! 177 | logger.info("Resolved instance name '%s' to '%s'", instance, instance_id) 178 | ssh_args.append(instance_id) 179 | 180 | if login_name: 181 | ssh_args.extend(["-l", login_name]) 182 | 183 | if not instance_id: 184 | logger.warning("Could not resolve Instance ID for '%s'", instance) 185 | logger.warning("Perhaps the '%s' is not registered in SSM?", instance) 186 | sys.exit(1) 187 | 188 | if args.send_key: 189 | EC2InstanceConnectHelper(args).send_ssh_key(instance_id, login_name, key_file_name) 190 | 191 | start_ssh_session(ssh_args=ssh_args, profile=args.profile, region=args.region, use_endpoint=args.use_endpoint) 192 | 193 | except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: 194 | logger.error(e) 195 | sys.exit(1) 196 | 197 | return 0 198 | 199 | 200 | if __name__ == "__main__": 201 | main() 202 | -------------------------------------------------------------------------------- /ssm_tools/ssm_tunnel_cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Set up IP tunnel through SSM-enabled instance. 4 | # 5 | # See https://aws.nz/aws-utils/ssm-tunnel for more info. 6 | # 7 | # Author: Michael Ludvig (https://aws.nz) 8 | 9 | import os 10 | import sys 11 | import time 12 | import copy 13 | import errno 14 | import logging 15 | import threading 16 | import random 17 | import struct 18 | import select 19 | import fcntl 20 | import argparse 21 | import ipaddress 22 | from base64 import b64encode, b64decode 23 | 24 | from typing import List, Any, Tuple 25 | 26 | import pexpect 27 | import botocore.exceptions 28 | 29 | from .common import add_general_parameters, show_version, configure_logging, bytes_to_human, seconds_to_human 30 | from .talker import SsmTalker 31 | from .resolver import InstanceResolver 32 | 33 | logger = logging.getLogger("ssm-tools.ssm-tunnel") 34 | 35 | tunnel_cidr = "100.64.0.0/16" 36 | keepalive_sec = 10 37 | 38 | 39 | def parse_args(argv: list) -> argparse.Namespace: 40 | """ 41 | Parse command line arguments. 42 | """ 43 | 44 | parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False) 45 | 46 | add_general_parameters(parser) 47 | 48 | # fmt: off 49 | group_instance = parser.add_argument_group("Instance Selection") 50 | group_instance.add_argument("INSTANCE", nargs="?", help="Instance ID, Name, Host name or IP address") 51 | group_instance.add_argument("--list", "-l", dest="list", action="store_true", help="List instances registered in SSM.") 52 | 53 | group_network = parser.add_argument_group("Networking Options") 54 | group_network.add_argument("--route", "-r", metavar="ROUTE", dest="routes", type=str, action="append", default=[], help="CIDR(s) to route through this tunnel. May be used multiple times.") 55 | group_network.add_argument("--tunnel-cidr", metavar="CIDR", type=str, default=tunnel_cidr, 56 | help=f"By default the tunnel endpoint IPs are randomly assigned from the reserved {tunnel_cidr} block (RFC6598). This should be ok for most users." 
57 |     )
58 |     group_network.add_argument("--up-down", metavar="SCRIPT", dest="updown_script", type=str,
59 |         help="Script to call during tunnel start up and close down. Check out 'ssm-tunnel-updown.dns-example' that supports setting a custom DNS server when the tunnel goes up."
60 |     )
61 |     # fmt: on
62 | 
63 |     parser.description = "Start IP tunnel to a given SSM instance"
64 |     parser.epilog = f"""
65 | IMPORTANT: instances must be registered in AWS Systems Manager (SSM)
66 | before you can establish a tunnel to them! Instances not registered in SSM
67 | will not be recognised by {parser.prog} nor show up in --list output.
68 | 
69 | Visit https://aws.nz/aws-utils/ssm-tunnel for more info and usage examples.
70 | 
71 | Author: Michael Ludvig
72 | """
73 | 
74 |     # Parse supplied arguments
75 |     args = parser.parse_args(argv)
76 | 
77 |     # If --version do it now and exit
78 |     if args.show_version:
79 |         show_version(args)
80 | 
81 |     # Require exactly one of INSTANCE or --list
82 |     if bool(args.INSTANCE) + bool(args.list) != 1:
83 |         parser.error("Specify either INSTANCE or --list")
84 | 
85 |     return args
86 | 
87 | 
88 | class SsmTunnel(SsmTalker):
89 |     def __init__(self, *args: Any, **kwargs: Any) -> None:
90 |         super().__init__(*args, **kwargs)
91 | 
92 |         # Stats structure
93 |         self.stats = {"ts": 0.0, "l2r": 0, "r2l": 0}
94 |         self.stats_lock = threading.Lock()
95 |         self.stats_secs = 10
96 |         self.stats_refresh = 0.5   # Print stats every this many seconds
97 | 
98 |         self._exiting = False
99 | 
100 |         self.tun_name = ""
101 |         self._tun_fd = -1
102 |         self.local_ip = self.remote_ip = ""
103 |         self.routes: List[str] = []
104 |         self.updown_script = ""
105 |         self.updown_up_success = False
106 | 
107 |     def run_command(self, command: str, assert_0: bool = True) -> None:
108 |         logger.debug("command: %s", command)
109 |         ret = os.system(command)
110 |         if assert_0:
111 |             assert ret == 0
112 | 
113 |     def open_remote_tunnel(self) -> None:
114 |         logger.debug("Creating tunnel")
115 | 
116 |         # Open remote tun0 device
117 |         self._child.sendline(f"ssm-tunnel-agent {self.remote_ip} {self.local_ip}")
118 |         patterns = ["# Agent device .* is ready", "command not found"]
119 |         match = self._child.expect(patterns)
120 |         if match != 0:   # Index matched in the 'patterns'
121 |             logger.error("Unable to establish the tunnel!")
122 |             logger.error("ssm-tunnel-agent: command not found on the target instance %s.", self._instance_id)
123 |             logger.error("Install with: ec2-session %s --command 'sudo pip install aws-ssm-tunnel-agent'", self._instance_id)
124 |             logger.error("(replace 'pip' with 'pip3' above if the former doesn't work)")
125 |             sys.exit(1)
126 |         logger.debug(self._child.after)
127 | 
128 |     def open_local_tunnel(self) -> None:
129 |         tun_suffix = ".".join(self.local_ip.split(".")[2:])
130 |         self.tun_name = f"tunSSM.{tun_suffix}"
131 | 
132 |         self.create_tun()
133 |         self._tun_fd = self.open_tun()
134 | 
135 |         logger.debug("# Local device %s is ready", self.tun_name)
136 |         logger.info("Local IP: %s / Remote IP: %s", self.local_ip, self.remote_ip)
137 | 
138 |     def create_tun(self) -> None:
139 |         try:
140 |             user_id = os.getuid()
141 |             self.run_command(f"sudo ip tuntap add {self.tun_name} mode tun user {user_id}")
142 |             self.run_command(f"sudo ip addr add {self.local_ip} peer {self.remote_ip} dev {self.tun_name}")
143 |             self.run_command(f"sudo ip link set {self.tun_name} up")
144 |             # Configure routes
145 |             for route in self.routes:
146 |                 self.run_command(f"sudo ip route add {route} via {self.remote_ip}")
147 |         except AssertionError:
148 | 
self.delete_tun() 149 | sys.exit(1) 150 | except Exception as e: 151 | logger.exception(e) 152 | self.delete_tun() 153 | raise 154 | 155 | def delete_tun(self) -> None: 156 | # We don't check return code here - best effort to close and delete the device 157 | if self._tun_fd >= 0: 158 | try: 159 | os.close(self._tun_fd) 160 | self._tun_fd = -1 161 | except Exception as e: 162 | logger.exception(e) 163 | if self.tun_name: 164 | self.run_command(f"sudo ip link set {self.tun_name} down", assert_0=False) 165 | self.run_command(f"sudo ip tuntap del {self.tun_name} mode tun", assert_0=False) 166 | self.tun_name = "" 167 | 168 | def open_tun(self) -> int: 169 | TUNSETIFF = 0x400454CA 170 | IFF_TUN = 0x0001 171 | 172 | tun_fd = os.open("/dev/net/tun", os.O_RDWR) 173 | 174 | flags = IFF_TUN 175 | ifr = struct.pack("16sH22s", self.tun_name.encode(), flags, b"\x00" * 22) 176 | fcntl.ioctl(tun_fd, TUNSETIFF, ifr) 177 | 178 | return tun_fd 179 | 180 | def local_to_remote(self) -> None: 181 | last_ts = time.time() 182 | while True: 183 | if self._exiting: 184 | break 185 | try: 186 | r, w, x = select.select([self._tun_fd], [], [], 1) 187 | if not self._tun_fd in r: 188 | if last_ts + keepalive_sec < time.time(): 189 | # Keepalive timeout - send '#' 190 | self._child.sendline("#") 191 | last_ts = time.time() 192 | continue 193 | buf = os.read(self._tun_fd, 1504) # Virtual GRE header adds 4 bytes 194 | self._child.sendline("%{}".format(b64encode(buf).decode("ascii"))) 195 | except OSError as e: 196 | if e.errno == errno.EBADF and self._exiting: 197 | break 198 | 199 | last_ts = time.time() 200 | # Update stats 201 | self.stats_lock.acquire() 202 | self.stats["l2r"] += len(buf) 203 | self.stats_lock.release() 204 | 205 | logger.debug("local_to_remote() has exited.") 206 | 207 | def remote_to_local(self) -> None: 208 | while True: 209 | if self._exiting: 210 | break 211 | try: 212 | line = self._child.readline() 213 | except pexpect.exceptions.TIMEOUT: 214 | # This is a long timeout, 30 sec, not very useful 215 | continue 216 | if type(self._child.after) == pexpect.exceptions.EOF: 217 | logger.warning("Received unexpected EOF - tunnel went down?") 218 | self._exiting = True 219 | break 220 | if not line or line[0] != "%": 221 | continue 222 | 223 | buf = b64decode(line[1:].strip("\r\n")) 224 | os.write(self._tun_fd, buf) 225 | # Update stats 226 | self.stats_lock.acquire() 227 | self.stats["r2l"] += len(buf) 228 | self.stats_lock.release() 229 | 230 | logger.debug("remote_to_local() has exited.") 231 | 232 | def process_traffic(self) -> None: 233 | tr_l2r = threading.Thread(target=self.local_to_remote, args=[]) 234 | tr_l2r.daemon = True 235 | tr_l2r.start() 236 | 237 | tr_r2l = threading.Thread(target=self.remote_to_local, args=[]) 238 | tr_r2l.daemon = True 239 | tr_r2l.start() 240 | 241 | try: 242 | self.display_stats() 243 | 244 | except KeyboardInterrupt: 245 | print("") # Just to avoid "^C" at the end of line 246 | 247 | def run_updown(self, status: str) -> None: 248 | if not self.updown_script: 249 | return 250 | 251 | if status == "down" and not self.updown_up_success: 252 | # If 'up' failed we are immediately called with 'down' - don't do anything. 
253 | return 254 | 255 | routes = " ".join(self.routes) 256 | try: 257 | cmd = f"{self.updown_script} {status} {self.tun_name} {self.local_ip} {self.remote_ip} {routes}" 258 | logger.info("Running --up-down script: %s", cmd) 259 | self.run_command(cmd) 260 | self.updown_up_success = True 261 | except AssertionError: 262 | logger.error("Updown script %s exitted with error.", self.updown_script) 263 | sys.exit(1) 264 | 265 | def start(self, local_ip: str, remote_ip: str, routes: List[str], updown_script: str) -> None: 266 | self.local_ip = local_ip 267 | self.remote_ip = remote_ip 268 | self.routes = routes 269 | self.updown_script = updown_script 270 | 271 | try: 272 | self.open_remote_tunnel() 273 | self.open_local_tunnel() 274 | self.run_updown("up") 275 | self.process_traffic() 276 | 277 | finally: 278 | logger.info("Closing tunnel, please wait...") 279 | self.run_updown("down") 280 | self.exit() 281 | self._exiting = True 282 | self.delete_tun() 283 | 284 | def display_stats(self) -> None: 285 | def _erase_line() -> None: 286 | print("\r\x1B[K", end="") # Erase line 287 | 288 | stat_history = [self.stats] 289 | stat_history_len = int(self.stats_secs / self.stats_refresh) 290 | start_ts = time.time() 291 | 292 | while True: 293 | time.sleep(self.stats_refresh) 294 | 295 | # Take another 'stat' snapshot 296 | self.stats_lock.acquire() 297 | stat_history.insert(1, copy.copy(self.stats)) 298 | self.stats_lock.release() 299 | stat_history[1]["ts"] = time.time() 300 | 301 | # Calculate sliding window average 302 | if stat_history[1]["ts"] > stat_history[-1]["ts"]: 303 | l2r_avg = (stat_history[1]["l2r"] - stat_history[-1]["l2r"]) / (stat_history[1]["ts"] - stat_history[-1]["ts"]) 304 | r2l_avg = (stat_history[1]["r2l"] - stat_history[-1]["r2l"]) / (stat_history[1]["ts"] - stat_history[-1]["ts"]) 305 | else: 306 | l2r_avg = r2l_avg = 0.0 307 | 308 | # Trim the oldest points 309 | del stat_history[stat_history_len + 1 :] 310 | 311 | uptime = seconds_to_human(time.time() - start_ts, decimal=0) 312 | l2r_t_h, l2r_t_u = bytes_to_human(stat_history[1]["l2r"]) 313 | r2l_t_h, r2l_t_u = bytes_to_human(stat_history[1]["r2l"]) 314 | l2r_a_h, l2r_a_u = bytes_to_human(l2r_avg) 315 | r2l_a_h, r2l_a_u = bytes_to_human(r2l_avg) 316 | 317 | _erase_line() 318 | print(f"{uptime} | In: {r2l_t_h:6.1f}{r2l_t_u:>2s} @ {r2l_a_h:6.1f}{r2l_a_u:>2s}/s | Out: {l2r_t_h:6.1f}{l2r_t_u:>2s} @ {l2r_a_h:6.1f}{l2r_a_u:>2s}/s", end="", flush=True) 319 | 320 | 321 | def random_ips(network: str) -> Tuple[str, str]: 322 | # Network address 323 | net = ipaddress.ip_network(network) 324 | # Random host-part 325 | host_bytes = int(random.uniform(2, 2 ** (net.max_prefixlen - net.prefixlen) - 4)) & 0xFFFFFFFE 326 | # Construct local/remote IP 327 | local_ip = net.network_address + host_bytes 328 | remote_ip = net.network_address + host_bytes + 1 329 | return local_ip.compressed, remote_ip.compressed 330 | 331 | 332 | def main() -> int: 333 | if sys.platform != "linux": 334 | print("The 'ssm-tunnel' program only works on Linux at the moment!", file=sys.stderr) 335 | print("In other systems you are welcome to install it in VirtualBox or in a similar virtual environment running Linux.", file=sys.stderr) 336 | sys.exit(1) 337 | 338 | ## Split command line args 339 | args = parse_args(sys.argv[1:]) 340 | 341 | configure_logging(args.log_level) 342 | 343 | tunnel = None 344 | try: 345 | if args.list: 346 | # --list 347 | InstanceResolver(args).print_list() 348 | sys.exit(0) 349 | 350 | instance_id, _ = 
InstanceResolver(args).resolve_instance(args.INSTANCE) 351 | if not instance_id: 352 | logger.warning("Could not resolve Instance ID for '%s'", args.INSTANCE) 353 | logger.warning("Perhaps the '%s' is not registered in SSM?", args.INSTANCE) 354 | sys.exit(1) 355 | 356 | local_ip, remote_ip = random_ips(args.tunnel_cidr) 357 | tunnel = SsmTunnel(instance_id, profile=args.profile, region=args.region) 358 | tunnel.start(local_ip, remote_ip, list(args.routes) or [], args.updown_script) 359 | 360 | except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: 361 | logger.error(e) 362 | sys.exit(1) 363 | 364 | finally: 365 | if tunnel: 366 | tunnel.delete_tun() 367 | 368 | return 0 369 | 370 | 371 | if __name__ == "__main__": 372 | main() 373 | -------------------------------------------------------------------------------- /ssm_tools/talker.py: -------------------------------------------------------------------------------- 1 | import time 2 | import logging 3 | import pexpect 4 | 5 | logger = logging.getLogger("ssm-tools.talker") 6 | 7 | 8 | class SsmTalker: 9 | def __init__(self, instance_id: str, profile: str, region: str) -> None: 10 | self._instance_id = instance_id 11 | self.connect(instance_id, profile, region) 12 | 13 | def connect(self, instance_id: str, profile: str, region: str) -> None: 14 | extra_args = "" 15 | if profile: 16 | extra_args += f"--profile {profile} " 17 | if region: 18 | extra_args += f"--region {region} " 19 | command = f"aws {extra_args} ssm start-session --target {instance_id}" 20 | logger.debug("Spawning: %s", command) 21 | self._child = pexpect.spawn(command, echo=False, encoding="utf-8", timeout=10) 22 | logger.debug("PID: %s", self._child.pid) 23 | 24 | self.wait_for_prompt() 25 | logger.debug(self._child.before.strip()) 26 | self.shell_prompt = self._child.after 27 | 28 | # Turn off input echo 29 | self._child.sendline("stty -echo") 30 | self.wait_for_prompt() 31 | 32 | # Change to home directory (SSM session starts in '/') 33 | self._child.sendline("cd") 34 | self.wait_for_prompt() 35 | 36 | def exit(self) -> None: 37 | logger.debug("Closing session") 38 | self._child.sendcontrol("c") 39 | time.sleep(0.5) 40 | self._child.sendline("exit") 41 | try: 42 | self._child.expect(["Exiting session", pexpect.EOF]) 43 | except (OSError, pexpect.exceptions.EOF): 44 | pass 45 | 46 | def wait_for_prompt(self) -> None: 47 | """ 48 | As of now a typical SSM prompt is 'sh-4.2$ ' 49 | """ 50 | self._child.expect(".*\$ $") 51 | -------------------------------------------------------------------------------- /upload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | 3 | VERSION=$(python -c 'import ssm_tools; print(ssm_tools.__version__)') 4 | twine check dist/*${VERSION}* 5 | twine upload dist/*${VERSION}* 6 | --------------------------------------------------------------------------------